Dataset schema:

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: 01490053cb9fd905829b36cc4370a3f6de2a8949 | size: 2,655 | ext: py | lang: Python
max_stars_repo: azure-graphrbac/azure/graphrbac/models/application_create_parameters.py @ CharaD7/azure-sdk-for-python (head 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c), licenses ["MIT"], stars: null
max_issues_repo: azure-graphrbac/azure/graphrbac/models/application_create_parameters.py @ CharaD7/azure-sdk-for-python (head 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c), licenses ["MIT"], issues: null
max_forks_repo: azure-graphrbac/azure/graphrbac/models/application_create_parameters.py @ CharaD7/azure-sdk-for-python (head 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c), licenses ["MIT"], forks: null
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationCreateParameters(Model):
    """Request parameters for creating a new application.
:param available_to_other_tenants: Indicates if the application will be
available to other tenants
:type available_to_other_tenants: bool
:param display_name: Application display name
:type display_name: str
:param homepage: Application homepage
:type homepage: str
:param identifier_uris: Application Uris
:type identifier_uris: list of str
:param reply_urls: Application reply Urls
:type reply_urls: list of str
:param key_credentials: the list of KeyCredential objects
:type key_credentials: list of :class:`KeyCredential
<azure.graphrbac.models.KeyCredential>`
:param password_credentials: the list of PasswordCredential objects
:type password_credentials: list of :class:`PasswordCredential
<azure.graphrbac.models.PasswordCredential>`
"""
_validation = {
'available_to_other_tenants': {'required': True},
'display_name': {'required': True},
'identifier_uris': {'required': True},
}
_attribute_map = {
'available_to_other_tenants': {'key': 'availableToOtherTenants', 'type': 'bool'},
'display_name': {'key': 'displayName', 'type': 'str'},
'homepage': {'key': 'homepage', 'type': 'str'},
'identifier_uris': {'key': 'identifierUris', 'type': '[str]'},
'reply_urls': {'key': 'replyUrls', 'type': '[str]'},
'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},
'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},
}
def __init__(self, available_to_other_tenants, display_name, identifier_uris, homepage=None, reply_urls=None, key_credentials=None, password_credentials=None):
self.available_to_other_tenants = available_to_other_tenants
self.display_name = display_name
self.homepage = homepage
self.identifier_uris = identifier_uris
self.reply_urls = reply_urls
self.key_credentials = key_credentials
self.password_credentials = password_credentials
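# Usage sketch (illustrative values only; not part of the generated SDK file):
#
#   params = ApplicationCreateParameters(
#       available_to_other_tenants=False,
#       display_name="sample-app",
#       identifier_uris=["https://contoso.example/sample-app"],
#       homepage="https://contoso.example",
#   )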
avg_line_length: 43.52459 | max_line_length: 163 | alphanum_fraction: 0.66629
hexsha: c224040ce1540203cf28bc0237e09cf40944fbda | size: 8,604 | ext: py | lang: Python
max_stars_repo: frappeclient/frappeclient.py @ ard-ly/frappe-client (head a0c4bdc116b3101e52950020a95f42d9c1401ab9), licenses ["MIT"], stars: null
max_issues_repo: frappeclient/frappeclient.py @ ard-ly/frappe-client (head a0c4bdc116b3101e52950020a95f42d9c1401ab9), licenses ["MIT"], issues: null
max_forks_repo: frappeclient/frappeclient.py @ ard-ly/frappe-client (head a0c4bdc116b3101e52950020a95f42d9c1401ab9), licenses ["MIT"], forks: 1 (2020-12-27T11:33:06.000Z to 2020-12-27T11:33:06.000Z)
content:
import requests
import json
import frappe
from urllib.parse import quote
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
from io import BytesIO  # binary responses (e.g. PDF downloads) need a bytes buffer
try:
unicode
except NameError:
unicode = str
class AuthError(Exception):
pass
class FrappeException(Exception):
pass
class NotUploadableException(FrappeException):
def __init__(self, doctype):
        self.message = "The doctype `{0}` is not uploadable, so you can't download the template".format(doctype)
class FrappeClient(object):
def __init__(self, url=None, username=None, password=None, api_key=None, api_secret=None, verify=True):
self.headers = dict(Accept='application/json')
self.session = requests.Session()
self.can_download = []
self.verify = verify
self.url = url
if username and password:
self.login(username, password)
if api_key and api_secret:
self.authenticate(api_key, api_secret)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.logout()
def login(self, username, password):
r = self.session.post(self.url, data={
'cmd': 'login',
'usr': username,
'pwd': password
}, verify=self.verify, headers=self.headers)
if r.json().get('message') == "Logged In":
self.can_download = []
return r.json()
else:
raise AuthError
def authenticate(self, api_key, api_secret):
auth_header = {'Authorization': 'token {}:{}'.format(api_key, api_secret)}
self.session.headers.update(auth_header)
def logout(self):
self.session.get(self.url, params={
'cmd': 'logout',
})
def get_list(self, doctype, fields='"*"', filters=None, limit_start=0, limit_page_length=0, order_by=None):
'''Returns list of records of a particular type'''
if not isinstance(fields, unicode):
fields = json.dumps(fields)
params = {
"fields": fields,
}
if filters:
params["filters"] = json.dumps(filters)
if limit_page_length:
params["limit_start"] = limit_start
params["limit_page_length"] = limit_page_length
if order_by:
params['order_by'] = order_by
res = self.session.get(self.url + "/api/resource/" + doctype, params=params,
verify=self.verify, headers=self.headers)
return self.post_process(res)
def insert(self, doc):
'''Insert a document to the remote server
:param doc: A dict or Document object to be inserted remotely'''
temp = frappe.as_json(doc.as_dict(convert_dates_to_str=True, no_nulls = True))
res = self.session.post(self.url + "/api/resource/" + quote(doc.get("doctype")),
data={"data":temp}, headers=self.headers)
return self.post_process(res)
def insert_many(self, docs):
'''Insert multiple documents to the remote server
:param docs: List of dict or Document objects to be inserted in one request'''
return self.post_request({
"cmd": "frappe.client.insert_many",
"docs": frappe.as_json(docs)
})
def update(self, doc):
'''Update a remote document
:param doc: dict or Document object to be updated remotely. `name` is mandatory for this'''
url = self.url + "/api/resource/" + quote(doc.get("doctype")) + "/" + quote(doc.get("name"))
res = self.session.put(url, data={"data":json.dumps(doc)}, headers=self.headers)
return self.post_process(res)
def bulk_update(self, docs):
'''Bulk update documents remotely
:param docs: List of dict or Document objects to be updated remotely (by `name`)'''
return self.post_request({
'cmd': 'frappe.client.bulk_update',
'docs': json.dumps(docs)
})
def delete(self, doctype, name):
'''Delete remote document by name
:param doctype: `doctype` to be deleted
:param name: `name` of document to be deleted'''
return self.post_request({
'cmd': 'frappe.client.delete',
'doctype': doctype,
'name': name
})
def submit(self, doclist):
'''Submit remote document
:param doc: dict or Document object to be submitted remotely'''
return self.post_request({
'cmd': 'frappe.client.submit',
'doclist': json.dumps(doclist)
})
def get_value(self, doctype, fieldname=None, filters=None):
return self.get_request({
'cmd': 'frappe.client.get_value',
'doctype': doctype,
'fieldname': fieldname or 'name',
'filters': json.dumps(filters)
})
def set_value(self, doctype, docname, fieldname, value):
return self.post_request({
'cmd': 'frappe.client.set_value',
'doctype': doctype,
'name': docname,
'fieldname': fieldname,
'value': value
})
def cancel(self, doctype, name):
return self.post_request({
'cmd': 'frappe.client.cancel',
'doctype': doctype,
'name': name
})
def get_doc(self, doctype, name="", filters=None, fields=None):
'''Returns a single remote document
:param doctype: DocType of the document to be returned
:param name: (optional) `name` of the document to be returned
:param filters: (optional) Filter by this dict if name is not set
        :param fields: (optional) Fields to be returned, will return everything if not set'''
params = {}
if filters:
params["filters"] = json.dumps(filters)
if fields:
params["fields"] = json.dumps(fields)
res = self.session.get(self.url + '/api/resource/' + doctype + '/' + name,
params=params)
return self.post_process(res)
def rename_doc(self, doctype, old_name, new_name):
'''Rename remote document
:param doctype: DocType of the document to be renamed
:param old_name: Current `name` of the document to be renamed
:param new_name: New `name` to be set'''
params = {
'cmd': 'frappe.client.rename_doc',
'doctype': doctype,
'old_name': old_name,
'new_name': new_name
}
return self.post_request(params)
def get_pdf(self, doctype, name, print_format='Standard', letterhead=True):
params = {
'doctype': doctype,
'name': name,
'format': print_format,
'no_letterhead': int(not bool(letterhead))
}
response = self.session.get(
self.url + '/api/method/frappe.templates.pages.print.download_pdf',
params=params, stream=True)
return self.post_process_file_stream(response)
def get_html(self, doctype, name, print_format='Standard', letterhead=True):
params = {
'doctype': doctype,
'name': name,
'format': print_format,
'no_letterhead': int(not bool(letterhead))
}
response = self.session.get(
self.url + '/print', params=params, stream=True
)
return self.post_process_file_stream(response)
def __load_downloadable_templates(self):
self.can_download = self.get_api('frappe.core.page.data_import_tool.data_import_tool.get_doctypes')
def get_upload_template(self, doctype, with_data=False):
if not self.can_download:
self.__load_downloadable_templates()
if doctype not in self.can_download:
raise NotUploadableException(doctype)
params = {
'doctype': doctype,
'parent_doctype': doctype,
'with_data': 'Yes' if with_data else 'No',
'all_doctypes': 'Yes'
}
request = self.session.get(
self.url + '/api/method/frappe.core.page.data_import_tool.exporter.get_template',
params=params
)
return self.post_process_file_stream(request)
def get_api(self, method, params={}):
res = self.session.get(self.url + '/api/method/' + method, params=params)
return self.post_process(res)
def post_api(self, method, params={}):
res = self.session.post(self.url + '/api/method/' + method, params=params)
return self.post_process(res)
def get_request(self, params):
res = self.session.get(self.url, params=self.preprocess(params))
res = self.post_process(res)
return res
def post_request(self, data):
res = self.session.post(self.url, data=self.preprocess(data))
res = self.post_process(res)
return res
def preprocess(self, params):
'''convert dicts, lists to json'''
        for key, value in params.items():
if isinstance(value, (dict, list)):
params[key] = json.dumps(value)
return params
def post_process(self, response):
try:
rjson = response.json()
except ValueError:
print(response.text)
raise
if rjson and ('exc' in rjson) and rjson['exc']:
raise FrappeException(rjson['exc'])
if 'message' in rjson:
return rjson['message']
elif 'data' in rjson:
return rjson['data']
else:
return None
def post_process_file_stream(self, response):
if response.ok:
            # iter_content yields bytes, so collect them into a bytes buffer
            output = BytesIO()
for block in response.iter_content(1024):
output.write(block)
return output
else:
try:
rjson = response.json()
except ValueError:
print(response.text)
raise
if rjson and ('exc' in rjson) and rjson['exc']:
raise FrappeException(rjson['exc'])
if 'message' in rjson:
return rjson['message']
elif 'data' in rjson:
return rjson['data']
else:
return None
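# Usage sketch (illustrative only; the URL, key and secret below are placeholders):
#
#   client = FrappeClient("https://frappe.example.com", api_key="KEY", api_secret="SECRET")
#   todos = client.get_list("ToDo", fields=["name", "description"], limit_page_length=10)
#   client.logout()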
avg_line_length: 27.227848 | max_line_length: 108 | alphanum_fraction: 0.695026
hexsha: 418113a628a60f9b440ee95c29a923c0374200e6 | size: 1,539 | ext: py | lang: Python
max_stars_repo: tests/test_parser.py @ decorator-factory/py-lark-lispy (head bafd11fb311fefbb2cb40563ef2e113eeb6b4ff5), licenses ["MIT"], stars: 1 (2020-08-13T19:16:24.000Z to 2020-08-13T19:16:24.000Z)
max_issues_repo: tests/test_parser.py @ decorator-factory/py-lark-lispy (head bafd11fb311fefbb2cb40563ef2e113eeb6b4ff5), licenses ["MIT"], issues: null
max_forks_repo: tests/test_parser.py @ decorator-factory/py-lark-lispy (head bafd11fb311fefbb2cb40563ef2e113eeb6b4ff5), licenses ["MIT"], forks: null
content:
import pylarklispy.parser as parser
from pylarklispy.entities import *
FULL_EXAMPLE = \
"""
; This is a comment
(hello world)
(lorem :ipsum [dolor-sit amet])
(quoted &ex &[pression])
(turtles (all (the (way :down))))
(whitespace [:does not
:mat ter])
(commas,,,,, :are [white , , space])
(strings "work fine")
(sigils ~r"are cool")
((higher order) stuff)
"""
def test_smoke():
# check that all the language constructs work
lark_parser = parser.parser
tree = lark_parser.parse(FULL_EXAMPLE)
def test_ast():
# check that the generated AST is correct
lark_parser = parser.parser
lark_transformer = parser.Transformer()
tree = lark_parser.parse(FULL_EXAMPLE)
ast = lark_transformer.transform(tree)
expected = (
SExpr( Name("hello"), Name("world") ),
SExpr( Name("lorem"), Atom("ipsum"), Vector(Name("dolor-sit"), Name("amet")) ),
SExpr( Name("quoted"), Quoted(Name("ex")), Quoted(Vector(Name("pression"))) ),
SExpr( Name("turtles"), SExpr( Name("all"), SExpr( Name("the"), SExpr( Name("way"), Atom("down") )) ) ),
SExpr( Name("whitespace"), Vector(Atom("does"), Name("not"), Atom("mat"), Name("ter")) ),
SExpr( Name("commas"), Atom("are"), Vector(Name("white"), Name("space")) ),
SExpr( Name("strings"), String("work fine") ),
SExpr( Name("sigils"), SigilString("r", "are cool") ),
SExpr( SExpr(Name("higher"), Name("order")), Name("stuff") ),
)
assert len(ast) == len(expected)
assert ast == expected
avg_line_length: 34.2 | max_line_length: 114 | alphanum_fraction: 0.612736
hexsha: b516d6004c7966648735348006913ba5c56bbe7e | size: 338 | ext: py | lang: Python
max_stars_repo: old files/problem0006.py @ kmarcini/Project-Euler-Python (head d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3), licenses ["BSD-3-Clause"], stars: null
max_issues_repo: old files/problem0006.py @ kmarcini/Project-Euler-Python (head d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3), licenses ["BSD-3-Clause"], issues: null
max_forks_repo: old files/problem0006.py @ kmarcini/Project-Euler-Python (head d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3), licenses ["BSD-3-Clause"], forks: null
content:
###########################
# Project Euler Problem 6
# Sum square difference
#
# Code by Kevin Marciniak
###########################
sum_of_squares = 0
square_of_sum = 0
# for i in range(1, 11):
for i in range(1, 101):
sum_of_squares += i * i
square_of_sum += i
square_of_sum *= square_of_sum
print(square_of_sum - sum_of_squares)
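# Closed-form cross-check (not part of the original script):
# sum(1..100) = 100 * 101 / 2 = 5050, so square_of_sum = 5050**2 = 25502500;
# sum of squares = 100 * 101 * 201 / 6 = 338350; the printed difference is 25164150.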
avg_line_length: 17.789474 | max_line_length: 37 | alphanum_fraction: 0.606509
hexsha: 56dbac5bff931c332aaf5871218ded9d970b2616 | size: 74 | ext: py | lang: Python
max_stars_repo: 9 - Text Bounding Box with OpenCV (EAST)/east/__init__.py @ IgorMeloS/OCR (head f4916dde9accbba3b86b6c6d5db850ef04daf426), licenses ["Apache-2.0"], stars: null
max_issues_repo: 9 - Text Bounding Box with OpenCV (EAST)/east/__init__.py @ IgorMeloS/OCR (head f4916dde9accbba3b86b6c6d5db850ef04daf426), licenses ["Apache-2.0"], issues: null
max_forks_repo: 9 - Text Bounding Box with OpenCV (EAST)/east/__init__.py @ IgorMeloS/OCR (head f4916dde9accbba3b86b6c6d5db850ef04daf426), licenses ["Apache-2.0"], forks: null
content:
from .east import EAST_OUTPUT_LAYERS
from .east import decode_predictions
avg_line_length: 24.666667 | max_line_length: 36 | alphanum_fraction: 0.864865
hexsha: 60a56466df866d49c622fcbb63fa235500f003c0 | size: 4,992 | ext: py | lang: Python
max_stars_repo: train_scripts/train_hopper_from_load.py @ hari-sikchi/stable-baselines (head f3627c4b8625c1021a1a893b0a2fd8bfed9e84ed), licenses ["MIT"], stars: null
max_issues_repo: train_scripts/train_hopper_from_load.py @ hari-sikchi/stable-baselines (head f3627c4b8625c1021a1a893b0a2fd8bfed9e84ed), licenses ["MIT"], issues: null
max_forks_repo: train_scripts/train_hopper_from_load.py @ hari-sikchi/stable-baselines (head f3627c4b8625c1021a1a893b0a2fd8bfed9e84ed), licenses ["MIT"], forks: null
content:
#from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines import PPO2, SAC
import hopper_rep
import os
import gym
import numpy as np
import matplotlib.pyplot as plt
from stable_baselines.common import set_global_seeds
from stable_baselines.bench import Monitor
from stable_baselines.results_plotter import load_results, ts2xy
import json
best_mean_reward, n_steps = -np.inf, 0
best_eval_mean_reward = -np.inf
seed = 600
log_dir = "logs/mujoco/Hopper_load_saved_"+str(seed)+ "/"
os.makedirs(log_dir, exist_ok=True)
log_data = {'dt':[],'eval':[],'train':[],'timesteps':[]}
# f = open(log_dir+"eval.txt", "w")
set_global_seeds(seed)
test_env = DummyVecEnv([lambda: gym.make("Hopper-v2")])
max_eval_timesteps = 5000
# Automatically normalize the input features
# test_env = VecNormalize(test_env, norm_obs=True, norm_reward=False,
# clip_obs=10.)
def callback(_locals, _globals):
"""
Callback called at each step (for DQN an others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
"""
global n_steps, best_mean_reward, best_eval_mean_reward,model
# Print stats every 1000 calls
total_reward=0
mean_reward=0
if (n_steps + 1) % 1000== 0:
for i in range(100):
dones=False
timesteps = 0
obs = test_env.reset()
while not dones:
action, _states = model.predict(obs)
timesteps+=1
obs, rewards, dones, info = test_env.step(action)
total_reward+=rewards
if(timesteps==max_eval_timesteps):
dones=True
if(dones):
break
mean_reward=total_reward/100.0
print("Value of gamma is: {}".format(model.gamma))
print("Steps: {} 100 Episode eval: {} Best eval {} ".format(n_steps,mean_reward,best_eval_mean_reward))
# f.write("Steps: {} 100 Episode eval: {} Best eval {}\n".format(n_steps,mean_reward,best_eval_mean_reward))
if mean_reward > best_eval_mean_reward:
best_eval_mean_reward = mean_reward
# Example for saving best model
print("Saving new best model")
_locals['self'].save(log_dir + 'best_model_eval.pkl')
print("dt: {}".format(model.action_repetition))
log_data['dt'].append(model.action_repetition)
log_data['eval'].append(mean_reward)
log_data['timesteps'].append(model.num_timesteps)
# Evaluate policy training performance
if (n_steps + 1) % 1000 == 0:
x, y = ts2xy(load_results(log_dir), 'timesteps')
if len(x) > 0:
mean_reward = np.mean(y[-100:])
print(x[-1], 'timesteps')
print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > best_mean_reward:
best_mean_reward = mean_reward
# Example for saving best model
print("Saving new best model")
_locals['self'].save(log_dir + 'best_model.pkl')
log_data['train'].append(mean_reward)
n_steps += 1
# Returning False will stop training early
return True
# env_s= lambda: gym.make("HopperEnvRep-v0")
# env_s = Monitor(env_s, log_dir, allow_early_resets=True)
env = DummyVecEnv([lambda: gym.make("Hopper-v2")])
# Automatically normalize the input features
# env = VecNormalize(env, norm_obs=True, norm_reward=False,
# clip_obs=10.)
env = Monitor(env.envs[0], log_dir, allow_early_resets=True)
#env.act_rep = 20
model = SAC(MlpPolicy, env, verbose=1)
model = model.load("/home/hsikchi/work/stable-baselines/logs/mujoco/Hopper_replay_append_100/best_model_eval.pkl",env)
## Eval once
obs = env.reset()
cum_reward=0
step = 0
while True:
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
cum_reward+=rewards
step+=1
if(dones):
obs = env.reset()
break
#print(rewards)
#env.render()
print("Initialization evaluation :{}, steps :{}".format(cum_reward,step))
## Initialization evaluation complete
print("Starting Experiment with seed: {}".format(seed))
#model = PPO2(MlpPolicy, env,verbose=True)
model.learn(total_timesteps=1000000,use_action_repeat= False,poisson=False, callback=callback,only_explore_with_act_rep = False)
# f.close()
# json = json.dumps(log_data)
# f = open(log_dir+"log_data.json","w")
# f.write(json)
# f.close()
np.save(log_dir+"log_data.npy",log_data)
# Don't forget to save the VecNormalize statistics when saving the agent
# log_dir = "logs/hopper_aneal/"
# model.save(log_dir + "sac_hopper")
#env.save(os.path.join(log_dir, "vec_normalize.pkl"))
avg_line_length: 33.059603 | max_line_length: 128 | alphanum_fraction: 0.66226
hexsha: 46b34b3d019fc19eb554b0bc6afa753d7a56a67f | size: 3,751 | ext: py | lang: Python
max_stars_repo: app/lambda_functions/social_integration/twitter/publish.py @ aveek22/wordpress-social (head 14bb18450e0885c239bf2e0ff213be1d57fe1155), licenses ["MIT"], stars: null
max_issues_repo: app/lambda_functions/social_integration/twitter/publish.py @ aveek22/wordpress-social (head 14bb18450e0885c239bf2e0ff213be1d57fe1155), licenses ["MIT"], issues: null
max_forks_repo: app/lambda_functions/social_integration/twitter/publish.py @ aveek22/wordpress-social (head 14bb18450e0885c239bf2e0ff213be1d57fe1155), licenses ["MIT"], forks: null
content:
# Import system libraries
import boto3 # To interact with the AWS resources
import tweepy # Use Tweepy to interact with Twitter
import logging # Log messages for debugging
import os # Get environment variables
# Import custom libraries
from credentials import TwitterCredentials
# Setup Logger
log = logging.getLogger(__name__)
class PublishTwitter:
""" Publish content to Twitter profile. """
def __init__(self):
"""Initiate the PublishTwitter object."""
log.debug(f'Initiate PublishTwitter object.')
self.tweepy_client = self._create_tweepy_client()
        log.info(f'PublishTwitter object initiated.')
def _create_tweepy_client(self):
""" Create the Tweepy Client object. """
# Get the twitter credentials from Parameter Store
log.debug(f'Get twitter credentials from AWS parameter store.')
twitter_credentials = self._get_credentials_from_parameter_store()
log.debug(f'Twitter credentials fetched.')
# Prepare the twitter client
if(twitter_credentials):
try:
log.debug(f'Creating Tweepy Client.')
client = tweepy.Client(
consumer_key = twitter_credentials.CONSUMER_KEY,
consumer_secret = twitter_credentials.CONSUMER_KEY_SECRET,
access_token = twitter_credentials.ACCESS_TOKEN,
access_token_secret = twitter_credentials.ACCESS_TOKEN_SECRET
)
log.debug(f'Tweepy Client created.')
except Exception as e:
log.error(f'Unable to create Tweepy client. Error: {e}')
client = False
else:
client = False
log.warning(f'Tweepy client not created. Application will quit..')
return client
def _get_credentials_from_parameter_store(self):
""" Get the Twitter Credentials. """
# Create the credentials object
twitter_credentials = TwitterCredentials()
# Fetch Twitter credentials from parameter store
twitter_credentials.get_twitter_credentials_from_parameter_store()
return twitter_credentials
def get_payload_from_event(self, event):
"""Prepare payload text from event."""
try:
log.debug(f'Preparing payload from event.')
share_text = event['twitter']['tw_share_text']
hashtags = event['twitter']['tw_hashtags']
share_url = event['twitter']['tw_share_url']
payload = f"{share_text}\n{hashtags}\n{share_url}"
log.info(f'Tweet text prepared. {payload}')
except Exception as e:
payload = False
log.error(f'Unable to prepare tweet text. Error: {e}')
return payload
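    # Illustrative event shape expected by get_payload_from_event (only the keys are
    # taken from the code above; the values are placeholders):
    #   {
    #       "twitter": {
    #           "tw_share_text": "New blog post is live!",
    #           "tw_hashtags": "#aws #python",
    #           "tw_share_url": "https://example.com/post"
    #       }
    #   }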
def post_content(self, payload):
""" Share the content on Twitter Profile. """
        # Set control variables.
client = False
client = self.tweepy_client
if(payload):
try:
log.debug(f'Sharing post to Twitter.')
response = client.create_tweet(text=payload)
log.debug(f"Response Content: {response.data['text']}")
if(len(response.data['text']) > 0):
log.info(f'Post shared to Twitter successfully.')
else:
log.warning(f'Unable to share content to Twitter. Run in debug mode to view detailed error.')
except Exception as e:
log.error(f'Error in sharing post to Twitter. Error: {e}')
avg_line_length: 34.1 | max_line_length: 113 | alphanum_fraction: 0.592909
hexsha: e2ac1309ddf3c55bfc6a30864c39a0f67589cf7e | size: 4,691 | ext: py | lang: Python
max_stars_repo: tests/examples/market_maker/test_on_chain_market_maker.py @ upgradvisor/vyper (head 642884ea938a25793c1b2fac866e8458e63a7b49), licenses ["Apache-2.0"], stars: 1,347 (2019-11-22T06:49:38.000Z to 2022-03-31T19:49:32.000Z)
max_issues_repo: tests/examples/market_maker/test_on_chain_market_maker.py @ upgradvisor/vyper (head 642884ea938a25793c1b2fac866e8458e63a7b49), licenses ["Apache-2.0"], issues: 915 (2019-11-21T05:48:16.000Z to 2022-03-31T23:51:03.000Z)
max_forks_repo: tests/examples/market_maker/test_on_chain_market_maker.py @ upgradvisor/vyper (head 642884ea938a25793c1b2fac866e8458e63a7b49), licenses ["Apache-2.0"], forks: 262 (2019-11-28T01:44:04.000Z to 2022-03-31T21:33:43.000Z)
content:
import pytest
@pytest.fixture
def market_maker(get_contract):
with open("examples/market_maker/on_chain_market_maker.vy") as f:
contract_code = f.read()
return get_contract(contract_code)
TOKEN_NAME = "Vypercoin"
TOKEN_SYMBOL = "FANG"
TOKEN_DECIMALS = 18
TOKEN_INITIAL_SUPPLY = 21 * 10 ** 6
TOKEN_TOTAL_SUPPLY = TOKEN_INITIAL_SUPPLY * (10 ** TOKEN_DECIMALS)
@pytest.fixture
def erc20(get_contract):
with open("examples/tokens/ERC20.vy") as f:
contract_code = f.read()
return get_contract(
contract_code,
*[TOKEN_NAME, TOKEN_SYMBOL, TOKEN_DECIMALS, TOKEN_INITIAL_SUPPLY],
)
def test_initial_state(market_maker):
assert market_maker.totalEthQty() == 0
assert market_maker.totalTokenQty() == 0
assert market_maker.invariant() == 0
assert market_maker.owner() is None
def test_initiate(w3, market_maker, erc20, assert_tx_failed):
a0 = w3.eth.accounts[0]
erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={})
market_maker.initiate(
erc20.address,
w3.toWei(1, "ether"),
transact={"value": w3.toWei(2, "ether")},
)
assert market_maker.totalEthQty() == w3.toWei(2, "ether")
assert market_maker.totalTokenQty() == w3.toWei(1, "ether")
assert market_maker.invariant() == 2 * 10 ** 36
assert market_maker.owner() == a0
assert erc20.name() == TOKEN_NAME
assert erc20.decimals() == TOKEN_DECIMALS
# Initiate cannot be called twice
assert_tx_failed(
lambda: market_maker.initiate(
erc20.address, w3.toWei(1, "ether"), transact={"value": w3.toWei(2, "ether")}
)
) # noqa: E501
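# Note on the invariant value asserted above: the market is initiated with 2 ether
# (2 * 10**18 wei) and 1 token in 18-decimal base units (10**18), so a value of
# 2 * 10**36 is consistent with a constant-product (eth_qty * token_qty) invariant.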
def test_eth_to_tokens(w3, market_maker, erc20):
a1 = w3.eth.accounts[1]
erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={})
market_maker.initiate(
erc20.address,
w3.toWei(1, "ether"),
transact={"value": w3.toWei(2, "ether")},
)
assert erc20.balanceOf(market_maker.address) == w3.toWei(1, "ether")
assert erc20.balanceOf(a1) == 0
assert market_maker.totalTokenQty() == w3.toWei(1, "ether")
assert market_maker.totalEthQty() == w3.toWei(2, "ether")
market_maker.ethToTokens(transact={"value": 100, "from": a1})
assert erc20.balanceOf(market_maker.address) == 999999999999999950
assert erc20.balanceOf(a1) == 50
assert market_maker.totalTokenQty() == 999999999999999950
assert market_maker.totalEthQty() == 2000000000000000100
def test_tokens_to_eth(w3, market_maker, erc20):
a1 = w3.eth.accounts[1]
a1_balance_before = w3.eth.getBalance(a1)
erc20.transfer(a1, w3.toWei(2, "ether"), transact={})
erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={"from": a1})
market_maker.initiate(
erc20.address,
w3.toWei(1, "ether"),
transact={"value": w3.toWei(2, "ether"), "from": a1},
)
assert w3.eth.getBalance(market_maker.address) == w3.toWei(2, "ether")
# sent 2 eth, with initiate.
assert w3.eth.getBalance(a1) == a1_balance_before - w3.toWei(2, "ether")
assert market_maker.totalTokenQty() == w3.toWei(1, "ether")
erc20.approve(market_maker.address, w3.toWei(1, "ether"), transact={"from": a1})
market_maker.tokensToEth(w3.toWei(1, "ether"), transact={"from": a1})
# 1 eth less in market.
assert w3.eth.getBalance(market_maker.address) == w3.toWei(1, "ether")
# got 1 eth back, for trade.
assert w3.eth.getBalance(a1) == a1_balance_before - w3.toWei(1, "ether")
# Tokens increased by 1
assert market_maker.totalTokenQty() == w3.toWei(2, "ether")
assert market_maker.totalEthQty() == w3.toWei(1, "ether")
def test_owner_withdraw(w3, market_maker, erc20, assert_tx_failed):
a0, a1 = w3.eth.accounts[:2]
a0_balance_before = w3.eth.getBalance(a0)
# Approve 2 eth transfers.
erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={})
# Initiate market with 2 eth value.
market_maker.initiate(
erc20.address,
w3.toWei(1, "ether"),
transact={"value": w3.toWei(2, "ether")},
)
# 2 eth was sent to market_maker contract.
assert w3.eth.getBalance(a0) == a0_balance_before - w3.toWei(2, "ether")
# a0's balance is locked up in market_maker contract.
assert erc20.balanceOf(a0) == TOKEN_TOTAL_SUPPLY - w3.toWei(1, "ether")
# Only owner can call ownerWithdraw
assert_tx_failed(lambda: market_maker.ownerWithdraw(transact={"from": a1}))
market_maker.ownerWithdraw(transact={})
assert w3.eth.getBalance(a0) == a0_balance_before # Eth balance restored.
assert erc20.balanceOf(a0) == TOKEN_TOTAL_SUPPLY # Tokens returned to a0.
avg_line_length: 37.230159 | max_line_length: 89 | alphanum_fraction: 0.676188
hexsha: 3516c03b57743946de0f13b55c0eac484ea70631 | size: 6,162 | ext: py | lang: Python
max_stars_repo: onix/salameche/cram.py @ AI-Pranto/ONIX (head 2d9ef4598e1e6e982a236bddcc1b9f04bcbed706), licenses ["MIT"], stars: 16 (2019-09-18T06:03:55.000Z to 2022-02-16T05:23:53.000Z)
max_issues_repo: onix/salameche/cram.py @ AI-Pranto/ONIX (head 2d9ef4598e1e6e982a236bddcc1b9f04bcbed706), licenses ["MIT"], issues: 7 (2020-11-17T20:41:00.000Z to 2022-01-31T17:55:21.000Z)
max_forks_repo: onix/salameche/cram.py @ AI-Pranto/ONIX (head 2d9ef4598e1e6e982a236bddcc1b9f04bcbed706), licenses ["MIT"], forks: 11 (2019-09-11T13:52:05.000Z to 2021-11-24T09:43:22.000Z)
content:
"""Compute the solution of the matricial depletion equation using the CRAM method"""
import numpy as np
import warnings
import time
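# The loop in CRAM16 below evaluates the order-16 CRAM expression
#     N(t) ~= alpha_0 * N_0 + 2 * Re( sum_i alpha_i * (A*t - theta_i*I)^-1 * N_0 ),
# where the eight (theta_i, alpha_i) pairs are one member of each conjugate
# pole/residue pair of the rational approximation to exp(x) on the negative real axis.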
def CRAM16(At,N_0):
    """CRAM uses a Chebyshev Rational Approximation Method of order 16 to compute the solution of the matricial depletion equation.
Parameters
----------
At: numpy.array
Depletion matrix multiplied by the time interval over which nuclides are depleted
N_0: numpy.array
Initial nuclides' densities vector
"""
print ('CRAM CALLED')
t0 = time.time()
lN = len(N_0)
theta = np.array([
-1.0843917078696988026e1 +1.9277446167181652284e1j,
-5.2649713434426468895 +1.6220221473167927305e1j,
+5.9481522689511774808 +3.5874573620183222829j,
+3.5091036084149180974 +8.4361989858843750826j,
+6.4161776990994341923 +1.1941223933701386874j,
+1.4193758971856659786 +1.0925363484496722585e1j,
+4.9931747377179963991 +5.9968817136039422260j,
-1.4139284624888862114 +1.3497725698892745389e1j], dtype = np.complex256)
alpha_0 = np.complex256(2.1248537104952237488e-16 + 0.0j)
alpha = np.array([
-5.0901521865224915650e-7 -2.4220017652852287970e-5j,
+2.1151742182466030907e-4 +4.3892969647380673918e-3j,
+1.1339775178483930527e2 +1.0194721704215856450e2j,
+1.5059585270023467528e1 -5.7514052776421819979j,
-6.4500878025539646595e1 -2.2459440762652096056e2j,
-1.4793007113557999718 +1.7686588323782937906j,
-6.2518392463207918892e1 -1.1190391094283228480e1j,
+4.1023136835410021273e-2 -1.5743466173455468191e-1j], dtype = np.complex256)
l = len(theta)
N = N_0*0
_N = np.zeros((lN),dtype=np.complex128)
for i in range(l):
term1 = At - theta[i]*np.identity(np.shape(At)[0])
term2 = alpha[i]*N_0
_N += np.linalg.solve(term1,term2)
N = 2*_N.real
N = N + alpha_0*N_0
# For some reason here N is still complex and not only real
print('CRAM took:{} s'.format(time.time() - t0))
return N.real
# CRAM can yield non-zero values for nuclides that should be at zero because nothing is producing them.
# This algorithm checks which nuclides are in this situation and sets their densities to zero.
def CRAM_reality_check(bucell, index_dic, N):
    """This function checks for negative and extremely small densities (same as onix.salameche.CRAM_density_check). In addition, it compares the calculated new densities against the BUCell.leaves attribute (this attribute indicates which isotopes should or should not be produced during depletion). The comparison lets the function detect nuclides that have a non-zero density but should have a zero density and, likewise, nuclides that should be produced but have a zero density.
Parameters
----------
bucell: onix.Cell
BUCell being depleted
    index_dic: dict
        Dictionary where keys are nuclides' z-a-m ids and entries are their indexes in the density vector
N: numpy.array
New density vector solution to the depletion equation
"""
print('reality check called')
passlist = bucell.passlist
leaves = bucell.leaves
fission_leaves = bucell.fission_leaves
total_leaves = leaves + fission_leaves
negative_count = 0
small_count = 0
intruder_count = 0
missing_count = 0
for nuc_pass in passlist:
nuc_zamid = nuc_pass.zamid
nuc_name = nuc_pass.name
nuc_dens = nuc_pass.dens
index = index_dic[nuc_pass.zamid]
N_val = N[index]
# if nuc_name == 'Au-200':
# print nuc_name, nuc_zamid
# print nuc_dens
if N_val < 0:
# warnings.warn('NEGATIVE: Nuclide {}/{} has a negative density of {}'.format(nuc_name, nuc_zamid, N_val))
N[index] = 0.0
negative_count += 1
N_val = N[index]
if N_val < 1e-24:
# warnings.warn('TOO SMALL: Nuclide {}/{} has a density of {} below 1e-24'.format(nuc_name, nuc_zamid, N_val))
N[index] = 0.0
small_count += 1
N_val = N[index]
if nuc_zamid in total_leaves and N_val == 0:
# warnings.warn('MISSING: Nuclide {} has a density of 0 while it belongs to the creation tree'.format(nuc_name))
missing_count += 1
elif nuc_zamid not in total_leaves and N_val != 0:
# warnings.warn('INTRUDER: Nuclide {} has a density of {} while it is not in the creation tree'.format(nuc_name, N_val ))
intruder_count += 1
print(('There are {} negative'.format(negative_count)))
print(('There are {} too small'.format(small_count)))
print(('There are {} intruders'.format(intruder_count)))
print(('There are {} missings'.format(missing_count)))
def CRAM_density_check(bucell, N):
"""This function checks for extremely low densities and negative densities. Densities below one atom per cubic centimeter are set to zero. Negative densities are produced by mathematical approximations inherent to the CRAM method and therefore do not bear any physical meaning. They are also set to zero.
Parameters
----------
bucell: onix.Cell
BUCell being depleted
N: numpy.array
New density vector solution to the depletion equation
"""
passlist = bucell.passlist
index_dict = passlist.get_index_dict()
passport_list = passlist.passport_list
negative_count = 0
small_count = 0
for nuc_pass in passport_list:
nuc_zamid = nuc_pass.zamid
nuc_name = nuc_pass.name
index = index_dict[nuc_pass.zamid]
N_val = N[index]
if N_val < 0:
#warnings.warn('NEGATIVE: Nuclide {}/{} has a negative density of {}'.format(nuc_name, nuc_zamid, N_val))
N[index] = 0.0
negative_count += 1
N_val = N[index]
if N_val < 1e-24:
#warnings.warn('TOO SMALL: Nuclide {}/{} has a density of {} below 1e-24'.format(nuc_name, nuc_zamid, N_val))
N[index] = 0.0
small_count += 1
print(('There are {} negative'.format(negative_count)))
print(('There are {} too small'.format(small_count)))
avg_line_length: 36.461538 | max_line_length: 494 | alphanum_fraction: 0.672671
hexsha: 077fa31b2d4f69b8707fd29fbb6668e1de90e901 | size: 15,319 | ext: py | lang: Python
max_stars_repo: ctapipe/core/tool.py @ mservillat/ctapipe (head 81ce758f9594751142333bca339ac69fe91c92cb), licenses ["BSD-3-Clause"], stars: null
max_issues_repo: ctapipe/core/tool.py @ mservillat/ctapipe (head 81ce758f9594751142333bca339ac69fe91c92cb), licenses ["BSD-3-Clause"], issues: null
max_forks_repo: ctapipe/core/tool.py @ mservillat/ctapipe (head 81ce758f9594751142333bca339ac69fe91c92cb), licenses ["BSD-3-Clause"], forks: null
content:
"""Classes to handle configurable command-line user interfaces."""
import logging
import logging.config
import textwrap
from abc import abstractmethod
import pathlib
import os
from traitlets import default
from traitlets.config import Application, Configurable
from .. import __version__ as version
from .traits import Path, Enum, Bool, Dict
from . import Provenance
from .component import Component
from .logging import create_logging_config, ColoredFormatter, DEFAULT_LOGGING
class ToolConfigurationError(Exception):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
self.message = message
class Tool(Application):
"""A base class for all executable tools (applications) that handles
configuration loading/saving, logging, command-line processing,
and provenance meta-data handling. It is based on
`traitlets.config.Application`. Tools may contain configurable
`ctapipe.core.Component` classes that do work, and their
configuration parameters will propagate automatically to the
`Tool`.
Tool developers should create sub-classes, and a name,
description, usage examples should be added by defining the
`name`, `description` and `examples` class attributes as
strings. The `aliases` attribute can be set to cause a lower-level
`Component` parameter to become a high-level command-line
parameter (See example below). The `setup()`, `start()`, and
`finish()` methods should be defined in the sub-class.
Additionally, any `ctapipe.core.Component` used within the `Tool`
should have their class in a list in the `classes` attribute,
which will automatically add their configuration parameters to the
tool.
Once a tool is constructed and the virtual methods defined, the
user can call the `run()` method to setup and start it.
.. code:: python
from ctapipe.core import Tool
from traitlets import (Integer, Float, Dict, Unicode)
class MyTool(Tool):
name = "mytool"
description = "do some things and stuff"
aliases = Dict({'infile': 'AdvancedComponent.infile',
'iterations': 'MyTool.iterations'})
# Which classes are registered for configuration
classes = [MyComponent, AdvancedComponent, SecondaryMyComponent]
# local configuration parameters
iterations = Integer(5,help="Number of times to run",
allow_none=False).tag(config=True)
def setup_comp(self):
self.comp = MyComponent(self, config=self.config)
self.comp2 = SecondaryMyComponent(self, config=self.config)
def setup_advanced(self):
self.advanced = AdvancedComponent(self, config=self.config)
def setup(self):
self.setup_comp()
self.setup_advanced()
def start(self):
self.log.info("Performing {} iterations..."\
.format(self.iterations))
for ii in range(self.iterations):
self.log.info("ITERATION {}".format(ii))
self.comp.do_thing()
self.comp2.do_thing()
sleep(0.5)
def finish(self):
self.log.warning("Shutting down.")
def main():
tool = MyTool()
tool.run()
if __name__ == "main":
main()
If this `main()` method is registered in `setup.py` under
*entry_points*, it will become a command-line tool (see examples
in the `ctapipe/tools` subdirectory).
"""
config_file = Path(
exists=True,
directory_ok=False,
help=(
"name of a configuration file with "
"parameters to load in addition to "
"command-line parameters"
),
).tag(config=True)
log_config = Dict(default_value=DEFAULT_LOGGING).tag(config=True)
log_file = Path(
default_value=None, exists=None, directory_ok=False, help="Filename for the log"
).tag(config=True)
log_file_level = Enum(
values=Application.log_level.values,
default_value="INFO",
help="Logging Level for File Logging",
).tag(config=True)
quiet = Bool(default_value=False).tag(config=True)
_log_formatter_cls = ColoredFormatter
provenance_log = Path(directory_ok=False).tag(config=True)
@default("provenance_log")
def _default_provenance_log(self):
return self.name + ".provenance.log"
def __init__(self, **kwargs):
# make sure there are some default aliases in all Tools:
super().__init__(**kwargs)
aliases = {
"config": "Tool.config_file",
"log-level": "Tool.log_level",
("l", "log-file"): "Tool.log_file",
"log-file-level": "Tool.log_file_level",
}
self.aliases.update(aliases)
flags = {
("q", "quiet"): ({"Tool": {"quiet": True}}, "Disable console logging.")
}
self.flags.update(flags)
self.is_setup = False
self.version = version
self.raise_config_file_errors = True # override traitlets.Application default
self.log = logging.getLogger("ctapipe." + self.name)
self.update_logging_config()
def initialize(self, argv=None):
""" handle config and any other low-level setup """
self.parse_command_line(argv)
self.update_logging_config()
if self.config_file is not None:
self.log.debug(f"Loading config from '{self.config_file}'")
try:
self.load_config_file(self.config_file)
except Exception as err:
raise ToolConfigurationError(f"Couldn't read config file: {err}")
# ensure command-line takes precedence over config file options:
self.update_config(self.cli_config)
self.update_logging_config()
self.log.info(f"ctapipe version {self.version_string}")
def update_logging_config(self):
"""Update the configuration of loggers."""
cfg = create_logging_config(
log_level=self.log_level,
log_file=self.log_file,
log_file_level=self.log_file_level,
log_config=self.log_config,
quiet=self.quiet,
)
logging.config.dictConfig(cfg)
def add_component(self, component_instance):
"""
constructs and adds a component to the list of registered components,
so that later we can ask for the current configuration of all instances,
e.g. in`get_full_config()`. All sub-components of a tool should be
constructed using this function, in order to ensure the configuration is
properly traced.
Parameters
----------
component_instance: Component
constructed instance of a component
Returns
-------
Component:
the same component instance that was passed in, so that the call
can be chained.
Examples
--------
.. code-block:: python3
self.mycomp = self.add_component(MyComponent(parent=self))
"""
self._registered_components.append(component_instance)
return component_instance
@abstractmethod
def setup(self):
"""set up the tool (override in subclass). Here the user should
construct all `Components` and open files, etc."""
pass
@abstractmethod
def start(self):
"""main body of tool (override in subclass). This is automatically
called after `initialize()` when the `run()` is called.
"""
pass
@abstractmethod
def finish(self):
"""finish up (override in subclass). This is called automatically
after `start()` when `run()` is called."""
self.log.info("Goodbye")
def run(self, argv=None):
"""Run the tool. This automatically calls `initialize()`,
`start()` and `finish()`
Parameters
----------
argv: list(str)
command-line arguments, or None to get them
from sys.argv automatically
"""
# return codes are taken from:
# http://tldp.org/LDP/abs/html/exitcodes.html
exit_status = 0
try:
self.initialize(argv)
self.log.info(f"Starting: {self.name}")
Provenance().start_activity(self.name)
self.setup()
self.is_setup = True
self.log.debug(f"CONFIG: {self.get_current_config()}")
Provenance().add_config(self.get_current_config())
self.start()
self.finish()
self.log.info(f"Finished: {self.name}")
Provenance().finish_activity(activity_name=self.name)
except ToolConfigurationError as err:
self.log.error(f"{err}. Use --help for more info")
exit_status = 2 # wrong cmd line parameter
except KeyboardInterrupt:
self.log.warning("WAS INTERRUPTED BY CTRL-C")
Provenance().finish_activity(activity_name=self.name, status="interrupted")
exit_status = 130 # Script terminated by Control-C
except Exception as err:
self.log.exception(f"Caught unexpected exception: {err}")
Provenance().finish_activity(activity_name=self.name, status="error")
exit_status = 1 # any other error
finally:
if not {"-h", "--help", "--help-all"}.intersection(self.argv):
self.write_provenance()
self.exit(exit_status)
def write_provenance(self):
for activity in Provenance().finished_activities:
output_str = " ".join([x["url"] for x in activity.output])
self.log.info("Output: %s", output_str)
self.log.debug("PROVENANCE: '%s'", Provenance().as_json(indent=3))
self.provenance_log.parent.mkdir(parents=True, exist_ok=True)
with open(self.provenance_log, mode="a+") as provlog:
provlog.write(Provenance().as_json(indent=3))
@property
def version_string(self):
""" a formatted version string with version, release, and git hash"""
return f"{version}"
def get_current_config(self):
""" return the current configuration as a dict (e.g. the values
of all traits, even if they were not set during configuration)
"""
conf = {
self.__class__.__name__: {
k: v.get(self) for k, v in self.traits(config=True).items()
}
}
for val in self.__dict__.values():
if isinstance(val, Component):
conf[self.__class__.__name__].update(val.get_current_config())
return conf
def _repr_html_(self):
""" nice HTML rep, with blue for non-default values"""
traits = self.traits()
name = self.__class__.__name__
lines = [
f"<b>{name}</b>",
f"<p> {self.__class__.__doc__ or self.description} </p>",
"<table>",
]
for key, val in self.get_current_config()[name].items():
default = traits[key].default_value
thehelp = f"{traits[key].help} (default: {default})"
lines.append(f"<tr><th>{key}</th>")
if val != default:
lines.append(f"<td><span style='color:blue'>{val}</span></td>")
else:
lines.append(f"<td>{val}</td>")
lines.append(f'<td style="text-align:left"><i>{thehelp}</i></td></tr>')
lines.append("</table>")
lines.append("<p><i>Components:</i>")
lines.append(", ".join([x.__name__ for x in self.classes]))
lines.append("</p>")
return "\n".join(lines)
def export_tool_config_to_commented_yaml(tool_instance: Tool, classes=None):
"""
Turn the config of a single Component into a commented YAML string.
This is a hacked version of
traitlets.config.Configurable._class_config_section() changed to
output a YAML file with defaults *and* current values filled in.
Parameters
----------
tool_instance: Tool
a constructed Tool instance
classes: list, optional
The list of other classes in the config file.
Used to reduce redundant information.
"""
tool = tool_instance.__class__
config = tool_instance.get_current_config()[tool_instance.__class__.__name__]
def commented(text, indent_level=2, width=70):
"""return a commented, wrapped block."""
return textwrap.fill(
text,
width=width,
initial_indent=" " * indent_level + "# ",
subsequent_indent=" " * indent_level + "# ",
)
# section header
breaker = "#" + "-" * 78
parent_classes = ", ".join(
p.__name__ for p in tool.__bases__ if issubclass(p, Configurable)
)
section_header = f"# {tool.__name__}({parent_classes}) configuration"
lines = [breaker, section_header]
# get the description trait
desc = tool.class_traits().get("description")
if desc:
desc = desc.default_value
if not desc:
# no description from trait, use __doc__
desc = getattr(tool, "__doc__", "")
if desc:
lines.append(commented(desc, indent_level=0))
lines.append(breaker)
lines.append(f"{tool.__name__}:")
for name, trait in sorted(tool.class_traits(config=True).items()):
default_repr = trait.default_value_repr()
current_repr = config.get(name, "")
if isinstance(current_repr, str):
current_repr = f'"{current_repr}"'
if classes:
defining_class = tool._defining_class(trait, classes)
else:
defining_class = tool
if defining_class is tool:
# cls owns the trait, show full help
if trait.help:
lines.append(commented(trait.help))
if "Enum" in type(trait).__name__:
# include Enum choices
lines.append(commented(f"Choices: {trait.info()}"))
lines.append(commented(f"Default: {default_repr}"))
else:
# Trait appears multiple times and isn't defined here.
# Truncate help to first line + "See also Original.trait"
if trait.help:
lines.append(commented(trait.help.split("\n", 1)[0]))
lines.append(f" # See also: {defining_class.__name__}.{name}")
lines.append(f" {name}: {current_repr}")
lines.append("")
return "\n".join(lines)
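# Usage sketch (illustrative; MyTool refers to the example subclass shown in the
# Tool class docstring above):
#
#   yaml_text = export_tool_config_to_commented_yaml(MyTool())
#   print(yaml_text)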
def run_tool(tool: Tool, argv=None, cwd=None):
"""
    Utility to run a tool in a Python session without exiting the interpreter.
Returns
-------
exit_code: int
The return code of the tool, 0 indicates success, everything else an error
"""
current_cwd = pathlib.Path().absolute()
cwd = pathlib.Path(cwd) if cwd is not None else current_cwd
try:
# switch to cwd for running and back after
os.chdir(cwd)
tool.run(argv or [])
return 0
except SystemExit as e:
return e.code
finally:
os.chdir(current_cwd)
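# Usage sketch (illustrative; MyTool as in the Tool class docstring above):
#
#   exit_code = run_tool(MyTool(), argv=["--log-level", "DEBUG"])
#   assert exit_code == 0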
avg_line_length: 35.05492 | max_line_length: 88 | alphanum_fraction: 0.607677
hexsha: 0e19755f71b69f7b0c19ae66893b8188b62562df | size: 15,545 | ext: py | lang: Python
max_stars_repo: ensembler/ensemble/replicas_dynamic_parameters.py @ philthiel/Ensembler (head 943efac3c673eb40165927e81336386788e3a19f), licenses ["MIT"], stars: 39 (2020-05-19T08:45:27.000Z to 2022-03-17T16:58:34.000Z)
max_issues_repo: ensembler/ensemble/replicas_dynamic_parameters.py @ SchroederB/Ensembler (head 943efac3c673eb40165927e81336386788e3a19f), licenses ["MIT"], issues: 38 (2020-06-18T13:02:18.000Z to 2022-02-25T14:29:17.000Z)
max_forks_repo: ensembler/ensemble/replicas_dynamic_parameters.py @ SchroederB/Ensembler (head 943efac3c673eb40165927e81336386788e3a19f), licenses ["MIT"], forks: 13 (2020-05-19T08:45:57.000Z to 2022-03-10T16:18:20.000Z)
content:
"""
.. automodule:: replicas_dynamic_parameters
This module shall be used to implement subclasses of ensemble.
It is a class that uses multiple systems. It can be used for RE or the conveyor belt.
"""
import numpy as np
import pandas as pd
from scipy import constants as const
from tqdm.notebook import tqdm
from ensembler import potentials as pot
from ensembler.ensemble._replica_graph import _mutliReplicaApproach
from ensembler.samplers import stochastic
from ensembler.system import perturbed_system
from ensembler.util.ensemblerTypes import systemCls, Dict, Tuple, NoReturn
class conveyorBelt(_mutliReplicaApproach):
"""
Conveyor belt ensemble class
organizes the replicas and their coupling
"""
_parameter_name: str = "lam"
coordinate_dimensions: int = 1
replica_graph_dimensions: int = 1
exchange_dimensions: Dict[str, np.array]
nSteps_between_trials: int = 1
exchange_information: pd.DataFrame = pd.DataFrame(columns=["Step", "capital_lambda", "TotE", "biasE", "doAccept"])
system_trajs: dict = {}
_default_metropolis_criterion = lambda self, originalParams, swappedParams: (
np.greater_equal(originalParams, swappedParams) or self._default_randomness(originalParams,
swappedParams))
exchange_criterium = _default_metropolis_criterion
###random part of Metropolis Criterion:
_randomness_factor = 0.1
_temperature_exchange: float = 298
_default_randomness = lambda self, originalParams, swappedParams: (
(1 / self._randomness_factor) * np.random.rand() <= np.exp(
-1.0 / (const.gas_constant / 1000.0 * self._temperature_exchange) * (
originalParams - swappedParams + 0.0000001))) # pseudo count, if params are equal
def __str__(self):
outstr = '{:<5s}{:<10s}{:<10s}\n'.format("i", "lambda_i", "E_i" )
outstr += "-" * 25 + "\n"
for i in self.replicas:
outstr += '{:5d}{:10.2f}{:10.3f}\n'.format(i, self.replicas[i].lam,
float(self.replicas[i].total_system_energy))
return outstr
def __repr__(self):
return self.__str__()
def __init__(self,
capital_lambda: float,
n_replicas: int,
system:systemCls =
perturbed_system.perturbedSystem(
temperature=300.0,
lam=0.0,
potential=pot.OneD.linearCoupledPotentials(
Va=pot.OneD.harmonicOscillatorPotential(k=1, x_shift=0),
Vb=pot.OneD.harmonicOscillatorPotential(k=2, x_shift=0)
),
sampler=stochastic.metropolisMonteCarloIntegrator()
),
build: bool = False):
"""
initialize Ensemble object
Parameters
----------
capital_lambda: float
state of ensemble, 0 <= capital_lambda < pi
n_replicas: int
number of replicas
system: systemCls, optional
a system1D instance
build:bool, optional
build memory?
"""
assert 0.0 <= capital_lambda <= 2 * np.pi, "capital_lambda not allowed"
assert n_replicas >= 1, "At least one system is needed"
super().__init__()
self.system = system
self.capital_lambda = capital_lambda
self.build = build # build
self.dis = 2.0 * np.pi / n_replicas
self.exchange_dimensions = {
self._parameter_name:
[
self.calculate_replica_lambda(self.capital_lambda, i)
for i in range(n_replicas)
]
}
self._temperature_exchange = system.temperature
self.initialise()
self.system_trajs: dict = {}
# public functions
def initialise(self) -> NoReturn:
"""
Initialises a conveyor belt ensemble: deletes biasing potential,
initialises the replica graph and updates the systems.
Returns
-------
NoReturn
"""
##Simulation
self._currentTrial = 0
self.reject = 0
# initialize memory variables
self.num_gp = None
self.mem_fc = None
self.mem = None
self.gp_spacing = None
self.biasene = None
self.init_mem()
# BUILD replicas
self._initialise_replica_graph()
## * Conveyor belt specifics
for i in self.replicas:
self.replicas[i].lam = self.calculate_replica_lambda(self.capital_lambda, i)
self.replicas[i].update_current_state()
self.replicas[i].clear_trajectory()
self.exchange_information = pd.DataFrame(
[{
"Step": self._currentTrial,
"capital_lambda": self.capital_lambda,
"TotE": self.calculate_total_ensemble_energy(),
"biasE": self.biasene,
"doAccept": True
}
]
)
def simulate(self,
ntrials: int,
nSteps_between_trials: int = 1,
reset_ensemble: bool = False,
verbosity: bool = True):
"""
Integrates the conveyor belt ensemble
Parameters
----------
ntrials:int
Number of conveyor belt steps
nSteps_between_trials: int, optional
number of integration steps of replicas between a move of the conveyor belt (Default: None)
reset_ensemble: bool, optional
reset ensemble for starting the simulation? (Default: False)
verbosity: bool, optional
verbose output? (Default: False)
Returns
-------
NoReturn
"""
if (isinstance(nSteps_between_trials, int)):
self.set_simulation_n_steps_between_trials(n_steps=nSteps_between_trials)
self.__tmp_exchange_traj = []
for _ in tqdm(range(ntrials), desc="Trials: ", mininterval=1.0, leave=verbosity):
self.accept_move()
self.run()
self.exchange_information = pd.concat([self.exchange_information, pd.DataFrame(self.__tmp_exchange_traj)],ignore_index=True)
# self.exchange_information = self.exchange_information
def run(self, verbosity: bool = False) -> NoReturn:
"""
Integrates the systems of the ensemble for the :var:nSteps_between_trials.
"""
self._currentTrial += 1
for replica_coords, replica in self.replicas.items():
replica.simulate(steps=self.nSteps_between_trials, verbosity=verbosity)
def accept_move(self) -> NoReturn:
"""
Performs one trial move of the capital lambda, either accepts or rejects it and
updates the lambdas of all replicas.
"""
self.state = []
# metropolis criterium for moving capital_lambda?
oldEne = self.calculate_total_ensemble_energy()
oldBiasene = self.biasene
oldBlam = self.capital_lambda
self.capital_lambda += (np.random.rand() * 2.0 - 1.0) * np.pi / 4.0
self.capital_lambda = self.capital_lambda % (2.0 * np.pi)
self.update_all_lambda(self.capital_lambda)
newEne = self.calculate_total_ensemble_energy()
if self._default_metropolis_criterion(originalParams=oldEne, swappedParams=newEne):
for i in self.replicas:
self.replicas[i]._update_dHdLambda()
self.__tmp_exchange_traj.append({"Step": self._currentTrial, "capital_lambda": self.capital_lambda, "TotE": float(newEne),
"biasE": self.biasene, "doAccept": True})
else:
self.reject += 1
self.update_all_lambda(oldBlam)
for i in self.replicas:
self.replicas[i]._update_dHdLambda()
self.__tmp_exchange_traj.append({"Step": self._currentTrial, "capital_lambda": oldBlam, "TotE": float(oldEne),
"biasE": float(oldBiasene), "doAccept": False})
if self.build:
self.build_mem()
def revert(self) -> NoReturn:
"""
reverts last propagation step
"""
for j in self.replicas:
self.replicas[j].revert()
self.calculate_total_ensemble_energy()
self.exchange_information = self.exchange_information[:-1]
def add_replica(self, clam: float, add_n_replicas: int = 1) -> NoReturn:
'''
Not Implemented!!!
adds a replica to the ensemble
'''
raise NotImplementedError("Please Implement this function!")
# PRIVATE functions
## * Move the belt
def calculate_total_ensemble_energy(self) -> float:
"""
calculates energy of Conveyor Belt Ensemble
Returns
-------
float
total energy of the Conveyor Belt Ensemble.
"""
ene = 0.0
for i in self.replicas:
ene += self.replicas[i]._currentTotPot
ene += self.replicas[i]._currentTotKin if (not np.isnan(self.replicas[i]._currentTotKin)) else 0
ene = ene + self.biasene
return ene
def calculate_replica_lambda(self, capital_lambda: float, i: int) -> float:
"""
Parameters
----------
capital_lambda: float
state of ensemble 0 <= capital_lambda < 2 pi
i: int
index of replica
Returns
-------
float
lambda of replica i
"""
ome = (capital_lambda + i * self.dis) % (2. * np.pi)
if ome > np.pi:
ome = 2.0 * np.pi - ome
return ome / np.pi
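    # Worked example for calculate_replica_lambda (illustrative): with n_replicas = 4
    # the spacing self.dis is pi/2. For capital_lambda = 0 and replica i = 3,
    # ome = (0 + 3*pi/2) % (2*pi) = 3*pi/2, which exceeds pi, so ome becomes
    # 2*pi - 3*pi/2 = pi/2 and the returned lambda is 0.5.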
def update_all_lambda(self, capital_lambda: float) -> float:
"""
updates the state of the ensemble and the replicas accordingly
Parameters
----------
capital_lambda:float
capital lambda 0 <= capital_lambda < 2 pi
Returns
-------
float
capital_lambda
"""
'''
:param capital_lambda:
:type capital_lambda: float
:return: capital_lambda
:rtype: float
'''
self.capital_lambda = capital_lambda
for i in self.replicas:
self.replicas[i].lam = self.calculate_replica_lambda(capital_lambda, i)
self.apply_mem()
return capital_lambda
## * Bias Memory Functions
def init_mem(self) -> NoReturn:
"""
initializes memory
"""
self.num_gp = 11
self.mem_fc = 0.0001
# self.mem=np.array([2.2991 , 2.00274, 1.84395, 1.83953, 2.0147])
# memory for perturbed hosc, alpha=10.0, gamma=0.0, 8 replica, num_gp=6, fc=0.00001, 1E6 steps
self.mem = np.zeros(self.num_gp - 1)
self.gp_spacing = self.dis / float(self.num_gp - 1.0)
self.biasene = 0.0
# print('Distance: ', self.dis, self.dis / np.pi)
# print('GP Distance: ', self.gp_spacing, self.gp_spacing / np.pi)
# print('Gridpoints: ', np.linspace(0, self.num_gp - 1, self.num_gp) * self.gp_spacing)
# print('Gridpoints: ', np.linspace(0, self.num_gp - 1, self.num_gp) * self.gp_spacing / np.pi)
def build_mem(self) -> NoReturn:
"""
increments biasing memory
"""
active_gp = int(np.floor((self.capital_lambda % self.dis) / self.gp_spacing + 0.5))
self.mem[active_gp % (self.num_gp - 1)] += self.mem_fc
def apply_mem(self) -> NoReturn:
"""
applies memory biasing
"""
active_gp = int(np.floor((self.capital_lambda % self.dis) / self.gp_spacing + 0.5))
dg = (self.capital_lambda % self.dis) / self.gp_spacing - float(active_gp)
if dg < 0:
self.biasene = self.mem[(active_gp - 1) % (self.num_gp - 1)] * self.spline(1.0 + dg) + self.mem[
active_gp % (self.num_gp - 1)] * self.spline(-dg)
else:
self.biasene = self.mem[active_gp % (self.num_gp - 1)] * self.spline(dg) + self.mem[
(active_gp + 1) % (self.num_gp - 1)] * self.spline(1.0 - dg)
# print("{:5.2f}{:5.2f}{:8.3f}{:3d}{:8.3f}{:8.3f}{:8.3f} {:s}".format(self.capital_lambda, (self.capital_lambda%self.dis),
# (self.capital_lambda%self.dis)/self.gp_spacing, active_gp,
# self.gp_spacing*active_gp, dg, ene, np.array2string(self.mem)))
## * Trajectories
def get_trajs(self) -> Tuple[pd.DataFrame, Dict[int, pd.DataFrame]]:
"""
returns all Trajectories of this Ensemble.
Returns
-------
Tuple[pd.DataFrame, Dict[int, pd.DataFrame]]
Conveyor Belt Trajectory, Replica Trajectories.
"""
return self.get_conveyorbelt_trajectory(), self.get_replica_trajectories()
def get_conveyorbelt_trajectory(self) -> pd.DataFrame:
"""
get_conveyorbelt_trajectory returns the pandas DataFrame of the conveyorbelt trajectory
Returns
-------
pd.DataFrame
conveyorbelt_trajectory
"""
return self.exchange_information
def get_replica_trajectories(self) -> Dict[int, pd.DataFrame]:
"""
get_replica_trajectories
Returns
-------
Dict[int, pd.DataFrame]
trajectories of all replicas
"""
self.system_trajs = {}
for i in self.replicas:
self.system_trajs.update({i: self.replicas[i].trajectory})
return self.system_trajs
def clear_all_trajs(self) -> NoReturn:
"""
deletes trajectories of replicas
"""
self.system_trajs = {}
for i in self.replicas:
self.replicas[i].clear_trajectory()
self.exchange_information = pd.DataFrame(columns=["Step", "capital_lambda", "TotE", "biasE", "doAccept"])
def set_simulation_n_steps_between_trials(self, n_steps: int) -> NoReturn:
"""
        Sets the integration steps of the replicas between trial moves.
Parameters
----------
n_steps:int
number of steps
"""
self.nSteps_between_trials = n_steps
for coord, replica in self.replicas.items():
replica.nsteps = self.nSteps_between_trials
@staticmethod
def spline(dg):
"""
calculates the value of the spline function depending on the deviation dg from the grid point
        # Todo: PUT SOMEWHERE ELSE OR NUMPY? : numpy.interp.
Parameters
----------
dg:float
deviation from gridpoint (absolute value)
Returns
-------
float
value of spline (float)
"""
if dg < 0.0:
print('distance smaller than 0')
elif dg < 1.0:
return 1.0 - 3.0 * dg * dg + 2 * dg * dg * dg
else:
return 0.0
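
# A standalone sketch of the two pure helpers above, to make the belt geometry
# concrete: spline() fades a bias contribution smoothly from 1 to 0 over one
# grid spacing, and the replica lambda folds the belt position back into [0, 1].
# (Illustrative reimplementation of the methods above; `dis` stands for the
# replica spacing attribute used there.)
def _spline_sketch(dg):
    if not 0.0 <= dg < 1.0:
        return 0.0
    return 1.0 - 3.0 * dg * dg + 2.0 * dg * dg * dg

def _replica_lambda_sketch(capital_lambda, i, dis):
    ome = (capital_lambda + i * dis) % (2.0 * np.pi)
    if ome > np.pi:
        ome = 2.0 * np.pi - ome
    return ome / np.pi

assert _spline_sketch(0.0) == 1.0 and _spline_sketch(0.5) == 0.5 and _spline_sketch(1.0) == 0.0
assert abs(_replica_lambda_sketch(0.0, 1, np.pi / 2.0) - 0.5) < 1e-12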
| 34.621381
| 134
| 0.562432
|
fd5be35da127e8cdc96477e659e58faefce78e35
| 6,881
|
py
|
Python
|
examples/rossman_reg.py
|
SimonCarozza/autolrn
|
d0875844a3e9b4fc22510ef320aa498e339b6192
|
[
"MIT"
] | null | null | null |
examples/rossman_reg.py
|
SimonCarozza/autolrn
|
d0875844a3e9b4fc22510ef320aa498e339b6192
|
[
"MIT"
] | null | null | null |
examples/rossman_reg.py
|
SimonCarozza/autolrn
|
d0875844a3e9b4fc22510ef320aa498e339b6192
|
[
"MIT"
] | null | null | null |
from autolrn.regression import param_grids_distros as pgd
from autolrn import auto_utils as au
from autolrn.regression import r_eval_utils as reu
from autolrn.regression import evaluate as eu
from autolrn.regression import train as tr
from pandas import read_csv, to_datetime
import numpy as np
from pkg_resources import resource_string
import os
from io import StringIO
from sklearn.model_selection import TimeSeriesSplit
from sklearn.dummy import DummyRegressor
if __name__ == "__main__":
seed = 7
np.random.seed(seed)
d_name = "rossmann"
# names = [
# "Store","DayOfWeek","Date","Sales","Customers","Open",
# "Promo","StateHoliday","SchoolHoliday"]
# load data
ross_bytes = resource_string(
"autolrn", os.path.join("datasets", 'rossman_store_train.csv'))
ross_file = StringIO(str(ross_bytes,'utf-8'))
# n_rows = 15000 # 5000, 7000, 10000, 25000, 50000
df = read_csv(
ross_file, delimiter=",",
# names=names,
parse_dates=['Date'],
# nrows = n_rows,
dtype={"StateHoliday": "category"})
print("Dataset shape:", df.shape)
print("Dataset types:\n: ", df.dtypes)
# statistical summary
description = df.describe()
print("Dataset description:\n", description)
print()
target = "Sales"
df.dropna(subset=[target], inplace=True)
print("Date column type", df["Date"].dtype)
print()
print("Open uniques:", df["Open"].unique())
print()
try:
df["Date"] = df["Date"].astype('datetime64[D]')
except Exception as e:
print(e)
df["Date"] = to_datetime(df["Date"], errors='coerce')
# use this if you took a random sample of full df
# df.sort_values(by=["Date"], inplace=True)
df["DayOfWeek"] = df["DayOfWeek"].astype(str)
df["Open"] = df["Open"].astype(str)
df["Promo"] = df["Promo"].astype(str)
df["StateHoliday"].replace(to_replace='0', value='n', inplace=True)
df["SchoolHoliday"] = df["SchoolHoliday"].astype(str)
df["Sales"] = df["Sales"].astype(str) # .astype(int)
print("After some processing")
print("DayOfWeek uniques:", df["DayOfWeek"].unique())
print("Open uniques:", df["Open"].unique())
print("Promo uniques:", df["Promo"].unique())
print("StateHoliday uniques:", df["StateHoliday"].unique())
print("SchoolHoliday uniques:", df["SchoolHoliday"].unique())
print()
print(df[df["Open"] == '0'].head())
print(len(df[df["Open"] == '0'].index))
scoring = "r2" # 'neg_mean_squared_error', 'r2'
# adding 'has_Sales' col
if (df["Open"] == '0').any():
# keeping zero sales records
df['has_Sales'] = 1
print("Some record has zero sale")
# you don't have a target in unseen data
# df['has_Sales'][df[target] == '0'] = 0
df.loc[df["Open"] == '0', 'has_Sales'] = 0
print()
print("df shape after little cleaning: ", df.shape)
X = df.drop([target], axis=1)
y = df[target]
print("X.dimensions: ", X.shape)
print("y.dtypes:", y.dtypes)
print()
print("Let's have a look at the first row and output")
print("X\n", X.head())
print("y\n", y.head())
###
best_attr = reu.best_regressor_attributes()
best_model_name, best_model, best_reg_score, best_reg_std = best_attr
test_size = .1
freqs = ['Year', 'Month', 'Day', 'Week']
encoding = 'le' # 'le', 'ohe'
splitted_data = reu.split_and_encode_Xy(
X, y,
encoding=encoding, freqs=freqs,
# dummy_cols=7, # 31, 52
test_size=test_size,
shuffle=False, scoring=scoring)
X_train, X_test, y_train, y_test = splitted_data["data"]
scaler, tgt_scaler = splitted_data["scalers"]
featselector = None
if 'f_selector' in splitted_data:
featselector = splitted_data["f_selector"]
print()
tscv = TimeSeriesSplit(n_splits=2)
# cv = tscv
estimators = dict(pgd.full_search_models_and_parameters)
# adding keras regressor to candidate estimators
input_dim = int(X_train.shape[1])
nb_epoch = au.select_nr_of_iterations('nn')
refit, nested_cv, tuning = eu.select_cv_process(cv_proc='cv')
print(
"refit:'%s', nested_cv:'%s', tuning:'%s'" % (refit, nested_cv, tuning))
if refit:
# This is gonna work even with no prior optimization
keras_reg_name = "KerasReg"
keras_nn_model, keras_param_grid = reu.create_best_keras_reg_architecture(
keras_reg_name, input_dim, nb_epoch, pgd.Keras_param_grid)
estimators[keras_reg_name] = (keras_nn_model, keras_param_grid)
else:
keras_regressors = reu.create_keras_regressors(input_dim, nb_epoch, batch_size=32)
estimators.update(keras_regressors)
print()
print("[task] === Model evaluation")
print("*** Best model %s has score %.3f +/- %.3f" % (
best_model_name, best_reg_score, best_reg_std))
print()
best_attr = eu.evaluate_regressor(
'DummyReg', DummyRegressor(strategy="median"),
X_train, y_train, tscv, scoring, best_attr, time_dep=True)
# cv_proc in ['cv', 'non_nested', 'nested']
refit, nested_cv, tuning = eu.select_cv_process(cv_proc='cv')
best_model_name, _, _ , _ = eu.get_best_regressor_attributes(
X_train, y_train, estimators, best_attr, scoring,
refit=refit, nested_cv=nested_cv,
cv=tscv, time_dep=True, random_state=seed)
time_dep=True
print("set time_dep=%s" % time_dep)
# train phase, no saving: scaler and featselector are useless
tested = tr.train_test_process(
best_model_name, estimators, X_train, X_test, y_train, y_test,
scaler=scaler, y_scaler=tgt_scaler, feat_selector=featselector,
tuning=tuning, cv=tscv, scoring=scoring, random_state=seed,
time_dep=time_dep)
# if a best model has been successfully tested, proceed to full training
# for prediction on unseen data
if tested:
print()
print("[task] === Train %s for predictions on unseen data" % best_model_name)
X_train = X
y_train = y
del X, y
encoded_data = reu.split_and_encode_Xy(
X_train, y_train, encoding='le', feat_select=True, enc_Xy=True,
scoring=scoring)
X_train, _, y_train, _ = encoded_data["data"]
scaler, tgt_scaler = encoded_data["scalers"]
featselector = None
        if 'f_selector' in encoded_data:
            featselector = encoded_data["f_selector"]
tr.train_test_process(
best_model_name, estimators, X_train, X_test, y_train, y_test,
scaler=scaler, y_scaler=tgt_scaler, feat_selector=featselector,
tuning=tuning, cv=tscv, scoring=scoring, random_state=seed,
time_dep=time_dep, test_phase=False, d_name=d_name)
print()
print("End of program\n")
| 31.277273
| 90
| 0.640459
|
b0d271f6204b0472f966a499a1ca109a2bf2a0a1
| 11,994
|
py
|
Python
|
docs/conf.py
|
BrainModes/fmriprep
|
50e6dc739be3d67bf63ab7418eb80738fde7d59b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
BrainModes/fmriprep
|
50e6dc739be3d67bf63ab7418eb80738fde7d59b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
BrainModes/fmriprep
|
50e6dc739be3d67bf63ab7418eb80738fde7d59b
|
[
"BSD-3-Clause"
] | 1
|
2021-03-22T10:59:59.000Z
|
2021-03-22T10:59:59.000Z
|
# -*- coding: utf-8 -*-
#
# fmriprep documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 09:04:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
from sphinx import __version__ as sphinxversion
from packaging import version as pver # Avoid distutils.LooseVersion which is deprecated
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath("sphinxext"))
sys.path.insert(0, os.path.abspath("../wrapper"))
from github_link import make_linkcode_resolve
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.linkcode",
"sphinxarg.ext", # argparse extension
"nipype.sphinxext.plot_workflow",
"nbsphinx",
"sphinxcontrib.napoleon",
]
# Mock modules in autodoc:
autodoc_mock_imports = [
"numpy",
"nitime",
"matplotlib",
]
if pver.parse(sphinxversion) >= pver.parse("1.7.0"):
autodoc_mock_imports += [
"pandas",
"nilearn",
"seaborn",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Accept custom section names to be parsed for numpy-style docstrings
# of parameters.
# Requires pinning sphinxcontrib-napoleon to a specific commit while
# https://github.com/sphinx-contrib/napoleon/pull/10 is merged.
napoleon_use_param = False
napoleon_custom_sections = [
("Inputs", "Parameters"),
("Outputs", "Parameters"),
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "fmriprep"
author = "The fMRIPrep developers"
copyright = "2016-%s, %s" % (datetime.now().year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "version"
# The full version, including alpha/beta/rc tags.
release = "version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = u'fmriprep vversion'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g., ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "fmriprepdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "fmriprep.tex", "fMRIprep Documentation",
author,
"manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "fmriprep", "fmriprep Documentation",
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, "fmriprep", "fMRIprep Documentation",
author, "fmriprep", "One line description of project.",
"Miscellaneous"),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve("fmriprep",
"https://github.com/poldracklab/"
"fmriprep/blob/{revision}/"
"{package}/{path}#L{lineno}")
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"matplotlib": ("https://matplotlib.org/", None),
"bids": ("https://bids-standard.github.io/pybids/", None),
"nibabel": ("https://nipy.org/nibabel/", None),
"nipype": ("https://nipype.readthedocs.io/en/latest/", None),
"niworkflows": ("https://www.nipreps.org/niworkflows/", None),
"sdcflows": ("https://www.nipreps.org/sdcflows/", None),
"smriprep": ("https://poldracklab.github.io/smriprep/", None),
"templateflow": ("https://www.templateflow.org/python-client", None),
}
suppress_warnings = ["image.nonlocal_uri"]
def setup(app):
app.add_css_file("theme_overrides.css")
# We need this for the boilerplate script
app.add_js_file("https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js")
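
# A minimal aside on the version gate above: packaging.version is used instead
# of plain string comparison because pre-release tags sort correctly with it.
assert pver.parse("1.7.0") > pver.parse("1.7.0rc1") > pver.parse("1.6.5")
assert ("1.7.0" > "1.7.0rc1") is False  # naive string comparison gets this wrong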
| 33.502793
| 89
| 0.701267
|
79232ea3a4d5948b82480dc6e30eced57f3b9896
| 737
|
py
|
Python
|
config/trades.py
|
ashwinath/stocks-graph
|
de92ef613f597e4dabba3226a70194000fd2ae70
|
[
"Apache-2.0"
] | null | null | null |
config/trades.py
|
ashwinath/stocks-graph
|
de92ef613f597e4dabba3226a70194000fd2ae70
|
[
"Apache-2.0"
] | null | null | null |
config/trades.py
|
ashwinath/stocks-graph
|
de92ef613f597e4dabba3226a70194000fd2ae70
|
[
"Apache-2.0"
] | null | null | null |
import os
import yaml
from google.protobuf import json_format
from generated.proto.trades_pb2 import TradeHistory
from generated.proto.config_pb2 import Config
from typing import List
def get_all_trade_configs(config: Config) -> List[TradeHistory]:
all_trade_histories = []
for dir_path, dir_names, file_names in os.walk(config.trades.folder):
for file_name in file_names:
file_path = os.path.join(dir_path, file_name)
with open(file_path, "r") as file:
trade_history = json_format.ParseDict(
yaml.safe_load(file),
TradeHistory(),
)
all_trade_histories.append(trade_history)
return all_trade_histories
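
# A minimal sketch of the yaml -> protobuf step above, with google.protobuf's
# generic Struct standing in for the generated TradeHistory message (the real
# field names live in the .proto files, not in this module, so they are not
# reproduced here).
def _parse_dict_sketch():
    from google.protobuf.struct_pb2 import Struct
    raw = yaml.safe_load("symbol: AAPL\nquantity: 10\n")  # what yaml.safe_load(file) yields
    msg = json_format.ParseDict(raw, Struct())            # same call pattern as above
    return json_format.MessageToDict(msg)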
| 33.5
| 73
| 0.671642
|
40a8499e2082b72622267136776b5c8a09b62ded
| 1,167
|
py
|
Python
|
tools/linter_lib/pyflakes.py
|
Debilski/zulip
|
ff4b5d8ce699d43ffc648986354592235274b70c
|
[
"Apache-2.0"
] | 1
|
2020-03-17T10:29:40.000Z
|
2020-03-17T10:29:40.000Z
|
tools/linter_lib/pyflakes.py
|
Debilski/zulip
|
ff4b5d8ce699d43ffc648986354592235274b70c
|
[
"Apache-2.0"
] | null | null | null |
tools/linter_lib/pyflakes.py
|
Debilski/zulip
|
ff4b5d8ce699d43ffc648986354592235274b70c
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
import argparse
from typing import List
from zulint.linters import run_pyflakes
def check_pyflakes(files, options):
# type: (List[str], argparse.Namespace) -> bool
suppress_patterns = [
("scripts/lib/pythonrc.py", "imported but unused"),
# Intentionally imported by zerver/lib/webhooks/common.py
('', "'zerver.lib.exceptions.UnexpectedWebhookEventType' imported but unused"),
# Our ipython startup pythonrc file intentionally imports *
("scripts/lib/pythonrc.py",
" import *' used; unable to detect undefined names"),
# Special dev_settings.py import
('', "from .prod_settings_template import *"),
("settings.py", "settings import *' used; unable to detect undefined names"),
("settings.py", "may be undefined, or defined from star imports"),
# Sphinx adds `tags` specially to the environment when running conf.py.
("docs/conf.py", "undefined name 'tags'"),
]
if options.full:
suppress_patterns = []
return run_pyflakes(files, options, suppress_patterns)
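
# A rough sketch of the matching rule implied by the table above: a pyflakes
# line is suppressed when it contains both the filename fragment and the
# message fragment of some pair (an empty filename matches every file). This
# is only an illustration; zulint's actual run_pyflakes may differ.
def _is_suppressed(line, patterns):
    return any(fname in line and msg in line for fname, msg in patterns)

assert _is_suppressed("docs/conf.py:42: undefined name 'tags'",
                      [("docs/conf.py", "undefined name 'tags'")])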
| 33.342857
| 87
| 0.67952
|
357d7ed23b58f87700bd8cf08dcb764532771fc7
| 11,231
|
py
|
Python
|
Phase_2/pytools-master/ycsb/ycsb.py
|
manu2504/WANCom
|
8acdffb947d36fa7d7c8c78cbe9c3b06864128a6
|
[
"MIT"
] | null | null | null |
Phase_2/pytools-master/ycsb/ycsb.py
|
manu2504/WANCom
|
8acdffb947d36fa7d7c8c78cbe9c3b06864128a6
|
[
"MIT"
] | null | null | null |
Phase_2/pytools-master/ycsb/ycsb.py
|
manu2504/WANCom
|
8acdffb947d36fa7d7c8c78cbe9c3b06864128a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import pandas as pd
import numpy as np
import logging
import subprocess
import time
import os
import psutil
import random
import itertools
import multiprocessing
import copy
from pytools.common.common import remove_repeated_spaces
from pytools.ycsb.parsing.parse_ycsb_file import cpp_parse_ycsb_file
from pytools.ycsb.ycsb_common import YCSB_MASK, PARSED_FILE_EXTENSION, PARSING_VALID_PREFIXES, \
DEFAULT_YCSB_COLUMNS
random.seed(time.time())
def verify_token_read(tokens, column_order, dtypes_dic, allowed_strings=["READ", "RawREAD"]):
res = []
try:
for i, column in enumerate(column_order):
v = -1
if dtypes_dic[column] == np.uint16:
v = np.uint64(tokens[i])
if v > np.iinfo(np.uint16).max:
raise RuntimeError("Detected overflow in np.uint16")
elif dtypes_dic[column] == np.uint32:
v = np.uint64(tokens[i])
if v > np.iinfo(np.uint32).max:
raise RuntimeError("Detected overflow in np.uint32")
elif dtypes_dic[column] == np.uint64:
v = np.uint64(tokens[i])
elif dtypes_dic[column] == str:
v = str(tokens[i])
if v not in allowed_strings:
raise RuntimeError("Not allowed string [%s]", v)
else:
assert False, "Cannot verify column data type [%s]" % column
res.append(v)
except:
# print tokens, res
return False
return True
def raw_reparse_singular(lines, column_order, dtypes, verify_function, separator=","):
""" This function attempts to deal with all sort of issues, occured due to parallel
unsyncrhonized write to a single file, ie., we can have something like:
5 49178 8 1490978084662906109 252
5532796 32819 88 14909780846629834721 241
58
5 49166 8 1490978084663144067 264"""
print "Processing [%d lines]" % len(lines)
removed_lines = []
saved_lines = []
for line in lines:
tokens = remove_repeated_spaces(line).rstrip().split(separator)
if len(tokens) == len(dtypes) \
and verify_function(tokens, column_order, dtypes):
saved_lines.append(remove_repeated_spaces(line))
else:
removed_lines.append(line)
logging.info("Number of corrupted lines found [%i]", len(removed_lines))
# for l in removed_lines:
# logging.info("Corrupted line removed [%s]", l.rstrip())
return saved_lines
def raw_reparse_singular_wrapper(args):
return raw_reparse_singular(*args)
def raw_reparse(ifile, ofile, column_order, dtypes, verify_function, separator=","):
""" This function attempts to deal with all sort of issues, occured due to parallel
unsyncrhonized write to a single file, ie., we can have something like:
5 49178 8 1490978084662906109 252
5532796 32819 88 14909780846629834721 241
58
5 49166 8 1490978084663144067 264"""
removed_lines = []
with open(ifile) as f:
content = f.readlines()
n = psutil.cpu_count()
step = int(len(content) / n)
intervals = []
for i in range(0, n):
if i+1 == n:
logging.info("%i) %i -> %i", i, i*step, len(content))
intervals.append( (i*step, len(content)) )
else:
logging.info("%i) %i -> %i", i, i*step, (i+1)*step)
intervals.append( (i*step, (i+1)*step) )
print "STEPS ", intervals
subs = []
for i in intervals:
subs.append(copy.deepcopy(content[ i[0] : i[1] ]))
# with open(ofile, "w") as of:
# for line in content:
# tokens = remove_repeated_spaces(line).rstrip().split(separator)
# if len(tokens) == len(dtypes) \
# and verify_function(tokens, column_order, dtypes):
# of.write(remove_repeated_spaces(line)+"\n")
# else:
# removed_lines.append(line)
# logging.info("Number of corrupted lines found [%i]", len(removed_lines))
# # for l in removed_lines:
# # logging.info("Corrupted line removed [%s]", l.rstrip())
# column_order, dtypes, verify_function, separator=","
pool = multiprocessing.Pool(processes=n)
results = pool.map(raw_reparse_singular_wrapper,
itertools.izip(
subs,
itertools.repeat(column_order),
itertools.repeat(dtypes),
itertools.repeat(verify_function),
itertools.repeat(separator)
)
)
pool.close()
pool.join()
with open(ofile, "w") as of:
for r in results:
print "Results length [%d]" % len(r)
for l in r:
of.write("%s\n"%l)
def load_ycsb_raw_2ts(ycsb_file, nrows2read=None):
assert False, "Deprecated use load_ycsb_raw_2_ts"
# logging.info("Loading raw [%s]", ycsb_file)
# tmp_ycsb = "%s.%s" % (ycsb_file, parallel_fast_parse)
# raw_reparse(ycsb_file, tmp_ycsb,
# column_order=["opcode", "timestamp", "latency_us"],
# dtypes={ "opcode":str, "timestamp":np.uint64, "latency_us":np.uint32},
# verify_function=verify_token_read
# )
# print "LOADING from ", tmp_ycsb
# return l_load_ycsb_raw_2ts(tmp_ycsb, nrows2read)
def l_load_ycsb_raw_2ts(ycsb_file, nrows2read=None):
assert False, "Deprecated use load_ycsb_raw_2_df/ts"
# logging.error("FUNCTION l_load_ycsb_raw_2ts is deprecated. use load_ycsb_raw_2_ts instead!")
# df = pd.read_csv(ycsb_file, sep=",", names=["opcode", "time", "latency"],
# usecols=["opcode", "time", "latency"], error_bad_lines=False, nrows=nrows2read)
# logging.info("Read values %s" % (len(df)))
# # Removing all junk columns
# df1 = df[df["opcode"] == "READ"]
# df2 = df[df["opcode"] == "RawREAD"]
# if len(df1) > len(df2):
# df = df1
# else:
# df = df2
# logging.info("After filtering %s" % (len(df)))
# df["latency"] = df["latency"].astype(np.uint64)
# df["time"] = df["time"].astype(np.float64) * 1000.0 * 1000
# df['time'] = pd.to_datetime(df['time'])
# logging.info("Converting dataframe to pd.Series...")
# return pd.Series(np.asarray(df["latency"]), index=df["time"])
def convert_df_column_ms2datetime(df, src_column_ms="timestamp_ms", dst_column="datetimestamp"):
""" Function takes a dataframe, then takes src_column and create dst_column that contains
datetime timestamp """
df[dst_column] = df[src_column_ms] * 1000.0 * 1000
df[dst_column] = pd.to_datetime(df[dst_column])
return df
def bash_filter_pattern(
ifile, tmp_folder="/tmp/", allowed_patterns=["READ"], excluded_patterns=["Intended"]):
tmp_file = "%s.%d" % (os.path.basename(ifile), random.randint(0,10000))
tmp_file = os.path.join(tmp_folder, tmp_file)
grep_string = '| grep -E "'
for pattern in allowed_patterns:
grep_string = "%s%s|" % (grep_string, pattern)
grep_string = grep_string[:-1]
grep_string += '"'
exclude_string = ""
if excluded_patterns:
exclude_string = '| grep -v "'
for pattern in excluded_patterns:
exclude_string = "%s%s|" % (exclude_string, pattern)
exclude_string = exclude_string[:-1]
exclude_string += '"'
cmd = "cat {ifile} {grep} {exclude} > {tmp_file}".format(
ifile=ifile, grep=grep_string, tmp_file=tmp_file, exclude=exclude_string)
logging.info("Executing [%s]", cmd)
subprocess.check_call(cmd, shell=True)
logging.info("Bash filter complete")
return tmp_file
####################################################################################################
## YCSB --> TO DATAFRAMES
####################################################################################################
# RAW
def load_ycsb_raw_2_df(ycsb_file, columns=DEFAULT_YCSB_COLUMNS, nrows2read=None):
"""This function parses out """
out_file_name = "%s.%s" % (ycsb_file, PARSED_FILE_EXTENSION)
cpp_parse_ycsb_file(
ycsb_file, out_file_name,
valid_prefixes=PARSING_VALID_PREFIXES)
df = load_ycsb_parsed_2_df(out_file_name, columns, nrows2read)
return df
# PARSED
def load_ycsb_parsed_2_df(
parsed_ycsb_file, columns=DEFAULT_YCSB_COLUMNS, nrows2read=None):
""" Reads parsed YCSB file of the format
...
READ,1503069594211,1919843
READ,1503069594211,1844322
READ,1503069594211,1844352
...
"""
df = pd.read_csv(parsed_ycsb_file,
names=columns,
dtype={"opcode":str, "timestamp_ms":np.uint64, "latency_us":np.uint64},
usecols=[0,1,2],
nrows=nrows2read)
return df
# SKIP FRONT HEAD
def skip_df_head(df, skip_first_mins=1, timestamp_column="timestamp"):
""" Function takes a dataframe and a column that is converted to a datetime format, based on
this column initial samples are skipped """
starting_period = pd.Period(df[timestamp_column][0], "S")
offset_period = starting_period + 60 * skip_first_mins
logging.info("DF start %s, offset to %s",
df[timestamp_column][0], offset_period)
shortened_df = df[df[timestamp_column] > offset_period.to_timestamp()]
logging.info("First %s minutes was skipped, Total length reduction %s %s prcnt",
skip_first_mins,
len(df)-len(shortened_df),
(len(df)-len(shortened_df)) / (len(df)/100)
)
return shortened_df
####################################################################################################
## YCSB --> TO TIME SERIES
####################################################################################################
# RAW
def load_ycsb_raw_2_ts(ycsb_file, nrows2read=None):
"""This function parses out """
out_file_name = "%s.%s" % (ycsb_file, PARSED_FILE_EXTENSION)
cpp_parse_ycsb_file(
ycsb_file, out_file_name,
valid_prefixes=["READ", "UPDATE", "RawREAD", "RawUPDATE"])
ts = load_ycsb_parsed_2_ts(out_file_name, DEFAULT_YCSB_COLUMNS, nrows2read)
return ts
# PARSED
def load_ycsb_parsed_2_ts(parsed_ycsb_file, columns=DEFAULT_YCSB_COLUMNS, nrows2read=None):
df = load_ycsb_parsed_2_df(parsed_ycsb_file, columns, nrows2read)
df = convert_df_column_ms2datetime(df)
# ts = pd.Series(np.asarray(df["latency_us"]), index=df["datetimestamp"])
ts = convert_parsed_df_to_ts(df)
return ts
def convert_parsed_df_to_ts(df, latency_column="latency_us", timestamp_column="datetimestamp"):
return pd.Series(np.asarray(df[latency_column]), index=df[timestamp_column])
# SKIP FRONT HEAD
def skip_ts_head(ts, skip_first_mins=1):
starting_period = pd.Period(ts.index[0], "S")
offset_period = starting_period + 60 * skip_first_mins
logging.info("TS start %s, ts end %s, offset to %s",
ts.index[0], ts.index[-1], offset_period)
shortened_ts = ts[offset_period.to_timestamp():]
logging.info("First %s minutes was skipped, Total length reduction %s %s prcnt",
skip_first_mins,
len(ts)-len(shortened_ts),
(len(ts)-len(shortened_ts)) / (len(ts)/100)
)
assert len(shortened_ts)>0, "Too few samples!"
return shortened_ts
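
# A small self-contained sketch of the timestamp handling above: millisecond
# epochs are scaled to nanoseconds and converted by convert_df_column_ms2datetime,
# the frame becomes a latency time series, and the first minute of samples is
# dropped. The values are toy data, not a real YCSB trace.
def _timestamp_handling_sketch():
    toy = pd.DataFrame({
        "opcode": ["READ"] * 200,
        "timestamp_ms": np.uint64(1503069594211) + np.arange(200, dtype=np.uint64) * 1000,
        "latency_us": np.uint64(1800000) + np.arange(200, dtype=np.uint64),
    })
    toy = convert_df_column_ms2datetime(toy)
    ts = convert_parsed_df_to_ts(toy)
    return skip_ts_head(ts, skip_first_mins=1)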
| 31.63662
| 100
| 0.622474
|
41657f26ba81951574a8e78009912a30c0ca3da3
| 293
|
py
|
Python
|
tests/utests/ofagent/test_loxi_ofp13_util.py
|
jonohart/voltha
|
87314cd53cb4c61e7e62b0ed3fc6da94603cc507
|
[
"Apache-2.0"
] | null | null | null |
tests/utests/ofagent/test_loxi_ofp13_util.py
|
jonohart/voltha
|
87314cd53cb4c61e7e62b0ed3fc6da94603cc507
|
[
"Apache-2.0"
] | null | null | null |
tests/utests/ofagent/test_loxi_ofp13_util.py
|
jonohart/voltha
|
87314cd53cb4c61e7e62b0ed3fc6da94603cc507
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase, main
import loxi.of13 as ofp
class TestConection_mgr(TestCase):
def test_bitmap_to_version(self):
bitmaps = [18]
versions = ofp.util.bitmap_to_version(bitmaps)
self.assertEqual(versions,[1,4])
if __name__ == '__main__':
main()
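
# The expected value above follows from 18 == 0b10010: the set bit positions
# (1 and 4) are read out as OpenFlow wire versions. A rough sketch of that
# decoding (loxi's real bitmap_to_version may differ in details):
def _decode_version_bitmap(bitmaps):
    versions = []
    for word_index, word in enumerate(bitmaps):
        for bit in range(32):
            if word & (1 << bit):
                versions.append(word_index * 32 + bit)
    return versions

assert _decode_version_bitmap([18]) == [1, 4]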
| 22.538462
| 54
| 0.692833
|
f80c5d18eb2db35906ad9b87261fb789d1761e20
| 5,425
|
py
|
Python
|
python/ray/serve/benchmarks/microbenchmark.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
python/ray/serve/benchmarks/microbenchmark.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
python/ray/serve/benchmarks/microbenchmark.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
# Runs several scenarios with varying max batch size, max concurrent queries,
# number of replicas, and with intermediate serve handles (to simulate ensemble
# models) either on or off.
import aiohttp
import asyncio
import logging
import time
import requests
import numpy as np
import ray
from ray import serve
logger = logging.getLogger(__file__)
NUM_CLIENTS = 8
CALLS_PER_BATCH = 100
async def timeit(name, fn, multiplier=1):
# warmup
start = time.time()
while time.time() - start < 1:
await fn()
# real run
stats = []
for _ in range(4):
start = time.time()
count = 0
while time.time() - start < 2:
await fn()
count += 1
end = time.time()
stats.append(multiplier * count / (end - start))
logger.info(
"\t{} {} +- {} requests/s".format(
name, round(np.mean(stats), 2), round(np.std(stats), 2)
)
)
return round(np.mean(stats), 2)
async def fetch(session, data):
async with session.get("http://localhost:8000/api", data=data) as response:
response = await response.text()
assert response == "ok", response
@ray.remote
class Client:
def __init__(self):
self.session = aiohttp.ClientSession()
def ready(self):
return "ok"
async def do_queries(self, num, data):
for _ in range(num):
await fetch(self.session, data)
async def trial(
result_json,
intermediate_handles,
num_replicas,
max_batch_size,
max_concurrent_queries,
data_size,
):
trial_key_base = (
f"replica:{num_replicas}/batch_size:{max_batch_size}/"
f"concurrent_queries:{max_concurrent_queries}/"
f"data_size:{data_size}/intermediate_handle:{intermediate_handles}"
)
logger.info(
f"intermediate_handles={intermediate_handles},"
f"num_replicas={num_replicas},"
f"max_batch_size={max_batch_size},"
f"max_concurrent_queries={max_concurrent_queries},"
f"data_size={data_size}"
)
deployment_name = "api"
if intermediate_handles:
deployment_name = "downstream"
@serve.deployment(name="api", max_concurrent_queries=1000)
class ForwardActor:
def __init__(self):
self.handle = None
async def __call__(self, req):
if self.handle is None:
self.handle = serve.get_deployment(deployment_name).get_handle(
sync=False
)
obj_ref = await self.handle.remote(req)
return await obj_ref
ForwardActor.deploy()
routes = requests.get("http://localhost:8000/-/routes").json()
assert "/api" in routes, routes
@serve.deployment(
name=deployment_name,
num_replicas=num_replicas,
max_concurrent_queries=max_concurrent_queries,
)
class D:
@serve.batch(max_batch_size=max_batch_size)
async def batch(self, reqs):
return [b"ok"] * len(reqs)
async def __call__(self, req):
if max_batch_size > 1:
return await self.batch(req)
else:
return b"ok"
D.deploy()
routes = requests.get("http://localhost:8000/-/routes").json()
assert f"/{deployment_name}" in routes, routes
if data_size == "small":
data = None
elif data_size == "large":
data = b"a" * 1024 * 1024
else:
raise ValueError("data_size should be 'small' or 'large'.")
async with aiohttp.ClientSession() as session:
async def single_client():
for _ in range(CALLS_PER_BATCH):
await fetch(session, data)
single_client_avg_tps = await timeit(
"single client {} data".format(data_size),
single_client,
multiplier=CALLS_PER_BATCH,
)
key = "num_client:1/" + trial_key_base
result_json.update({key: single_client_avg_tps})
clients = [Client.remote() for _ in range(NUM_CLIENTS)]
ray.get([client.ready.remote() for client in clients])
async def many_clients():
ray.get([a.do_queries.remote(CALLS_PER_BATCH, data) for a in clients])
multi_client_avg_tps = await timeit(
"{} clients {} data".format(len(clients), data_size),
many_clients,
multiplier=CALLS_PER_BATCH * len(clients),
)
key = f"num_client:{len(clients)}/" + trial_key_base
result_json.update({key: multi_client_avg_tps})
logger.info(result_json)
async def main():
result_json = {}
for intermediate_handles in [False, True]:
for num_replicas in [1, 8]:
for max_batch_size, max_concurrent_queries in [
(1, 1),
(1, 10000),
(10000, 10000),
]:
# TODO(edoakes): large data causes broken pipe errors.
for data_size in ["small"]:
await trial(
result_json,
intermediate_handles,
num_replicas,
max_batch_size,
max_concurrent_queries,
data_size,
)
return result_json
if __name__ == "__main__":
ray.init()
serve.start()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
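
# A toy check of the throughput arithmetic in timeit() above: each timed window
# reports multiplier * count / elapsed_seconds. With CALLS_PER_BATCH = 100, a
# single client finishing 5 batches in 2 s is reported as 250 requests/s; the
# multi-client case multiplies by len(clients) because one fn() call then
# issues CALLS_PER_BATCH requests per client.
assert 100 * 5 / 2.0 == 250.0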
| 28.255208
| 83
| 0.58894
|
66a0cef7dd8cdf1cb5b4047ad80d8ce57fc138f6
| 44,300
|
py
|
Python
|
frappe/model/document.py
|
smehata/frappe
|
99b4825a319b3e7474d83ab8d3be6e854e8fd7e0
|
[
"MIT"
] | null | null | null |
frappe/model/document.py
|
smehata/frappe
|
99b4825a319b3e7474d83ab8d3be6e854e8fd7e0
|
[
"MIT"
] | null | null | null |
frappe/model/document.py
|
smehata/frappe
|
99b4825a319b3e7474d83ab8d3be6e854e8fd7e0
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import hashlib
import json
import time
from werkzeug.exceptions import NotFound
import frappe
from frappe import _, msgprint, is_whitelisted
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock, date_diff
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name, validate_name
from frappe.model.docstatus import DocStatus
from frappe.model import optional_fields, table_fields
from frappe.model.workflow import validate_workflow
from frappe.model.workflow import set_workflow_state_on_action
from frappe.utils.global_search import update_global_search
from frappe.integrations.doctype.webhook import run_webhooks
from frappe.desk.form.document_follow import follow_document
from frappe.core.doctype.server_script.server_script_utils import run_server_script_for_doc_event
from frappe.utils.data import get_absolute_url
# once_only validation
# methods
def get_doc(*args, **kwargs):
"""returns a frappe.model.Document object.
:param arg1: Document dict or DocType name.
:param arg2: [optional] document name.
:param for_update: [optional] select document for update.
There are multiple ways to call `get_doc`
# will fetch the latest user object (with child table) from the database
user = get_doc("User", "test@example.com")
# create a new object
user = get_doc({
"doctype":"User"
"email_id": "test@example.com",
"roles: [
{"role": "System Manager"}
]
})
# create new object with keyword arguments
user = get_doc(doctype='User', email_id='test@example.com')
# select a document for update
user = get_doc("User", "test@example.com", for_update=True)
"""
if args:
if isinstance(args[0], BaseDocument):
# already a document
return args[0]
elif isinstance(args[0], str):
doctype = args[0]
elif isinstance(args[0], dict):
# passed a dict
kwargs = args[0]
else:
raise ValueError('First non keyword argument must be a string or dict')
if len(args) < 2 and kwargs:
if 'doctype' in kwargs:
doctype = kwargs['doctype']
else:
raise ValueError('"doctype" is a required key')
controller = get_controller(doctype)
if controller:
return controller(*args, **kwargs)
raise ImportError(doctype)
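
# A condensed sketch of the dispatch in get_doc() above (illustrative calls;
# the concrete controller class returned depends on the installed DocTypes):
#
#   get_doc("User", "test@example.com")   -> str first arg: doctype name (+ name)
#   get_doc({"doctype": "User", ...})     -> dict first arg: used as kwargs
#   get_doc(doctype="User", ...)          -> no args: "doctype" read from kwargs
#   get_doc(42)                           -> ValueError: not a str, dict or document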
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, *args, **kwargs):
"""Constructor.
:param arg1: DocType name as string or document **dict**
:param arg2: Document name, if `arg1` is DocType name.
If DocType name and document name are passed, the object will load
all values (including child documents) from the database.
"""
self.doctype = self.name = None
self._default_new_docs = {}
self.flags = frappe._dict()
if args and args[0] and isinstance(args[0], str):
			# first argument is doctype
if len(args)==1:
# single
self.doctype = self.name = args[0]
else:
self.doctype = args[0]
if isinstance(args[1], dict):
# filter
self.name = frappe.db.get_value(args[0], args[1], "name")
if self.name is None:
frappe.throw(_("{0} {1} not found").format(_(args[0]), args[1]),
frappe.DoesNotExistError)
else:
self.name = args[1]
if 'for_update' in kwargs:
self.flags.for_update = kwargs.get('for_update')
self.load_from_db()
return
if args and args[0] and isinstance(args[0], dict):
# first argument is a dict
kwargs = args[0]
if kwargs:
# init base document
super(Document, self).__init__(kwargs)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise ValueError('Illegal arguments')
@staticmethod
def whitelist(fn):
"""Decorator: Whitelist method to be called remotely via REST API."""
frappe.whitelist()(fn)
return fn
def reload(self):
"""Reload document from database"""
self.load_from_db()
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1, for_update=self.flags.for_update)
if not d:
frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
super(Document, self).__init__(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import DOCTYPE_TABLE_FIELDS
table_fields = DOCTYPE_TABLE_FIELDS
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
def check_permission(self, permtype='read', permlevel=None):
"""Raise `frappe.PermissionError` if not permitted"""
if not self.has_permission(permtype):
self.raise_no_permission_to(permlevel or permtype)
def has_permission(self, permtype="read", verbose=False):
"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
is not set.
:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
import frappe.permissions
if self.flags.ignore_permissions:
return True
return frappe.permissions.has_permission(self.doctype, permtype, self, verbose=verbose)
def raise_no_permission_to(self, perm_type):
"""Raise `frappe.PermissionError`."""
frappe.flags.error_message = _('Insufficient Permission for {0}').format(self.doctype)
raise frappe.PermissionError
def insert(self, ignore_permissions=None, ignore_links=None, ignore_if_duplicate=False,
ignore_mandatory=None, set_name=None, set_child_names=True):
"""Insert the document in the database (as a new document).
This will check for user permissions and execute `before_insert`,
`validate`, `on_update`, `after_insert` methods if they are written.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions is not None:
self.flags.ignore_permissions = ignore_permissions
if ignore_links is not None:
self.flags.ignore_links = ignore_links
if ignore_mandatory is not None:
self.flags.ignore_mandatory = ignore_mandatory
self.set("__islocal", True)
self._set_defaults()
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self._validate_links()
self.check_permission("create")
self.run_method("before_insert")
self.set_new_name(set_name=set_name, set_child_names=set_child_names)
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.flags.in_insert = True
self.run_before_save_methods()
self._validate()
self.set_docstatus()
self.flags.in_insert = False
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
try:
self.db_insert()
except frappe.DuplicateEntryError as e:
if not ignore_if_duplicate:
raise e
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.flags.in_insert = True
if self.get("amended_from"):
self.copy_attachments_from_amended_from()
# flag to prevent creation of event update log for create and update both
# during document creation
self.flags.update_log_for_doc_creation = True
self.run_post_save_methods()
self.flags.in_insert = False
# delete __islocal
if hasattr(self, "__islocal"):
delattr(self, "__islocal")
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
if not (frappe.flags.in_migrate or frappe.local.flags.in_install or frappe.flags.in_setup_wizard):
follow_document(self.doctype, self.name, frappe.session.user)
return self
def save(self, *args, **kwargs):
"""Wrapper for _save"""
return self._save(*args, **kwargs)
def _save(self, ignore_permissions=None, ignore_version=None):
"""Save the current document in the database in the **DocType**'s table or
`tabSingles` (for single types).
This will check for user permissions and execute
`validate` before updating, `on_update` after updating triggers.
:param ignore_permissions: Do not check permissions if True.
:param ignore_version: Do not save version if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions is not None:
self.flags.ignore_permissions = ignore_permissions
self.flags.ignore_version = frappe.flags.in_test if ignore_version is None else ignore_version
if self.get("__islocal") or not self.get("name"):
return self.insert()
self.check_permission("write", "save")
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.set_parent_in_children()
self.set_name_in_children()
self.validate_higher_perm_levels()
self._validate_links()
self.run_before_save_methods()
if self._action != "cancel":
self._validate()
if self._action == "update_after_submit":
self.validate_update_after_submit()
self.set_docstatus()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
self.update_children()
self.run_post_save_methods()
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
return self
def copy_attachments_from_amended_from(self):
"""Copy attachments from `amended_from`"""
from frappe.desk.form.load import get_attachments
#loop through attachments
for attach_item in get_attachments(self.doctype, self.amended_from):
#save attachments to new doc
_file = frappe.get_doc({
"doctype": "File",
"file_url": attach_item.file_url,
"file_name": attach_item.file_name,
"attached_to_name": self.name,
"attached_to_doctype": self.doctype,
"folder": "Home/Attachments"})
_file.save()
def update_children(self):
"""update child tables"""
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
def update_child_table(self, fieldname, df=None):
"""sync child table for given fieldname"""
rows = []
if not df:
df = self.meta.get_field(fieldname)
for d in self.get(df.fieldname):
d.db_update()
rows.append(d.name)
if df.options in (self.flags.ignore_children_type or []):
# do not delete rows for this because of flags
# hack for docperm :(
return
if rows:
# select rows that do not match the ones in the document
deleted_rows = frappe.db.sql("""select name from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s
and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
[self.name, self.doctype, fieldname] + rows)
if len(deleted_rows) > 0:
# delete rows that do not match the ones in the document
frappe.db.delete(df.options, {"name": ("in", tuple(row[0] for row in deleted_rows))})
else:
# no rows found, delete all rows
frappe.db.delete(df.options, {
"parent": self.name,
"parenttype": self.doctype,
"parentfield": fieldname
})
def get_doc_before_save(self):
return getattr(self, '_doc_before_save', None)
def has_value_changed(self, fieldname):
'''Returns true if value is changed before and after saving'''
previous = self.get_doc_before_save()
return previous.get(fieldname)!=self.get(fieldname) if previous else True
def set_new_name(self, force=False, set_name=None, set_child_names=True):
"""Calls `frappe.naming.set_new_name` for parent and child docs."""
if self.flags.name_set and not force:
return
# If autoname has set as Prompt (name)
if self.get("__newname"):
self.name = validate_name(self.doctype, self.get("__newname"))
self.flags.name_set = True
return
if set_name:
self.name = validate_name(self.doctype, set_name)
else:
set_new_name(self)
if set_child_names:
# set name for children
for d in self.get_all_children():
set_new_name(d)
self.flags.name_set = True
def get_title(self):
"""Get the document title based on title_field or `title` or `name`"""
return self.get(self.meta.get_title_field())
def set_title_field(self):
"""Set title field based on template"""
def get_values():
values = self.as_dict()
# format values
for key, value in values.items():
if value is None:
values[key] = ""
return values
if self.meta.get("title_field")=="title":
df = self.meta.get_field(self.meta.title_field)
if df.options:
self.set(df.fieldname, df.options.format(**get_values()))
elif self.is_new() and not self.get(df.fieldname) and df.default:
# set default title for new transactions (if default)
self.set(df.fieldname, df.default.format(**get_values()))
def update_single(self, d):
"""Updates values for Single type Document in `tabSingles`."""
frappe.db.delete("Singles", {
"doctype": self.doctype
})
for field, value in d.items():
if field != "doctype":
frappe.db.sql("""insert into `tabSingles` (doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
if self.doctype in frappe.db.value_cache:
del frappe.db.value_cache[self.doctype]
def set_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
# We'd probably want the creation and owner to be set via API
# or Data import at some point, that'd have to be handled here
if self.is_new() and not (frappe.flags.in_patch or frappe.flags.in_migrate):
self.creation = self.modified
self.owner = self.modified_by
for d in self.get_all_children():
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
if self.docstatus is None:
self.docstatus = DocStatus.draft()
for d in self.get_all_children():
d.docstatus = self.docstatus
def _validate(self):
self._validate_mandatory()
self._validate_data_fields()
self._validate_selects()
self._validate_non_negative()
self._validate_length()
self._validate_code_fields()
self._extract_images_from_text_editor()
self._sanitize_content()
self._save_passwords()
self.validate_workflow()
children = self.get_all_children()
for d in children:
d._validate_data_fields()
d._validate_selects()
d._validate_non_negative()
d._validate_length()
d._validate_code_fields()
d._extract_images_from_text_editor()
d._sanitize_content()
d._save_passwords()
if self.is_new():
# don't set fields like _assign, _comments for new doc
for fieldname in optional_fields:
self.set(fieldname, None)
else:
self.validate_set_only_once()
def _validate_non_negative(self):
def get_msg(df):
if self.get("parentfield"):
return "{} {} #{}: {} {}".format(frappe.bold(_(self.doctype)),
_("Row"), self.idx, _("Value cannot be negative for"), frappe.bold(_(df.label)))
else:
return _("Value cannot be negative for {0}: {1}").format(_(df.parent), frappe.bold(_(df.label)))
for df in self.meta.get('fields', {'non_negative': ('=', 1),
'fieldtype': ('in', ['Int', 'Float', 'Currency'])}):
if flt(self.get(df.fieldname)) < 0:
msg = get_msg(df)
frappe.throw(msg, frappe.NonNegativeError, title=_("Negative Value"))
def validate_workflow(self):
"""Validate if the workflow transition is valid"""
if frappe.flags.in_install == 'frappe': return
workflow = self.meta.get_workflow()
if workflow:
validate_workflow(self)
if not self._action == 'save':
set_workflow_state_on_action(self, workflow, self._action)
def validate_set_only_once(self):
"""Validate that fields are not changed if not in insert"""
set_only_once_fields = self.meta.get_set_only_once_fields()
if set_only_once_fields and self._doc_before_save:
# document exists before saving
for field in set_only_once_fields:
fail = False
value = self.get(field.fieldname)
original_value = self._doc_before_save.get(field.fieldname)
if field.fieldtype in table_fields:
fail = not self.is_child_table_same(field.fieldname)
elif field.fieldtype in ('Date', 'Datetime', 'Time'):
fail = str(value) != str(original_value)
else:
fail = value != original_value
if fail:
frappe.throw(
_("Value cannot be changed for {0}").format(
frappe.bold(self.meta.get_label(field.fieldname))
),
exc=frappe.CannotChangeConstantError
)
return False
def is_child_table_same(self, fieldname):
"""Validate child table is same as original table before saving"""
value = self.get(fieldname)
original_value = self._doc_before_save.get(fieldname)
same = True
if len(original_value) != len(value):
same = False
else:
# check all child entries
for i, d in enumerate(original_value):
new_child = value[i].as_dict(convert_dates_to_str = True)
original_child = d.as_dict(convert_dates_to_str = True)
# all fields must be same other than modified and modified_by
for key in ('modified', 'modified_by', 'creation'):
del new_child[key]
del original_child[key]
if original_child != new_child:
same = False
break
return same
def apply_fieldlevel_read_permissions(self):
"""Remove values the user is not allowed to read (called when loading in desk)"""
if frappe.session.user == "Administrator":
return
has_higher_permlevel = False
all_fields = self.meta.fields.copy()
for table_field in self.meta.get_table_fields():
all_fields += frappe.get_meta(table_field.options).fields or []
for df in all_fields:
if df.permlevel > 0:
has_higher_permlevel = True
break
if not has_higher_permlevel:
return
has_access_to = self.get_permlevel_access('read')
for df in self.meta.fields:
if df.permlevel and not df.permlevel in has_access_to:
self.set(df.fieldname, None)
for table_field in self.meta.get_table_fields():
for df in frappe.get_meta(table_field.options).fields or []:
if df.permlevel and not df.permlevel in has_access_to:
for child in self.get(table_field.fieldname) or []:
child.set(df.fieldname, None)
def validate_higher_perm_levels(self):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
if self.flags.ignore_permissions or frappe.flags.in_install:
return
if frappe.session.user == "Administrator":
return
has_access_to = self.get_permlevel_access()
high_permlevel_fields = self.meta.get_high_permlevel_fields()
if high_permlevel_fields:
self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
# If new record then don't reset the values for child table
if self.is_new(): return
# check for child tables
for df in self.meta.get_table_fields():
high_permlevel_fields = frappe.get_meta(df.options).get_high_permlevel_fields()
if high_permlevel_fields:
for d in self.get(df.fieldname):
d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
if not hasattr(self, "_has_access_to"):
self._has_access_to = {}
self._has_access_to[permission_type] = []
roles = frappe.get_roles()
for perm in self.get_permissions():
if perm.role in roles and perm.get(permission_type):
if perm.permlevel not in self._has_access_to[permission_type]:
self._has_access_to[permission_type].append(perm.permlevel)
return self._has_access_to[permission_type]
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
if not df:
df = self.meta.get_field(fieldname)
return df.permlevel in self.get_permlevel_access(permission_type)
def get_permissions(self):
if self.meta.istable:
# use parent permissions
permissions = frappe.get_meta(self.parenttype).permissions
else:
permissions = self.meta.permissions
return permissions
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype, as_dict=True)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options, as_dict=True)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
"""Checks if `modified` timestamp provided by document being updated is same as the
`modified` timestamp in the database. If there is a difference, the document has been
updated in the database after the current copy was read. Will throw an error if
timestamps don't match.
Will also validate document transitions (Save > Submit > Cancel) calling
`self.check_docstatus_transition`."""
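# A minimal sketch of the conflict this guards against (document names are
# illustrative): two copies of the same document are loaded, the first one is
# saved, and saving the second then fails because its `modified` timestamp is stale.
#
#     doc_a = frappe.get_doc("Note", "NOTE-0001")
#     doc_b = frappe.get_doc("Note", "NOTE-0001")
#     doc_a.save()   # updates `modified` in the database
#     doc_b.save()   # raises frappe.TimestampMismatchError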
conflict = False
self._action = "save"
if not self.get('__islocal') and not self.meta.get('is_virtual'):
if self.meta.issingle:
modified = frappe.db.sql("""select value from tabSingles
where doctype=%s and field='modified' for update""", self.doctype)
modified = modified and modified[0][0]
if modified and modified != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
where name = %s for update""".format(self.doctype), self.name, as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
else:
tmp = tmp[0]
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
def check_docstatus_transition(self, to_docstatus):
"""Ensures valid `docstatus` transition.
Valid transitions are (number in brackets is `docstatus`):
- Save (0) > Save (0)
- Save (0) > Submit (1)
- Submit (1) > Submit (1)
- Submit (1) > Cancel (2)
"""
if not self.docstatus:
self.docstatus = DocStatus.draft()
if to_docstatus == DocStatus.draft():
if self.docstatus.is_draft():
self._action = "save"
elif self.docstatus.is_submitted():
self._action = "submit"
self.check_permission("submit")
elif self.docstatus.is_cancelled():
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 (Draft) to 2 (Cancelled)"))
else:
raise frappe.ValidationError(_("Invalid docstatus"), self.docstatus)
elif to_docstatus == DocStatus.submitted():
if self.docstatus.is_submitted():
self._action = "update_after_submit"
self.check_permission("submit")
elif self.docstatus.is_cancelled():
self._action = "cancel"
self.check_permission("cancel")
elif self.docstatus.is_draft():
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 (Submitted) to 0 (Draft)"))
else:
raise frappe.ValidationError(_("Invalid docstatus"), self.docstatus)
elif to_docstatus == DocStatus.cancelled():
raise frappe.ValidationError(_("Cannot edit cancelled document"))
def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def set_name_in_children(self):
# Set name for any new children
for d in self.get_all_children():
if not d.name:
set_new_name(d)
def validate_update_after_submit(self):
if self.flags.ignore_validate_update_after_submit:
return
self._validate_update_after_submit()
for d in self.get_all_children():
if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
# in case of a new row, don't validate allow on submit, if table is allow on submit
continue
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.flags.ignore_mandatory:
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
if frappe.flags.print_messages:
print(self.as_json().encode("utf-8"))
raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
fields=", ".join((each[0] for each in missing)),
doctype=self.doctype,
name=self.name))
def _validate_links(self):
if self.flags.ignore_links or self._action == "cancel":
return
invalid_links, cancelled_links = self.get_invalid_links()
for d in self.get_all_children():
result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
invalid_links.extend(result[0])
cancelled_links.extend(result[1])
if invalid_links:
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
if cancelled_links:
msg = ", ".join((each[2] for each in cancelled_links))
frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": ['in', table_fields]}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in hooks"""
if "flags" in kwargs:
del kwargs["flags"]
if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
else:
# hack! to run hooks even if method does not exist
fn = lambda self, *args, **kwargs: None
fn.__name__ = str(method)
out = Document.hook(fn)(self, *args, **kwargs)
self.run_notifications(method)
run_webhooks(self, method)
run_server_script_for_doc_event(self, method)
return out
def run_trigger(self, method, *args, **kwargs):
return self.run_method(method, *args, **kwargs)
def run_notifications(self, method):
"""Run notifications for this method"""
if (frappe.flags.in_import and frappe.flags.mute_emails) or frappe.flags.in_patch or frappe.flags.in_install:
return
if self.flags.notifications_executed is None:
self.flags.notifications_executed = []
from frappe.email.doctype.notification.notification import evaluate_alert
if self.flags.notifications is None:
alerts = frappe.cache().hget('notifications', self.doctype)
if alerts is None:
alerts = frappe.get_all('Notification', fields=['name', 'event', 'method'],
filters={'enabled': 1, 'document_type': self.doctype})
frappe.cache().hset('notifications', self.doctype, alerts)
self.flags.notifications = alerts
if not self.flags.notifications:
return
def _evaluate_alert(alert):
if not alert.name in self.flags.notifications_executed:
evaluate_alert(self, alert.name, alert.event)
self.flags.notifications_executed.append(alert.name)
event_map = {
"on_update": "Save",
"after_insert": "New",
"on_submit": "Submit",
"on_cancel": "Cancel"
}
if not self.flags.in_insert:
# value change is not applicable in insert
event_map['on_change'] = 'Value Change'
for alert in self.flags.notifications:
event = event_map.get(method, None)
if event and alert.event == event:
_evaluate_alert(alert)
elif alert.event=='Method' and method == alert.method:
_evaluate_alert(alert)
@whitelist.__func__
def _submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self.docstatus = DocStatus.submitted()
return self.save()
@whitelist.__func__
def _cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves.
"""
self.docstatus = DocStatus.cancelled()
return self.save()
@whitelist.__func__
def submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
return self._submit()
@whitelist.__func__
def cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
return self._cancel()
def delete(self, ignore_permissions=False):
"""Delete document."""
frappe.delete_doc(self.doctype, self.name, ignore_permissions = ignore_permissions, flags=self.flags)
def run_before_save_methods(self):
"""Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:
- `validate`, `before_save` for **Save**.
- `validate`, `before_submit` for **Submit**.
- `before_cancel` for **Cancel**
- `before_update_after_submit` for **Update after Submit**
Will also update title_field if set"""
self.load_doc_before_save()
self.reset_seen()
# before_validate method should be executed before ignoring validations
if self._action in ("save", "submit"):
self.run_method("before_validate")
if self.flags.ignore_validate:
return
if self._action=="save":
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
self.set_title_field()
def load_doc_before_save(self):
"""Save load document from db before saving"""
self._doc_before_save = None
if not self.is_new():
try:
self._doc_before_save = frappe.get_doc(self.doctype, self.name)
except frappe.DoesNotExistError:
self._doc_before_save = None
frappe.clear_last_message()
def run_post_save_methods(self):
"""Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:
- `on_update` for **Save**.
- `on_update`, `on_submit` for **Submit**.
- `on_cancel` for **Cancel**
- `update_after_submit` for **Update after Submit**"""
doc_before_save = self.get_doc_before_save()
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
elif self._action=="cancel":
self.run_method("on_cancel")
self.check_no_back_links_exist()
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
self.clear_cache()
self.notify_update()
update_global_search(self)
self.save_version()
self.run_method('on_change')
if (self.doctype, self.name) in frappe.flags.currently_saving:
frappe.flags.currently_saving.remove((self.doctype, self.name))
self.latest = None
def clear_cache(self):
frappe.clear_document_cache(self.doctype, self.name)
def reset_seen(self):
"""Clear _seen property and set current user as seen"""
if getattr(self.meta, 'track_seen', False):
frappe.db.set_value(self.doctype, self.name, "_seen", json.dumps([frappe.session.user]), update_modified=False)
def notify_update(self):
"""Publish realtime that the current document is modified"""
if frappe.flags.in_patch: return
frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
doctype=self.doctype, docname=self.name, after_commit=True)
if not self.meta.get("read_only") and not self.meta.get("issingle") and \
not self.meta.get("istable"):
data = {
"doctype": self.doctype,
"name": self.name,
"user": frappe.session.user
}
frappe.publish_realtime("list_update", data, after_commit=True)
def db_set(self, fieldname, value=None, update_modified=True, notify=False, commit=False):
"""Set a value in the document object, update the timestamp and update the database.
WARNING: This method does not trigger controller validations and should
be used very carefully.
:param fieldname: fieldname of the property to be updated, or a {"field":"value"} dictionary
:param value: value of the property to be updated
:param update_modified: default True. updates the `modified` and `modified_by` properties
:param notify: default False. run doc.notify_update() to send updates via socketio
:param commit: default False. run frappe.db.commit()
"""
if isinstance(fieldname, dict):
self.update(fieldname)
else:
self.set(fieldname, value)
if update_modified and (self.doctype, self.name) not in frappe.flags.currently_saving:
# don't update modified timestamp if called from post save methods
# like on_update or on_submit
self.set("modified", now())
self.set("modified_by", frappe.session.user)
# load but do not reload doc_before_save because before_change or on_change might expect it
if not self.get_doc_before_save():
self.load_doc_before_save()
# to trigger notification on value change
self.run_method('before_change')
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
self.run_method('on_change')
if notify:
self.notify_update()
self.clear_cache()
if commit:
frappe.db.commit()
def db_get(self, fieldname):
"""get database value for this fieldname"""
return frappe.db.get_value(self.doctype, self.name, fieldname)
def check_no_back_links_exist(self):
"""Check if document links to any active document before Cancel."""
from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
if not self.flags.ignore_links:
check_if_doc_is_linked(self, method="Cancel")
check_if_doc_is_dynamically_linked(self, method="Cancel")
def save_version(self):
"""Save version info"""
# don't track version under following conditions
if (not getattr(self.meta, 'track_changes', False)
or self.doctype == 'Version'
or self.flags.ignore_version
or frappe.flags.in_install
or (not self._doc_before_save and frappe.flags.in_patch)):
return
version = frappe.new_doc('Version')
if not self._doc_before_save:
version.for_insert(self)
version.insert(ignore_permissions=True)
elif version.set_diff(self._doc_before_save, self):
version.insert(ignore_permissions=True)
if not frappe.flags.in_migrate:
# follow since you made a change?
follow_document(self.doctype, self.name, frappe.session.user)
@staticmethod
def hook(f):
"""Decorator: Make method `hookable` (i.e. extensible by another app).
Note: If each hooked method returns a value (dict), then all returns are
collated in one dict and returned. Ideally, don't return values in hookable
methods, set properties in the document."""
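# Sketch of how this composes with app hooks (app, module and handler names are
# hypothetical): a doc_events entry makes the handler run after the document's
# own method whenever run_method() fires it.
#
#     # my_app/hooks.py
#     doc_events = {
#         "*": {"on_update": "my_app.handlers.log_update"},
#     }
#
#     # my_app/handlers.py
#     def log_update(doc, method):
#         frappe.logger().info("{0} {1} ran {2}".format(doc.doctype, doc.name, method))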
def add_to_return_value(self, new_return_value):
if new_return_value is None:
self._return_value = self.get("_return_value")
return
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_doc_hooks()
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
def is_whitelisted(self, method_name):
method = getattr(self, method_name, None)
if not method:
raise NotFound("Method {0} not found".format(method_name))
is_whitelisted(getattr(method, '__func__', method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""Check that value of fieldname should be 'condition' val2
else throw Exception."""
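# Hedged examples (fieldnames and values are illustrative):
#
#     self.validate_value("status", "in", ["Open", "Closed"])  # passes silently if it matches
#     self.validate_value("qty", ">", 0)                        # msgprint + raise if qty <= 0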
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
val1 = doc.get_value(fieldname)
df = doc.meta.get_field(fieldname)
val2 = doc.cast(val2, df)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.get("parentfield"):
msg = _("Incorrect value in row {0}: {1} must be {2} {3}").format(doc.idx, label, condition_str, val2)
else:
msg = _("Incorrect value: {0} must be {1} {2}").format(label, condition_str, val2)
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
"""Raise exception if Table field is empty."""
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
"""Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.
:param doc: Document whose numeric properties are to be rounded.
:param fieldnames: [Optional] List of fields to be rounded."""
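# Hedged example (child table and fieldnames are illustrative): round every
# Currency/Float/Percent field of each child row, or only a chosen subset.
#
#     for item in self.get("items") or []:
#         self.round_floats_in(item)                      # all float-like fields
#         self.round_floats_in(item, ["rate", "amount"])  # only these two fields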
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.get("parentfield"))))
def get_url(self):
"""Returns Desk URL for this document."""
return get_absolute_url(self.doctype, self.name)
def add_comment(self, comment_type='Comment', text=None, comment_email=None, link_doctype=None, link_name=None, comment_by=None):
"""Add a comment to this document.
:param comment_type: e.g. `Comment`. See Communication for more info."""
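# Hedged usage sketch (text and email are illustrative):
#
#     doc.add_comment("Comment", text="Reviewed, looks fine")
#     doc.add_comment("Info", text="Synced from the external system",
#         comment_email="bot@example.com")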
out = frappe.get_doc({
"doctype":"Comment",
'comment_type': comment_type,
"comment_email": comment_email or frappe.session.user,
"comment_by": comment_by,
"reference_doctype": self.doctype,
"reference_name": self.name,
"content": text or comment_type,
"link_doctype": link_doctype,
"link_name": link_name
}).insert(ignore_permissions=True)
return out
def add_seen(self, user=None):
"""add the given/current user to list of users who have seen this document (_seen)"""
if not user:
user = frappe.session.user
if self.meta.track_seen:
_seen = self.get('_seen') or []
_seen = frappe.parse_json(_seen)
if user not in _seen:
_seen.append(user)
frappe.db.set_value(self.doctype, self.name, '_seen', json.dumps(_seen), update_modified=False)
frappe.local.flags.commit = True
def add_viewed(self, user=None):
"""add log to communication when a user views a document"""
if not user:
user = frappe.session.user
if hasattr(self.meta, 'track_views') and self.meta.track_views:
frappe.get_doc({
"doctype": "View Log",
"viewed_by": frappe.session.user,
"reference_doctype": self.doctype,
"reference_name": self.name,
}).insert(ignore_permissions=True)
frappe.local.flags.commit = True
def get_signature(self):
"""Returns signature (hash) for private URL."""
return hashlib.sha224(get_datetime_str(self.creation).encode()).hexdigest()
def get_liked_by(self):
liked_by = getattr(self, "_liked_by", None)
if liked_by:
return json.loads(liked_by)
else:
return []
def set_onload(self, key, value):
if not self.get("__onload"):
self.set("__onload", frappe._dict())
self.get("__onload")[key] = value
def get_onload(self, key=None):
if not key:
return self.get("__onload", frappe._dict())
return self.get('__onload')[key]
def queue_action(self, action, **kwargs):
"""Run an action in background. If the action has an inner function,
like _submit for submit, it will call that instead"""
# call _submit instead of submit, so you can override submit to call
# run_delayed based on some action
# See: Stock Reconciliation
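# Hedged example (doctype and name are hypothetical): queue a long-running
# submit in the background instead of blocking the request; extra kwargs are
# forwarded to enqueue().
#
#     doc = frappe.get_doc("Hypothetical Reconciliation", "REC-0001")
#     doc.queue_action("submit", timeout=4000)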
from frappe.utils.background_jobs import enqueue
if hasattr(self, '_' + action):
action = '_' + action
if file_lock.lock_exists(self.get_signature()):
frappe.throw(_('This document is currently queued for execution. Please try again'),
title=_('Document Queued'))
self.lock()
enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
action=action, **kwargs)
def lock(self, timeout=None):
"""Creates a lock file for the given document. If timeout is set,
it will retry every 1 second for acquiring the lock again
:param timeout: Timeout in seconds, default 0"""
signature = self.get_signature()
if file_lock.lock_exists(signature):
lock_exists = True
if timeout:
for i in range(timeout):
time.sleep(1)
if not file_lock.lock_exists(signature):
lock_exists = False
break
if lock_exists:
raise frappe.DocumentLockedError
file_lock.create_lock(signature)
def unlock(self):
"""Delete the lock file for this document"""
file_lock.delete_lock(self.get_signature())
# validation helpers
def validate_from_to_dates(self, from_date_field, to_date_field):
"""
Generic validation to verify date sequence
"""
if date_diff(self.get(to_date_field), self.get(from_date_field)) < 0:
frappe.throw(_('{0} must be after {1}').format(
frappe.bold(self.meta.get_label(to_date_field)),
frappe.bold(self.meta.get_label(from_date_field)),
), frappe.exceptions.InvalidDates)
def get_assigned_users(self):
assigned_users = frappe.get_all('ToDo',
fields=['allocated_to'],
filters={
'reference_type': self.doctype,
'reference_name': self.name,
'status': ('!=', 'Cancelled'),
}, pluck='allocated_to')
users = set(assigned_users)
return users
def add_tag(self, tag):
"""Add a Tag to this document"""
from frappe.desk.doctype.tag.tag import DocTags
DocTags(self.doctype).add(self.name, tag)
def get_tags(self):
"""Return a list of Tags attached to this document"""
from frappe.desk.doctype.tag.tag import DocTags
return DocTags(self.doctype).get_tags(self.name).split(",")[1:]
def __repr__(self):
name = self.name or "unsaved"
doctype = self.__class__.__name__
docstatus = f" docstatus={self.docstatus}" if self.docstatus else ""
repr_str = f"<{doctype}: {name}{docstatus}"
if not hasattr(self, "parent"):
return repr_str + ">"
return f"{repr_str} parent={self.parent}>"
def __str__(self):
name = self.name or "unsaved"
doctype = self.__class__.__name__
return f"{doctype}({name})"
def execute_action(doctype, name, action, **kwargs):
"""Execute an action on a document (called by background worker)"""
doc = frappe.get_doc(doctype, name)
doc.unlock()
try:
getattr(doc, action)(**kwargs)
except Exception:
frappe.db.rollback()
# add a comment (?)
if frappe.local.message_log:
msg = json.loads(frappe.local.message_log[-1]).get('message')
else:
msg = '<pre><code>' + frappe.get_traceback() + '</code></pre>'
doc.add_comment('Comment', _('Action Failed') + '<br><br>' + msg)
doc.notify_update()
| 31.30742 | 130 | 0.717494 |
3e1be44bcd83841ad1a0b32fed1511f6ded23c5a | 197 | py | Python | bin/twigs/quasi-polytwigs-123-6x3-trapezoid-ring.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | ["Intel"] | null | null | null | bin/twigs/quasi-polytwigs-123-6x3-trapezoid-ring.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | ["Intel"] | null | null | null | bin/twigs/quasi-polytwigs-123-6x3-trapezoid-ring.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | ["Intel"] | 1 | 2022-01-02T16:54:14.000Z | 2022-01-02T16:54:14.000Z |
#!/usr/bin/env python
# $Id$
"""many solutions"""
import puzzler
from puzzler.puzzles.quasipolytwigs123 import QuasiPolytwigs123_6x3TrapezoidRing
puzzler.run(QuasiPolytwigs123_6x3TrapezoidRing)
| 19.7 | 80 | 0.822335 |
4b5d400a929094757db708d9021873d68ebd9b34 | 38,148 | py | Python | conans/test/build_helpers/cmake_test.py | aharrison24/conan | d5197368d1bcf99241cad08c858f6a1471613471 | ["MIT"] | null | null | null | conans/test/build_helpers/cmake_test.py | aharrison24/conan | d5197368d1bcf99241cad08c858f6a1471613471 | ["MIT"] | null | null | null | conans/test/build_helpers/cmake_test.py | aharrison24/conan | d5197368d1bcf99241cad08c858f6a1471613471 | ["MIT"] | null | null | null |
import os
import shutil
import stat
import sys
import unittest
import platform
from collections import namedtuple
from conans import tools
from conans.model.conan_file import ConanFile
from conans.model.settings import Settings
from conans.client.conf import default_settings_yml
from conans.client.build.cmake import CMake
from conans.test.utils.tools import TestBufferConanOutput
from conans.tools import cpu_count
from conans.util.files import save, load
from conans.test.utils.test_files import temp_folder
from conans.model.options import Options, PackageOptions
from conans.errors import ConanException
class CMakeTest(unittest.TestCase):
def setUp(self):
self.tempdir = temp_folder(path_with_spaces=False)
def tearDown(self):
shutil.rmtree(self.tempdir)
def config_patch_test(self):
conan_file = ConanFileMock()
conan_file.name = "MyPkg"
conan_file.settings = Settings()
conan_file.source_folder = os.path.join(self.tempdir, "src")
conan_file.build_folder = os.path.join(self.tempdir, "build")
conan_file.package_folder = os.path.join(self.tempdir, "pkg")
msg = "FOLDER: " + conan_file.package_folder
for folder in (conan_file.build_folder, conan_file.package_folder):
save(os.path.join(folder, "file1.cmake"), "Nothing")
save(os.path.join(folder, "file2"), msg)
save(os.path.join(folder, "file3.txt"), msg)
save(os.path.join(folder, "file3.cmake"), msg)
save(os.path.join(folder, "sub", "file3.cmake"), msg)
cmake = CMake(conan_file, generator="Unix Makefiles")
cmake.patch_config_paths()
for folder in (conan_file.build_folder, conan_file.package_folder):
self.assertEqual("Nothing", load(os.path.join(folder, "file1.cmake")))
self.assertEqual(msg, load(os.path.join(folder, "file2")))
self.assertEqual(msg, load(os.path.join(folder, "file3.txt")))
self.assertEqual("FOLDER: ${CONAN_MYPKG_ROOT}",
load(os.path.join(folder, "file3.cmake")))
self.assertEqual("FOLDER: ${CONAN_MYPKG_ROOT}",
load(os.path.join(folder, "sub", "file3.cmake")))
def partial_build_test(self):
conan_file = ConanFileMock()
conan_file.settings = Settings()
conan_file.should_configure = False
conan_file.should_build = False
conan_file.should_install = False
cmake = CMake(conan_file, generator="Unix Makefiles")
cmake.configure()
self.assertIsNone(conan_file.command)
cmake.build()
self.assertIsNone(conan_file.command)
cmake.install()
self.assertIsNone(conan_file.command)
conan_file.name = None
cmake.patch_config_paths()
def cmake_generator_test(self):
conan_file = ConanFileMock()
conan_file.settings = Settings()
with tools.environment_append({"CONAN_CMAKE_GENERATOR": "My CMake Generator"}):
cmake = CMake(conan_file)
self.assertIn('-G "My CMake Generator"', cmake.command_line)
def cmake_fpic_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
def assert_fpic(the_settings, input_shared, input_fpic, expected_option):
options = []
values = {}
if input_shared is not None:
options.append('"shared": [True, False]')
values["shared"] = input_shared
if input_fpic is not None:
options.append('"fPIC": [True, False]')
values["fPIC"] = input_fpic
conan_file = ConanFileMock(options='{%s}' % ", ".join(options),
options_values=values)
conan_file.settings = the_settings
cmake = CMake(conan_file)
cmake.configure()
if expected_option is not None:
self.assertEquals(cmake.definitions["CONAN_CMAKE_POSITION_INDEPENDENT_CODE"],
expected_option)
else:
self.assertNotIn("CONAN_CMAKE_POSITION_INDEPENDENT_CODE", cmake.definitions)
# Test shared=False and fpic=False
assert_fpic(settings, input_shared=False, input_fpic=False, expected_option="OFF")
# Test shared=True and fpic=False
assert_fpic(settings, input_shared=True, input_fpic=False, expected_option="ON")
# Test shared=True and fpic=True
assert_fpic(settings, input_shared=True, input_fpic=True, expected_option="ON")
# Test shared not defined and fpic=True
assert_fpic(settings, input_shared=None, input_fpic=True, expected_option="ON")
# Test shared not defined and fpic not defined
assert_fpic(settings, input_shared=None, input_fpic=None, expected_option=None)
# Test shared True and fpic not defined
assert_fpic(settings, input_shared=True, input_fpic=None, expected_option=None)
# Test nothing in Windows
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86_64"
assert_fpic(settings, input_shared=True, input_fpic=True, expected_option=None)
def cmake_make_program_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
conan_file = ConanFileMock()
conan_file.settings = settings
conan_file.source_folder = os.path.join(self.tempdir, "my_cache_source_folder")
conan_file.build_folder = os.path.join(self.tempdir, "my_cache_build_folder")
# Existing make
make_path = os.path.join(self.tempdir, "make")
save(make_path, "")
st = os.stat(make_path)
os.chmod(make_path, st.st_mode | stat.S_IEXEC)
with tools.environment_append({"CONAN_MAKE_PROGRAM": make_path}):
cmake = CMake(conan_file)
self.assertEquals(cmake.definitions["CMAKE_MAKE_PROGRAM"], make_path)
# Not existing make
with tools.environment_append({"CONAN_MAKE_PROGRAM": "fake_path/make"}):
cmake = CMake(conan_file)
self.assertNotIn("CMAKE_MAKE_PROGRAM", cmake.definitions)
self.assertIn("The specified make program 'fake_path/make' cannot be found", conan_file.output)
def folders_test(self):
def quote_var(var):
return "'%s'" % var if platform.system() != "Windows" else var
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
conan_file = ConanFileMock()
conan_file.settings = settings
conan_file.source_folder = os.path.join(self.tempdir, "my_cache_source_folder")
conan_file.build_folder = os.path.join(self.tempdir, "my_cache_build_folder")
with tools.chdir(self.tempdir):
cmake = CMake(conan_file)
cmake.configure(source_dir="../subdir", build_dir="build")
linux_stuff = '-DCMAKE_SYSTEM_NAME="Linux" ' \
'-DCMAKE_SYSROOT="/path/to/sysroot" ' if platform.system() != "Linux" else ""
generator = "MinGW Makefiles" if platform.system() == "Windows" else "Unix Makefiles"
base_cmd = ' && cmake -G "%s" -DCMAKE_BUILD_TYPE="Release" %s' \
'-DCONAN_EXPORTED="1" -DCONAN_COMPILER="gcc" ' \
'-DCONAN_COMPILER_VERSION="6.3" ' \
'-DCONAN_CXX_FLAGS="-m32" -DCONAN_SHARED_LINKER_FLAGS="-m32" ' \
'-DCONAN_C_FLAGS="-m32" -Wno-dev ' % (generator, linux_stuff)
build_expected = quote_var("build")
source_expected = quote_var("../subdir")
self.assertEquals(conan_file.command, 'cd %s' % build_expected + base_cmd + source_expected)
cmake.configure(build_dir="build")
build_expected = quote_var("build")
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder"))
self.assertEquals(conan_file.command, 'cd %s' % build_expected + base_cmd + source_expected)
cmake.configure()
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder"))
self.assertEquals(conan_file.command, 'cd %s' % build_expected + base_cmd + source_expected)
cmake.configure(source_folder="source", build_folder="build")
build_expected = quote_var(os.path.join(os.path.join(self.tempdir, "my_cache_build_folder", "build")))
source_expected = quote_var(os.path.join(os.path.join(self.tempdir, "my_cache_source_folder", "source")))
self.assertEquals(conan_file.command, 'cd %s' % build_expected + base_cmd + source_expected)
conan_file.in_local_cache = True
cmake.configure(source_folder="source", build_folder="build",
cache_build_folder="rel_only_cache")
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder", "rel_only_cache"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder", "source"))
self.assertEquals(conan_file.command, 'cd %s' % build_expected + base_cmd + source_expected)
conan_file.in_local_cache = False
cmake.configure(source_folder="source", build_folder="build",
cache_build_folder="rel_only_cache")
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder", "build"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder", "source"))
self.assertEquals(conan_file.command, 'cd %s' % build_expected + base_cmd + source_expected)
conan_file.in_local_cache = True
cmake.configure(build_dir="build", cache_build_folder="rel_only_cache")
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder", "rel_only_cache"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder"))
self.assertEquals(conan_file.command, 'cd %s' % build_expected + base_cmd + source_expected)
# Raise mixing
with self.assertRaisesRegexp(ConanException, "Use 'build_folder'/'source_folder'"):
cmake.configure(source_folder="source", build_dir="build")
def build_type_overwrite_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
cmake.build_type = "Debug"
self.assertIn('WARN: Set CMake build type "Debug" is different than the '
'settings build_type "Release"', conan_file.output)
self.assertEquals(cmake.build_type, "Debug")
self.assertIn('-DCMAKE_BUILD_TYPE="Debug"', cmake.command_line)
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertNotIn('WARN: Set CMake build type ', conan_file.output)
self.assertEquals(cmake.build_type, "Release")
# Now with visual, (multiconfig)
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.build_type = "Release"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
cmake.build_type = "Debug"
self.assertIn('WARN: Set CMake build type "Debug" is different than the '
'settings build_type "Release"', conan_file.output)
self.assertEquals(cmake.build_type, "Debug")
self.assertNotIn('-DCMAKE_BUILD_TYPE="Debug"', cmake.command_line)
self.assertIn("--config Debug", cmake.build_config)
cmake = CMake(conan_file)
cmake.build_type = "Release"
self.assertIn("--config Release", cmake.build_config)
def loads_default_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conan_file = ConanFileMock()
conan_file.settings = settings
def check(text, build_config, generator=None, set_cmake_flags=False):
os = str(settings.os)
os_ver = str(settings.os.version) if settings.get_safe('os.version') else None
for cmake_system_name in (True, False):
cross_ver = ("-DCMAKE_SYSTEM_VERSION=\"%s\" " % os_ver) if os_ver else ""
cross = ("-DCMAKE_SYSTEM_NAME=\"%s\" %s-DCMAKE_SYSROOT=\"/path/to/sysroot\" "
% ({"Macos": "Darwin"}.get(os, os), cross_ver)
if (platform.system() != os and cmake_system_name) else "")
cmake = CMake(conan_file, generator=generator, cmake_system_name=cmake_system_name,
set_cmake_flags=set_cmake_flags)
new_text = text.replace("-DCONAN_EXPORTED", "%s-DCONAN_EXPORTED" % cross)
if "Visual Studio" in text:
cores = ('-DCONAN_CXX_FLAGS="/MP{0}" '
'-DCONAN_C_FLAGS="/MP{0}" '.format(tools.cpu_count()))
new_text = new_text.replace("-Wno-dev", "%s-Wno-dev" % cores)
self.assertEqual(new_text, cmake.command_line)
self.assertEqual(build_config, cmake.build_config)
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
"")
check('-G "Custom Generator" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
'', generator="Custom Generator")
check('-G "Custom Generator" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
'', generator="Custom Generator", set_cmake_flags=True)
settings.build_type = "Debug"
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
'--config Debug')
settings.arch = "x86_64"
check('-G "Visual Studio 12 2013 Win64" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
'--config Debug')
settings.compiler = "gcc"
settings.compiler.version = "4.8"
generator = "MinGW Makefiles" if platform.system() == "Windows" else "Unix Makefiles"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="gcc" -DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" -Wno-dev' % generator, "")
settings.os = "Linux"
settings.arch = "x86"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" -Wno-dev' % generator,
"")
settings.arch = "x86_64"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" -Wno-dev' % generator,
"")
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_CXX_FLAGS="-m64" -DCMAKE_SHARED_LINKER_FLAGS="-m64" -DCMAKE_C_FLAGS="-m64" '
'-Wno-dev' % generator,
"", set_cmake_flags=True)
settings.os = "FreeBSD"
settings.compiler = "clang"
settings.compiler.version = "3.8"
settings.arch = "x86"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_COMPILER="clang" '
'-DCONAN_COMPILER_VERSION="3.8" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" -Wno-dev' % generator,
"")
settings.arch = "x86_64"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_COMPILER="clang" '
'-DCONAN_COMPILER_VERSION="3.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" -Wno-dev' % generator,
"")
settings.os = "SunOS"
settings.compiler = "sun-cc"
settings.compiler.version = "5.10"
settings.arch = "x86"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" -Wno-dev' % generator,
"")
settings.arch = "x86_64"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" -Wno-dev' % generator,
"")
settings.arch = "sparc"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" -Wno-dev' % generator,
"")
settings.arch = "sparcv9"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" -Wno-dev' % generator,
"")
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.os = "WindowsStore"
settings.os.version = "8.1"
settings.build_type = "Debug"
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
"--config Debug")
settings.os.version = "10.0"
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
"--config Debug")
def deleted_os_test(self):
partial_settings = """
os: [Linux]
arch: [x86_64]
compiler:
gcc:
version: ["4.9"]
build_type: [ Release]
"""
settings = Settings.loads(partial_settings)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "4.9"
settings.arch = "x86_64"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
generator = "Unix" if platform.system() != "Windows" else "MinGW"
cross = "-DCMAKE_SYSTEM_NAME=\"Linux\" -DCMAKE_SYSROOT=\"/path/to/sysroot\" " if platform.system() != "Linux" else ""
self.assertEqual('-G "%s Makefiles" %s-DCONAN_EXPORTED="1" -DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.9" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" -Wno-dev' % (generator, cross),
cmake.command_line)
def test_sysroot(self):
settings = Settings.loads(default_settings_yml)
conan_file = ConanFileMock()
conan_file.settings = settings
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
settings.os = "Windows"
cmake = CMake(conan_file)
if platform.system() == "Windows":
self.assertNotIn("-DCMAKE_SYSROOT=", cmake.flags)
# Now activate cross build and check sysroot and system processor
with(tools.environment_append({"CONAN_CMAKE_SYSTEM_NAME": "Android",
"CONAN_CMAKE_SYSTEM_PROCESSOR": "somevalue"})):
cmake = CMake(conan_file)
self.assertEquals(cmake.definitions["CMAKE_SYSROOT"], "/path/to/sysroot")
self.assertEquals(cmake.definitions["CMAKE_SYSTEM_PROCESSOR"], "somevalue")
def test_deprecated_behaviour(self):
""""Remove when deprecate the old settings parameter to CMake and
conanfile to configure/build/test"""
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
conan_file = ConanFileMock()
conan_file.settings = settings
with self.assertRaises(ConanException):
CMake(settings)
def test_cores_ancient_visual(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "9"
settings.compiler.runtime = "MDd"
settings.arch = "x86"
settings.build_type = None
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
cmake.build()
self.assertNotIn("/m", conan_file.command)
settings.compiler.version = "10"
cmake = CMake(conan_file)
cmake.build()
self.assertIn("/m", conan_file.command)
def convenient_functions_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Android"
settings.os_build = "Windows" # Here we are declaring we are cross building
settings.compiler = "gcc"
settings.compiler.version = "5.4"
settings.arch = "armv7"
settings.build_type = None
if platform.system() == 'Windows':
dot_dir = "."
tempdir = self.tempdir
else:
dot_dir = "'.'"
tempdir = "'" + self.tempdir + "'"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
cross = '-DCMAKE_SYSTEM_NAME="Android"' \
' -DCMAKE_SYSROOT="/path/to/sysroot"' \
' -DCMAKE_ANDROID_ARCH_ABI="armeabi-v7a"'
target_test = CMakeTest.scape('--target test')
cmake.configure()
self.assertEqual('cd {0} && cmake -G "MinGW Makefiles" '
'{1} -DCONAN_EXPORTED="1"'
' -DCONAN_COMPILER="gcc" -DCONAN_COMPILER_VERSION="5.4" '
'-Wno-dev {0}'.format(dot_dir, cross),
conan_file.command)
cmake.build()
self.assertEqual('cmake --build %s %s' %
(dot_dir, (CMakeTest.scape('-- -j%i' % cpu_count()))), conan_file.command)
cmake.test()
self.assertEqual('cmake --build %s %s %s' %
(dot_dir, target_test, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
settings.build_type = "Debug"
cmake = CMake(conan_file)
cmake.build()
self.assertEqual('cmake --build %s %s' %
(dot_dir, (CMakeTest.scape('-- -j%i' % cpu_count()))), conan_file.command)
cmake.test()
self.assertEqual('cmake --build %s %s %s' %
(dot_dir, target_test, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
cmake.configure(source_dir="/source", build_dir=self.tempdir,
args=['--foo "bar"'], defs={"SHARED": True})
if sys.platform == 'win32':
escaped_args = r'"--foo \"bar\"" -DSHARED="True" /source'
else:
escaped_args = "'--foo \"bar\"' -DSHARED=\"True\" '/source'"
self.assertEqual('cd %s && cmake -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE="Debug" '
'%s -DCONAN_EXPORTED="1" '
'-DCONAN_COMPILER="gcc" -DCONAN_COMPILER_VERSION="5.4" '
'-Wno-dev %s' % (tempdir, cross, escaped_args),
conan_file.command)
cmake.build(args=["--bar 'foo'"], target="install")
if platform.system() == 'Windows':
escaped_args = '--target install "--bar \'foo\'"'
else:
escaped_args = r"'--target' 'install' '--bar '\''foo'\'''"
self.assertEqual('cmake --build %s %s %s'
% (tempdir, escaped_args, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
cmake.test(args=["--bar 'foo'"])
if sys.platform == 'win32':
escaped_args = '%s "--bar \'foo\'"' % target_test
else:
escaped_args = r"%s '--bar '\''foo'\'''" % target_test
self.assertEqual('cmake --build %s %s %s' %
(tempdir, escaped_args, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
settings.build_type = "Release"
cmake = CMake(conan_file)
cmake.build()
self.assertEqual('cmake --build %s %s' %
(dot_dir, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
cmake.test()
self.assertEqual('cmake --build %s %s %s'
% (dot_dir, target_test, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
cmake.build(build_dir=self.tempdir)
self.assertEqual('cmake --build %s %s'
% (tempdir, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
cmake.test(build_dir=self.tempdir)
self.assertEqual('cmake --build %s %s %s'
% (tempdir, target_test, (CMakeTest.scape('-- -j%i' % cpu_count()))),
conan_file.command)
settings.compiler = "gcc"
settings.compiler.version = "5.4"
cmake = CMake(conan_file)
cmake.build()
self.assertEqual('cmake --build %s' % (CMakeTest.scape('. -- -j%i' % cpu_count())),
conan_file.command)
cmake.test()
self.assertEqual('cmake --build '
'%s' % (CMakeTest.scape('. --target test -- -j%i' % cpu_count())),
conan_file.command)
cmake.build(args=['foo', '--', 'bar'])
self.assertEqual('cmake --build %s' % (CMakeTest.scape('. foo -- bar -j%i' % cpu_count())),
conan_file.command)
cmake.test(args=['foo', '--', 'bar'])
self.assertEqual('cmake --build '
'%s' % (CMakeTest.scape('. --target test foo -- bar -j%i' % cpu_count())),
conan_file.command)
cmake = CMake(conan_file, parallel=False)
cmake.build()
self.assertEqual('cmake --build %s' % CMakeTest.scape('.'), conan_file.command)
cmake.test()
self.assertEqual('cmake --build %s' % CMakeTest.scape('. --target test'),
conan_file.command)
def test_run_tests(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
settings.compiler.runtime = "MDd"
settings.arch = "x86"
settings.build_type = None
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
cmake.test()
self.assertIn('cmake --build '
'%s' % CMakeTest.scape('. --target RUN_TESTS -- /m:%i' % cpu_count()),
conan_file.command)
cmake.generator = "Ninja Makefiles"
cmake.test()
self.assertEqual('cmake --build '
'%s' % CMakeTest.scape('. --target test -- -j%i' % cpu_count()),
conan_file.command)
cmake.generator = "NMake Makefiles"
cmake.test()
self.assertEqual('cmake --build '
'%s' % CMakeTest.scape('. --target test'),
conan_file.command)
def test_clean_sh_path(self):
if platform.system() != "Windows":
return
os.environ["PATH"] = os.environ.get("PATH", "") + os.pathsep + self.tempdir
save(os.path.join(self.tempdir, "sh.exe"), "Fake sh")
conanfile = ConanFileMock()
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.configure()
self.assertIn(self.tempdir, conanfile.path)
cmake.generator = "MinGW Makefiles"
cmake.configure()
self.assertNotIn(self.tempdir, conanfile.path)
# Automatic gcc
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "gcc"
settings.compiler.version = "5.4"
settings.arch = "x86"
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.configure()
self.assertNotIn(self.tempdir, conanfile.path)
def test_pkg_config_path(self):
conanfile = ConanFileMock()
conanfile.generators = ["pkg_config"]
conanfile.install_folder = "/my_install/folder/"
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.configure()
self.assertEquals(conanfile.captured_env["PKG_CONFIG_PATH"], "/my_install/folder/")
conanfile.generators = []
cmake = CMake(conanfile)
cmake.configure()
self.assertNotIn("PKG_CONFIG_PATH", conanfile.captured_env)
cmake = CMake(conanfile)
cmake.configure(pkg_config_paths=["reldir1", "/abspath2/to/other"])
self.assertEquals(conanfile.captured_env["PKG_CONFIG_PATH"],
os.path.pathsep.join(["/my_install/folder/reldir1",
"/abspath2/to/other"]))
# If there is already a PKG_CONFIG_PATH do not set it
conanfile.generators = ["pkg_config"]
cmake = CMake(conanfile)
with tools.environment_append({"PKG_CONFIG_PATH": "do_not_mess_with_this"}):
cmake.configure()
self.assertEquals(conanfile.captured_env["PKG_CONFIG_PATH"], "do_not_mess_with_this")
def test_shared(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
settings.os = "Windows"
conan_file = ConanFileMock(shared=True)
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertEquals(cmake.definitions["BUILD_SHARED_LIBS"], "ON")
conan_file = ConanFileMock(shared=False)
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertEquals(cmake.definitions["BUILD_SHARED_LIBS"], "OFF")
conan_file = ConanFileMock(shared=None)
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertNotIn("BUILD_SHARED_LIBS", cmake.definitions)
def test_verbose(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertNotIn("CMAKE_VERBOSE_MAKEFILE", cmake.definitions)
cmake.verbose = True
self.assertEquals(cmake.definitions["CMAKE_VERBOSE_MAKEFILE"], "ON")
cmake.verbose = False
self.assertEquals(cmake.definitions["CMAKE_VERBOSE_MAKEFILE"], "OFF")
cmake.definitions["CMAKE_VERBOSE_MAKEFILE"] = True
self.assertTrue(cmake.verbose)
cmake.definitions["CMAKE_VERBOSE_MAKEFILE"] = False
self.assertFalse(cmake.verbose)
del cmake.definitions["CMAKE_VERBOSE_MAKEFILE"]
self.assertFalse(cmake.verbose)
def set_toolset_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.compiler.toolset = "v140" # Will be overwritten by parameter
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file, toolset="v141")
self.assertIn('-T "v141"', cmake.command_line)
# DEPRECATED VARIABLE, NOT MODIFY ANYMORE THE TOOLSET
with tools.environment_append({"CONAN_CMAKE_TOOLSET": "v141"}):
cmake = CMake(conan_file)
self.assertNotIn('-T "v141"', cmake.command_line)
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.compiler.toolset = "v140"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertIn('-T "v140"', cmake.command_line)
def test_missing_settings(self):
def instance_with_os_build(os_build):
settings = Settings.loads(default_settings_yml)
settings.os_build = os_build
conan_file = ConanFileMock()
conan_file.settings = settings
return CMake(conan_file)
cmake = instance_with_os_build("Linux")
self.assertEquals(cmake.generator, "Unix Makefiles")
cmake = instance_with_os_build("Macos")
self.assertEquals(cmake.generator, "Unix Makefiles")
cmake = instance_with_os_build("Windows")
self.assertEquals(cmake.generator, None)
with tools.environment_append({"CONAN_CMAKE_GENERATOR": "MyCoolGenerator"}):
cmake = instance_with_os_build("Windows")
self.assertEquals(cmake.generator, "MyCoolGenerator")
def test_cmake_system_version_android(self):
with tools.environment_append({"CONAN_CMAKE_SYSTEM_NAME": "SomeSystem",
"CONAN_CMAKE_GENERATOR": "SomeGenerator"}):
settings = Settings.loads(default_settings_yml)
settings.os = "WindowsStore"
settings.os.version = "8.1"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertEquals(cmake.definitions["CMAKE_SYSTEM_VERSION"], "8.1")
settings = Settings.loads(default_settings_yml)
settings.os = "Android"
settings.os.api_level = "32"
conan_file = ConanFileMock()
conan_file.settings = settings
cmake = CMake(conan_file)
self.assertEquals(cmake.definitions["CMAKE_SYSTEM_VERSION"], "32")
@staticmethod
def scape(args):
pattern = "%s" if sys.platform == "win32" else r"'%s'"
return ' '.join(pattern % i for i in args.split())
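# Illustrative behaviour of the helper above (platform dependent):
#   on win32:    scape('-- -j4')  ->  '-- -j4'
#   elsewhere:   scape('-- -j4')  ->  "'--' '-j4'"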
class ConanFileMock(ConanFile):
def __init__(self, shared=None, options=None, options_values=None):
options = options or ""
self.command = None
self.path = None
self.source_folder = self.build_folder = "."
self.settings = None
self.options = Options(PackageOptions.loads(options))
if options_values:
for var, value in options_values.items():
self.options._data[var] = value
self.deps_cpp_info = namedtuple("deps_cpp_info", "sysroot")("/path/to/sysroot")
self.output = TestBufferConanOutput()
self.in_local_cache = False
self.install_folder = "myinstallfolder"
if shared is not None:
self.options = namedtuple("options", "shared")(shared)
self.should_configure = True
self.should_build = True
self.should_install = True
self.generators = []
self.captured_env = {}
def run(self, command):
self.command = command
self.path = os.environ["PATH"]
self.captured_env = {key: value for key, value in os.environ.items()}
| 43.153846 | 125 | 0.602312 |
b7282a78cc9c2d12f4ea6d2ba55c24b2110513d6 | 3,684 | py | Python | python/oneflow/nn/modules/reshape.py | Zhangchangh/oneflow | 4ea3935458cc83dcea0abd88dd613f09c57dc01a | ["Apache-2.0"] | null | null | null | python/oneflow/nn/modules/reshape.py | Zhangchangh/oneflow | 4ea3935458cc83dcea0abd88dd613f09c57dc01a | ["Apache-2.0"] | null | null | null | python/oneflow/nn/modules/reshape.py | Zhangchangh/oneflow | 4ea3935458cc83dcea0abd88dd613f09c57dc01a | ["Apache-2.0"] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Sequence
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
class Reshape(Module):
def __init__(self, shape: Sequence[int]) -> None:
super().__init__()
self.shape = shape
def forward(self, x):
return flow.F.reshape(x, shape=self.shape)
@register_tensor_op("reshape")
def reshape_op(x, shape: Sequence[int] = None):
"""This operator reshapes a Tensor.
    We can set one dimension in `shape` to `-1`; in that case, the operator will infer the complete shape.
Args:
x: A Tensor.
shape: Shape of the output tensor.
Returns:
        A Tensor with the same type as `x`.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = np.array(
... [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
... ).astype(np.float32)
>>> input = flow.Tensor(x)
>>> y = flow.reshape(input, shape=[2, 2, 2, -1]).shape
>>> y
flow.Size([2, 2, 2, 2])
"""
return Reshape(shape=shape)(x)
@register_tensor_op("view")
def view_op(x, shape: Sequence[int] = None):
"""
The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.Tensor.view.html
Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.
The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \\dots, d+k` that satisfy the following
contiguity-like condition that :math:`\\forall i = d, \\dots, d+k-1`,
.. math::
\\text{stride}[i] = \\text{stride}[i+1] \\times \\text{size}[i+1]
Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
returns a view if the shapes are compatible, and copies (equivalent to calling
:meth:`contiguous`) otherwise.
Args:
x: A Tensor.
shape: Shape of the output tensor.
Returns:
        A Tensor with the same type as `x`.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = np.array(
... [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
... ).astype(np.float32)
>>> input = flow.Tensor(x)
>>> y = flow.view(input, shape=[2, 2, 2, -1]).numpy().shape
>>> y
(2, 2, 2, 2)
"""
return Reshape(shape=shape)(x)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 30.957983
| 106
| 0.635722
|
23f4892bd188872ba6dca01e0ea0c762d99831b2
| 49
|
py
|
Python
|
dir1/my_code1.py
|
mbaseer07/Pyneta
|
d12f185f030f4c4d88045d2083275272504676f6
|
[
"Apache-2.0"
] | null | null | null |
dir1/my_code1.py
|
mbaseer07/Pyneta
|
d12f185f030f4c4d88045d2083275272504676f6
|
[
"Apache-2.0"
] | null | null | null |
dir1/my_code1.py
|
mbaseer07/Pyneta
|
d12f185f030f4c4d88045d2083275272504676f6
|
[
"Apache-2.0"
] | null | null | null |
Print ("Hello")
Print ("Hello")
Print ("Hello")
| 9.8
| 15
| 0.612245
|
83e8e32d8f77fcede90b4dbf16b2e8bfafdda0f7
| 134
|
py
|
Python
|
archive/urls.py
|
boxed/forum
|
abb3699d310bf3a404f031a3cb0e4bdbf403da5a
|
[
"BSD-3-Clause"
] | 2
|
2019-06-28T16:30:44.000Z
|
2020-12-28T01:46:52.000Z
|
archive/urls.py
|
boxed/forum
|
abb3699d310bf3a404f031a3cb0e4bdbf403da5a
|
[
"BSD-3-Clause"
] | 14
|
2019-02-26T17:25:54.000Z
|
2019-04-03T18:11:24.000Z
|
archive/urls.py
|
boxed/forum
|
abb3699d310bf3a404f031a3cb0e4bdbf403da5a
|
[
"BSD-3-Clause"
] | 1
|
2019-06-14T14:21:47.000Z
|
2019-06-14T14:21:47.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('<path:path>', views.index),
path('', views.index),
]
| 14.888889
| 37
| 0.641791
|
5016fe209a174cf0398a6695ce260f6333813469
| 3,347
|
py
|
Python
|
IT LAB/lab5/Question3/Question1/Question1/settings.py
|
Sahil1515/Sem6-Labs
|
cabedbccc2a5272365c250e2d3f4e3a3e802233f
|
[
"MIT"
] | 1
|
2021-03-03T16:26:59.000Z
|
2021-03-03T16:26:59.000Z
|
IT LAB/lab5/Question3/Question1/Question1/settings.py
|
Sahil1515/sem6-Labs
|
cabedbccc2a5272365c250e2d3f4e3a3e802233f
|
[
"MIT"
] | null | null | null |
IT LAB/lab5/Question3/Question1/Question1/settings.py
|
Sahil1515/sem6-Labs
|
cabedbccc2a5272365c250e2d3f4e3a3e802233f
|
[
"MIT"
] | null | null | null |
"""
Django settings for Question1 project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-2dbiv@u#k#_s6o%drul3umlkfro5v!*4ps3-mzfv=)30%3v8aj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'Question',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Question1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Question1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.549618
| 91
| 0.703018
|
fd0e78d8411110f3d4571b697788495380da4106
| 543
|
py
|
Python
|
manage.py
|
Audiopolis/IoTConnect
|
394559af7142802541be2dfda28b93a567ffc1d0
|
[
"MIT"
] | 1
|
2022-01-10T09:53:57.000Z
|
2022-01-10T09:53:57.000Z
|
manage.py
|
Audiopolis/IoTConnect
|
394559af7142802541be2dfda28b93a567ffc1d0
|
[
"MIT"
] | null | null | null |
manage.py
|
Audiopolis/IoTConnect
|
394559af7142802541be2dfda28b93a567ffc1d0
|
[
"MIT"
] | 1
|
2022-01-10T09:54:24.000Z
|
2022-01-10T09:54:24.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uninett_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.9375
| 75
| 0.688766
|
977a9af42f065083a8090792f405ea7e8ea02c81
| 3,649
|
py
|
Python
|
objects/CSCG/_3d/__tests__/unittests/Poisson/hdMSEM_inner.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | 1
|
2020-10-14T12:48:35.000Z
|
2020-10-14T12:48:35.000Z
|
objects/CSCG/_3d/__tests__/unittests/Poisson/hdMSEM_inner.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
objects/CSCG/_3d/__tests__/unittests/Poisson/hdMSEM_inner.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
"""
Here we use the hdMSEM to solve the inner-orientated version of the Poisson problem. We do this to
test the hybridization of 0-forms.
"""
import sys
if './' not in sys.path: sys.path.append('./')
import random
from objects.CSCG._3d.__init__ import mesh as mesh3
from objects.CSCG._3d.__init__ import space as space3
from objects.CSCG._3d.__init__ import form as form3
from objects.CSCG._3d.__init__ import exact_solution as es3
from root.config.main import *
from tools.linear_algebra.elementwise_cache.objects.sparse_matrix.main import EWC_SparseMatrix
from tools.linear_algebra.elementwise_cache.objects.column_vector.main import EWC_ColumnVector
from tools.linear_algebra.elementwise_cache.operators.bmat.main import bmat
from tools.linear_algebra.elementwise_cache.operators.concatenate.main import concatenate
from tools.linear_algebra.linear_system.main import LinearSystem
def test_hdMSEM_Poisson_Inner():
""""""
mesh = mesh3('crazy', c=0, bounds=([0.125, 1.125], [0.125, 1.125], [0.125, 1.125]))(
[4, 3, 5], EDM=None)
space = space3('polynomials')([('Lobatto', 3), ('Lobatto', 4), ('Lobatto', 2)])
FC = form3(mesh, space)
ES = es3(mesh)('Poisson:sincos1')
all_boundaries = mesh.boundaries.names
if rAnk == mAster_rank:
rn = random.randint(1, 5)
boundaries = random.sample(all_boundaries, rn)
else:
boundaries = None
boundaries = cOmm.bcast(boundaries, root=mAster_rank)
u_boundaries = boundaries
if rAnk == mAster_rank:
print(f"inP [hdMSEM_inner_Poisson] @ u_boundaries = {u_boundaries}. ", flush=True)
p_boundaries = list()
for b in all_boundaries:
if b not in u_boundaries:
p_boundaries.append(b)
p = FC('0-f', is_hybrid = True)
u = FC('1-f', is_hybrid = True)
t = FC('0-adt')
e = FC('0-e')
f = FC('0-adf')
p.TW.BC.body = ES.status.potential
p.TW.do.push_BC_to_instant(0)
p.BC.valid_boundaries = p_boundaries
t.prime.TW.BC.body = ES.status.velocity.flux
t.prime.TW.do.push_BC_to_instant(0)
t.BC.valid_boundaries = u_boundaries
I = EWC_SparseMatrix(mesh, ('identity', u.num.basis))
E10 = p.matrices.incidence
E01 = E10.T
M1 = u.matrices.mass
T0T = t.matrices.trace.T
T, D, C, b2, eGM = p.special.hybrid_pairing(t, e)
A = bmat([( I, -E10, None, None),
(-E01 @ M1, None, T0T, None),
( None, T, D, C),
( None, None, C.T, None)])
A.gathering_matrices = [(u, p, t, eGM), (u, p, t, eGM)]
b0 = EWC_ColumnVector(mesh, u)
b0.gathering_matrix = u
f.prime.TW.func.body = ES.status.source_term
f.prime.TW.do.push_all_to_instant(0)
f.prime.discretize()
b1 = - f.cochain.EWC
b1.gathering_matrix = p
b2.gathering_matrix = t
b3 = EWC_ColumnVector(mesh, e)
b3.gathering_matrix = eGM
b = concatenate([b0, b1, b2, b3])
LS = LinearSystem(A, b)
results = LS.solve('direct')()[0]
results.do.distributed_to(u, p, t)
p.TW.func.body = ES.status.potential
p.TW.do.push_all_to_instant(0)
p_error_L2 = p.error.L()
u.TW.func.body = ES.status.velocity
u.TW.do.push_all_to_instant(0)
u_error_L2 = u.error.L()
du = FC('1-adf', u)
u_error_dH1 = du.error.dH(t, - ES.status.source_term)
f_error_L2 = f.prime.error.L()
assert p_error_L2 < 0.005
assert u_error_L2 < 0.14
assert u_error_dH1 < 0.5
assert f_error_L2 < 0.5
return 1
if __name__ == '__main__':
# mpiexec -n 4 python objects/CSCG/_3d/__tests__/unittests/Poisson/hdMSEM_inner.py
test_hdMSEM_Poisson_Inner()
| 29.909836
| 98
| 0.658263
|
9ec155b69525bc39cde97dc5b82b82fb0a8381ab
| 11,135
|
py
|
Python
|
kornia/geometry/epipolar/essential.py
|
Ishticode/kornia
|
974abb43ec72d12dbd244a2fb247bbbab8498de0
|
[
"ECL-2.0",
"Apache-2.0"
] | 418
|
2018-10-02T22:31:36.000Z
|
2019-01-16T14:15:45.000Z
|
kornia/geometry/epipolar/essential.py
|
Ishticode/kornia
|
974abb43ec72d12dbd244a2fb247bbbab8498de0
|
[
"ECL-2.0",
"Apache-2.0"
] | 94
|
2019-01-17T22:10:45.000Z
|
2019-05-22T23:47:58.000Z
|
kornia/geometry/epipolar/essential.py
|
Ishticode/kornia
|
974abb43ec72d12dbd244a2fb247bbbab8498de0
|
[
"ECL-2.0",
"Apache-2.0"
] | 25
|
2018-10-02T22:50:04.000Z
|
2019-01-13T18:14:11.000Z
|
"""Module containing functionalities for the Essential matrix."""
from typing import Optional, Tuple
import torch
from kornia.utils import eye_like, vec_like
from .numeric import cross_product_matrix
from .projection import depth_from_point, projection_from_KRt
from .triangulation import triangulate_points
__all__ = [
"essential_from_fundamental",
"decompose_essential_matrix",
"essential_from_Rt",
"motion_from_essential",
"motion_from_essential_choose_solution",
"relative_camera_motion",
]
def essential_from_fundamental(F_mat: torch.Tensor, K1: torch.Tensor, K2: torch.Tensor) -> torch.Tensor:
r"""Get Essential matrix from Fundamental and Camera matrices.
Uses the method from Hartley/Zisserman 9.6 pag 257 (formula 9.12).
Args:
F_mat: The fundamental matrix with shape of :math:`(*, 3, 3)`.
K1: The camera matrix from first camera with shape :math:`(*, 3, 3)`.
K2: The camera matrix from second camera with shape :math:`(*, 3, 3)`.
Returns:
The essential matrix with shape :math:`(*, 3, 3)`.
"""
if not (len(F_mat.shape) >= 2 and F_mat.shape[-2:] == (3, 3)):
raise AssertionError(F_mat.shape)
if not (len(K1.shape) >= 2 and K1.shape[-2:] == (3, 3)):
raise AssertionError(K1.shape)
if not (len(K2.shape) >= 2 and K2.shape[-2:] == (3, 3)):
raise AssertionError(K2.shape)
if not len(F_mat.shape[:-2]) == len(K1.shape[:-2]) == len(K2.shape[:-2]):
raise AssertionError
return K2.transpose(-2, -1) @ F_mat @ K1
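# Editor's note: a minimal usage sketch (not part of the original kornia module); the
# random fundamental matrix and identity intrinsics are placeholder inputs used only
# to illustrate the expected batched shapes.
def _example_essential_from_fundamental() -> None:
    F_mat = torch.rand(1, 3, 3)  # placeholder fundamental matrix
    K = torch.eye(3)[None]  # identity intrinsics for both cameras
    E_mat = essential_from_fundamental(F_mat, K, K)
    assert E_mat.shape == (1, 3, 3)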
def decompose_essential_matrix(E_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Decompose an essential matrix to possible rotations and translation.
    This function decomposes the essential matrix E using SVD decomposition [96]
    and gives the possible solutions: :math:`R1, R2, t`.
Args:
E_mat: The essential matrix in the form of :math:`(*, 3, 3)`.
Returns:
A tuple containing the first and second possible rotation matrices and the translation vector.
        The shape of the tensors will be the same as the input: :math:`[(*, 3, 3), (*, 3, 3), (*, 3, 1)]`.
"""
    if not (len(E_mat.shape) >= 2 and E_mat.shape[-2:] == (3, 3)):
raise AssertionError(E_mat.shape)
# decompose matrix by its singular values
U, _, V = torch.svd(E_mat)
Vt = V.transpose(-2, -1)
mask = torch.ones_like(E_mat)
mask[..., -1:] *= -1.0 # fill last column with negative values
maskt = mask.transpose(-2, -1)
# avoid singularities
U = torch.where((torch.det(U) < 0.0)[..., None, None], U * mask, U)
Vt = torch.where((torch.det(Vt) < 0.0)[..., None, None], Vt * maskt, Vt)
W = cross_product_matrix(torch.tensor([[0.0, 0.0, 1.0]]).type_as(E_mat))
W[..., 2, 2] += 1.0
# reconstruct rotations and retrieve translation vector
U_W_Vt = U @ W @ Vt
U_Wt_Vt = U @ W.transpose(-2, -1) @ Vt
# return values
R1 = U_W_Vt
R2 = U_Wt_Vt
T = U[..., -1:]
return (R1, R2, T)
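# Editor's note: an illustrative sketch (not part of the original module) showing the
# output shapes of the decomposition; the random input is a placeholder, not a valid
# essential matrix.
def _example_decompose_essential_matrix() -> None:
    E_mat = torch.rand(1, 3, 3)
    R1, R2, t = decompose_essential_matrix(E_mat)
    assert R1.shape == (1, 3, 3) and R2.shape == (1, 3, 3) and t.shape == (1, 3, 1)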
def essential_from_Rt(R1: torch.Tensor, t1: torch.Tensor, R2: torch.Tensor, t2: torch.Tensor) -> torch.Tensor:
r"""Get the Essential matrix from Camera motion (Rs and ts).
Reference: Hartley/Zisserman 9.6 pag 257 (formula 9.12)
Args:
R1: The first camera rotation matrix with shape :math:`(*, 3, 3)`.
t1: The first camera translation vector with shape :math:`(*, 3, 1)`.
R2: The second camera rotation matrix with shape :math:`(*, 3, 3)`.
t2: The second camera translation vector with shape :math:`(*, 3, 1)`.
Returns:
The Essential matrix with the shape :math:`(*, 3, 3)`.
"""
if not (len(R1.shape) >= 2 and R1.shape[-2:] == (3, 3)):
raise AssertionError(R1.shape)
if not (len(t1.shape) >= 2 and t1.shape[-2:] == (3, 1)):
raise AssertionError(t1.shape)
if not (len(R2.shape) >= 2 and R2.shape[-2:] == (3, 3)):
raise AssertionError(R2.shape)
if not (len(t2.shape) >= 2 and t2.shape[-2:] == (3, 1)):
raise AssertionError(t2.shape)
# first compute the camera relative motion
R, t = relative_camera_motion(R1, t1, R2, t2)
# get the cross product from relative translation vector
Tx = cross_product_matrix(t[..., 0])
return Tx @ R
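# Editor's note: an illustrative sketch (not part of the original module); identity
# rotations and simple translations are placeholders used to show the expected shapes.
def _example_essential_from_Rt() -> None:
    R = torch.eye(3)[None]
    t1 = torch.zeros(1, 3, 1)
    t2 = torch.ones(1, 3, 1)
    E_mat = essential_from_Rt(R, t1, R, t2)
    assert E_mat.shape == (1, 3, 3)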
def motion_from_essential(E_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Get Motion (R's and t's ) from Essential matrix.
    Computes and returns the four possible poses that exist for the decomposition of the Essential
matrix. The possible solutions are :math:`[R1,t], [R1,−t], [R2,t], [R2,−t]`.
Args:
E_mat: The essential matrix in the form of :math:`(*, 3, 3)`.
Returns:
The rotation and translation containing the four possible combination for the retrieved motion.
The tuple is as following :math:`[(*, 4, 3, 3), (*, 4, 3, 1)]`.
"""
if not (len(E_mat.shape) >= 2 and E_mat.shape[-2:] == (3, 3)):
raise AssertionError(E_mat.shape)
# decompose the essential matrix by its possible poses
R1, R2, t = decompose_essential_matrix(E_mat)
    # combine and return the four possible solutions
Rs = torch.stack([R1, R1, R2, R2], dim=-3)
Ts = torch.stack([t, -t, t, -t], dim=-3)
return (Rs, Ts)
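# Editor's note: an illustrative sketch (not part of the original module); the random
# input is a placeholder used only to show the four stacked pose hypotheses.
def _example_motion_from_essential() -> None:
    E_mat = torch.rand(1, 3, 3)
    Rs, Ts = motion_from_essential(E_mat)
    assert Rs.shape == (1, 4, 3, 3) and Ts.shape == (1, 4, 3, 1)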
def motion_from_essential_choose_solution(
E_mat: torch.Tensor,
K1: torch.Tensor,
K2: torch.Tensor,
x1: torch.Tensor,
x2: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Recover the relative camera rotation and the translation from an estimated essential matrix.
The method checks the corresponding points in two images and also returns the triangulated
3d points. Internally uses :py:meth:`~kornia.geometry.epipolar.decompose_essential_matrix` and then chooses
the best solution based on the combination that gives more 3d points in front of the camera plane from
:py:meth:`~kornia.geometry.epipolar.triangulate_points`.
Args:
E_mat: The essential matrix in the form of :math:`(*, 3, 3)`.
K1: The camera matrix from first camera with shape :math:`(*, 3, 3)`.
K2: The camera matrix from second camera with shape :math:`(*, 3, 3)`.
x1: The set of points seen from the first camera frame in the camera plane
coordinates with shape :math:`(*, N, 2)`.
        x2: The set of points seen from the second camera frame in the camera plane
coordinates with shape :math:`(*, N, 2)`.
mask: A boolean mask which can be used to exclude some points from choosing
the best solution. This is useful for using this function with sets of points of
different cardinality (for instance after filtering with RANSAC) while keeping batch
semantics. Mask is of shape :math:`(*, N)`.
Returns:
The rotation and translation plus the 3d triangulated points.
The tuple is as following :math:`[(*, 3, 3), (*, 3, 1), (*, N, 3)]`.
"""
if not (len(E_mat.shape) >= 2 and E_mat.shape[-2:] == (3, 3)):
raise AssertionError(E_mat.shape)
if not (len(K1.shape) >= 2 and K1.shape[-2:] == (3, 3)):
raise AssertionError(K1.shape)
if not (len(K2.shape) >= 2 and K2.shape[-2:] == (3, 3)):
raise AssertionError(K2.shape)
if not (len(x1.shape) >= 2 and x1.shape[-1] == 2):
raise AssertionError(x1.shape)
if not (len(x2.shape) >= 2 and x2.shape[-1] == 2):
raise AssertionError(x2.shape)
if not len(E_mat.shape[:-2]) == len(K1.shape[:-2]) == len(K2.shape[:-2]):
raise AssertionError
if mask is not None:
if len(mask.shape) < 1:
raise AssertionError(mask.shape)
if mask.shape != x1.shape[:-1]:
raise AssertionError(mask.shape)
unbatched = len(E_mat.shape) == 2
if unbatched:
# add a leading batch dimension. We will remove it at the end, before
# returning the results
E_mat = E_mat[None]
K1 = K1[None]
K2 = K2[None]
x1 = x1[None]
x2 = x2[None]
if mask is not None:
mask = mask[None]
# compute four possible pose solutions
Rs, ts = motion_from_essential(E_mat)
# set reference view pose and compute projection matrix
R1 = eye_like(3, E_mat) # Bx3x3
t1 = vec_like(3, E_mat) # Bx3x1
# compute the projection matrices for first camera
R1 = R1[:, None].expand(-1, 4, -1, -1)
t1 = t1[:, None].expand(-1, 4, -1, -1)
K1 = K1[:, None].expand(-1, 4, -1, -1)
P1 = projection_from_KRt(K1, R1, t1) # 1x4x4x4
# compute the projection matrices for second camera
R2 = Rs
t2 = ts
K2 = K2[:, None].expand(-1, 4, -1, -1)
P2 = projection_from_KRt(K2, R2, t2) # Bx4x4x4
# triangulate the points
x1 = x1[:, None].expand(-1, 4, -1, -1)
x2 = x2[:, None].expand(-1, 4, -1, -1)
X = triangulate_points(P1, P2, x1, x2) # Bx4xNx3
# project points and compute their depth values
d1 = depth_from_point(R1, t1, X)
d2 = depth_from_point(R2, t2, X)
# verify the point values that have a positive depth value
depth_mask = (d1 > 0.0) & (d2 > 0.0)
if mask is not None:
depth_mask &= mask.unsqueeze(1)
mask_indices = torch.max(depth_mask.sum(-1), dim=-1, keepdim=True)[1]
# get pose and points 3d and return
R_out = Rs[:, mask_indices][:, 0, 0]
t_out = ts[:, mask_indices][:, 0, 0]
points3d_out = X[:, mask_indices][:, 0, 0]
if unbatched:
R_out = R_out[0]
t_out = t_out[0]
points3d_out = points3d_out[0]
return R_out, t_out, points3d_out
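# Editor's note: an illustrative (untested) call sketch, not part of the original
# module; it assumes `E_mat`, `K1`, `K2` are (B, 3, 3) tensors and `x1`, `x2` are
# (B, N, 2) matched image points:
#
#     R, t, points3d = motion_from_essential_choose_solution(E_mat, K1, K2, x1, x2)
#     # R: (B, 3, 3), t: (B, 3, 1), points3d: (B, N, 3)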
def relative_camera_motion(
R1: torch.Tensor, t1: torch.Tensor, R2: torch.Tensor, t2: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Compute the relative camera motion between two cameras.
Given the motion parameters of two cameras, computes the motion parameters of the second
one assuming the first one to be at the origin. If :math:`T1` and :math:`T2` are the camera motions,
    the computed relative motion is :math:`T = T_{2}T^{-1}_{1}`.
Args:
R1: The first camera rotation matrix with shape :math:`(*, 3, 3)`.
t1: The first camera translation vector with shape :math:`(*, 3, 1)`.
R2: The second camera rotation matrix with shape :math:`(*, 3, 3)`.
t2: The second camera translation vector with shape :math:`(*, 3, 1)`.
Returns:
A tuple with the relative rotation matrix and
translation vector with the shape of :math:`[(*, 3, 3), (*, 3, 1)]`.
"""
if not (len(R1.shape) >= 2 and R1.shape[-2:] == (3, 3)):
raise AssertionError(R1.shape)
if not (len(t1.shape) >= 2 and t1.shape[-2:] == (3, 1)):
raise AssertionError(t1.shape)
if not (len(R2.shape) >= 2 and R2.shape[-2:] == (3, 3)):
raise AssertionError(R2.shape)
if not (len(t2.shape) >= 2 and t2.shape[-2:] == (3, 1)):
raise AssertionError(t2.shape)
# compute first the relative rotation
R = R2 @ R1.transpose(-2, -1)
# compute the relative translation vector
t = t2 - R @ t1
return (R, t)
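# Editor's note: an illustrative sketch (not part of the original module); with equal
# rotations the relative motion reduces to the translation difference.
def _example_relative_camera_motion() -> None:
    R1 = torch.eye(3)[None]
    t1 = torch.zeros(1, 3, 1)
    R2 = torch.eye(3)[None]
    t2 = torch.ones(1, 3, 1)
    R, t = relative_camera_motion(R1, t1, R2, t2)
    assert torch.allclose(R, R1) and torch.allclose(t, t2 - t1)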
| 37.116667
| 111
| 0.625505
|
6a966441bab93c23cec28555cc721e2722699439
| 22,221
|
py
|
Python
|
src/python/system/process_handler.py
|
tapaswenipathak/clusterfuzz
|
a5468fc736ee42af9e2dd63e24c22ae2c3ac1662
|
[
"Apache-2.0"
] | 1
|
2019-11-09T23:09:00.000Z
|
2019-11-09T23:09:00.000Z
|
src/python/system/process_handler.py
|
tapaswenipathak/clusterfuzz
|
a5468fc736ee42af9e2dd63e24c22ae2c3ac1662
|
[
"Apache-2.0"
] | null | null | null |
src/python/system/process_handler.py
|
tapaswenipathak/clusterfuzz
|
a5468fc736ee42af9e2dd63e24c22ae2c3ac1662
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for process management."""
from future import standard_library
standard_library.install_aliases()
from builtins import object
from past.builtins import basestring
import copy
import datetime
import logging
import os
import queue
import subprocess
import sys
import threading
import time
from base import utils
from crash_analysis import crash_analyzer
from metrics import logs
from platforms import android
from platforms import linux
from platforms import windows
from system import environment
from system import shell
# FIXME: Find a better way to handle this case. These imports
# will fail and are not needed from App Engine.
try:
import multiprocessing
import mozprocess
import psutil
except ImportError:
pass
# On Android, we need to wait a little after a crash occurred to get the full
# logcat output. This makes sure we get all the stack frames since there is no
# effective end marker.
ANDROID_CRASH_LOGCAT_WAIT_TIME = 0.3
# Time in seconds it usually takes to analyze a crash. This is usually high
# in case of Android where it is required to do several adb shell calls.
CRASH_ANALYSIS_TIME = 1.5
# Test timeout if not specified.
DEFAULT_TEST_TIMEOUT = 10
# Time to wait for cleanup after the process has finished.
PROCESS_CLEANUP_WAIT_TIME = 5
# LeakSanitizer needs additional time to process all leaks and dump stacks on
# process shutdown.
LSAN_ANALYSIS_TIME = 1
# Time to wait for thread cleanup (e.g. dumping coverage, etc).
THREAD_FINISH_WAIT_TIME = 2
class ProcessStatus(object):
"""Process exited notification."""
def __init__(self):
self.finished = False
def __call__(self):
self.finished = True
def start_process(process_handle):
"""Start the process using process handle and override list2cmdline for
Windows."""
is_win = environment.platform() == 'WINDOWS'
if is_win:
# Override list2cmdline on Windows to return first index of list as string.
    # This is to work around a mozprocess bug, since it passes the command as a
    # list and not as a string.
subprocess.list2cmdline_orig = subprocess.list2cmdline
subprocess.list2cmdline = lambda s: s[0]
try:
process_handle.run()
finally:
if is_win:
subprocess.list2cmdline = subprocess.list2cmdline_orig
def cleanup_defunct_processes():
"""Cleans up defunct processes."""
# Defunct processes happen only on unix platforms.
if environment.platform() != 'WINDOWS':
while 1:
try:
# Matches any defunct child process.
p, _ = os.waitpid(-1, os.WNOHANG)
if not p:
break
logs.log('Clearing defunct process %s.' % str(p))
except:
break
# Note: changes to this function may require changes to untrusted_runner.proto.
def run_process(cmdline,
current_working_directory=None,
timeout=DEFAULT_TEST_TIMEOUT,
need_shell=False,
gestures=None,
env_copy=None,
testcase_run=True,
ignore_children=True):
"""Executes a process with a given command line and other parameters."""
# FIXME(mbarbella): Using LAUNCHER_PATH here is error prone. It forces us to
# do certain operations before fuzzer setup (e.g. bad build check).
launcher = environment.get_value('LAUNCHER_PATH')
if environment.is_trusted_host() and testcase_run and not launcher:
from bot.untrusted_runner import remote_process_host
return remote_process_host.run_process(
cmdline, current_working_directory, timeout, need_shell, gestures,
env_copy, testcase_run, ignore_children)
if gestures is None:
gestures = []
if env_copy:
os.environ.update(env_copy)
# This is used when running scripts on native linux OS and not on the device.
# E.g. running a fuzzer to generate testcases or launcher script.
plt = environment.platform()
if plt in ['ANDROID', 'FUCHSIA'] and (not testcase_run or launcher):
plt = 'LINUX'
elif plt == 'IOS' and (not testcase_run or launcher):
plt = 'MAC'
# Lower down testcase timeout slightly to account for time for crash analysis.
timeout -= CRASH_ANALYSIS_TIME
# LeakSanitizer hack - give time for stdout/stderr processing.
lsan = environment.get_value('LSAN', False)
if lsan:
timeout -= LSAN_ANALYSIS_TIME
# Initialize variables.
adb_output = None
process_output = ''
process_status = None
return_code = 0
process_poll_interval = environment.get_value('PROCESS_POLL_INTERVAL', 0.5)
start_time = time.time()
watch_for_process_exit = (
environment.get_value('WATCH_FOR_PROCESS_EXIT')
if plt == 'ANDROID' else True)
window_list = []
# Get gesture start time from last element in gesture list.
gestures = copy.deepcopy(gestures)
if gestures and gestures[-1].startswith('Trigger'):
gesture_start_time = int(gestures[-1].split(':')[1])
gestures.pop()
else:
gesture_start_time = timeout // 2
logs.log('Process (%s) started.' % str(cmdline), level=logging.DEBUG)
if plt == 'ANDROID':
# Clear the log upfront.
android.logger.clear_log()
# Run the app.
adb_output = android.adb.run_command(cmdline, timeout=timeout)
else:
cmd, args = shell.get_command_and_arguments(cmdline)
process_output = mozprocess.processhandler.StoreOutput()
process_status = ProcessStatus()
try:
process_handle = mozprocess.ProcessHandlerMixin(
cmd,
args,
cwd=current_working_directory,
shell=need_shell,
processOutputLine=[process_output],
onFinish=[process_status],
ignore_children=ignore_children)
start_process(process_handle)
except:
logs.log_error('Exception occurred when running command: %s.' % cmdline)
return None, None, ''
while True:
time.sleep(process_poll_interval)
# Run the gestures at gesture_start_time or in case we didn't find windows
# in the last try.
if (gestures and time.time() - start_time >= gesture_start_time and
not window_list):
        # In case we don't find any windows, we increment the gesture start time
# so that the next check is after 1 second.
gesture_start_time += 1
if plt == 'LINUX':
linux.gestures.run_gestures(gestures, process_handle.pid,
process_status, start_time, timeout,
window_list)
elif plt == 'WINDOWS':
windows.gestures.run_gestures(gestures, process_handle.pid,
process_status, start_time, timeout,
window_list)
elif plt == 'ANDROID':
android.gestures.run_gestures(gestures, start_time, timeout)
# TODO(mbarbella): We add a fake window here to prevent gestures on
# Android from getting executed more than once.
window_list = ['FAKE']
if time.time() - start_time >= timeout:
break
# Collect the process output.
output = (
android.logger.log_output()
if plt == 'ANDROID' else '\n'.join(process_output.output))
if crash_analyzer.is_memory_tool_crash(output):
break
# Check if we need to bail out on process exit.
if watch_for_process_exit:
# If |watch_for_process_exit| is set, then we already completed running
# our app launch command. So, we can bail out.
if plt == 'ANDROID':
break
# On desktop, we bail out as soon as the process finishes.
if process_status and process_status.finished:
# Wait for process shutdown and set return code.
process_handle.wait(timeout=PROCESS_CLEANUP_WAIT_TIME)
break
# Process output based on platform.
if plt == 'ANDROID':
# Get current log output. If device is in reboot mode, logcat automatically
# waits for device to be online.
time.sleep(ANDROID_CRASH_LOGCAT_WAIT_TIME)
output = android.logger.log_output()
if android.constants.LOW_MEMORY_REGEX.search(output):
# If the device is low on memory, we should force reboot and bail out to
# prevent device from getting in a frozen state.
logs.log('Device is low on memory, rebooting.', output=output)
android.adb.hard_reset()
android.adb.wait_for_device()
elif android.adb.time_since_last_reboot() < time.time() - start_time:
# Check if a reboot has happened, if yes, append log output before reboot
# and kernel logs content to output.
log_before_last_reboot = android.logger.log_output_before_last_reboot()
kernel_log = android.adb.get_kernel_log_content()
output = '%s%s%s%s%s' % (
log_before_last_reboot, utils.get_line_seperator('Device rebooted'),
output, utils.get_line_seperator('Kernel Log'), kernel_log)
# Make sure to reset SE Linux Permissive Mode. This can be done cheaply
# in ~0.15 sec and is needed especially between runs for kernel crashes.
android.adb.run_as_root()
android.settings.change_se_linux_to_permissive_mode()
return_code = 1
# Add output from adb to the front.
if adb_output:
output = '%s\n\n%s' % (adb_output, output)
# Kill the application if it is still running. We do this at the end to
# prevent this from adding noise to the logcat output.
task_name = environment.get_value('TASK_NAME')
child_process_termination_pattern = environment.get_value(
'CHILD_PROCESS_TERMINATION_PATTERN')
if task_name == 'fuzz' and child_process_termination_pattern:
# In some cases, we do not want to terminate the application after each
# run to avoid long startup times (e.g. for chrome). Terminate processes
# matching a particular pattern for light cleanup in this case.
android.adb.kill_processes_and_children_matching_name(
child_process_termination_pattern)
else:
# There is no special termination behavior. Simply stop the application.
android.app.stop()
else:
# Get the return code in case the process has finished already.
# If the process hasn't finished, return_code will be None which is what
# callers expect unless the output indicates a crash.
return_code = process_handle.poll()
# If the process is still running, then terminate it.
if not process_status.finished:
if launcher and cmdline.startswith(launcher):
# If this was a launcher script, we KILL all child processes created
# except for APP_NAME.
# It is expected that, if the launcher script terminated normally, it
# cleans up all the child processes it created itself.
terminate_root_and_child_processes(process_handle.pid)
else:
try:
# kill() here actually sends SIGTERM on posix.
process_handle.kill()
except:
pass
if lsan:
time.sleep(LSAN_ANALYSIS_TIME)
output = '\n'.join(process_output.output)
# X Server hack when max client reached.
if ('Maximum number of clients reached' in output or
'Unable to get connection to X server' in output):
logs.log_error('Unable to connect to X server, exiting.')
os.system('sudo killall -9 Xvfb blackbox >/dev/null 2>&1')
sys.exit(0)
if testcase_run and (crash_analyzer.is_memory_tool_crash(output) or
crash_analyzer.is_check_failure_crash(output)):
return_code = 1
# If a crash is found, then we add the memory state as well.
if return_code and plt == 'ANDROID':
ps_output = android.adb.get_ps_output()
if ps_output:
output += utils.get_line_seperator('Memory Statistics')
output += ps_output
logs.log(
'Process (%s) ended, exit code (%s), output (%s).' %
(str(cmdline), str(return_code), str(output)),
level=logging.DEBUG)
return return_code, round(time.time() - start_time, 1), output
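# Editor's note: an illustrative (hypothetical) call sketch, not part of the original
# module; the command string below is a placeholder, and real callers rely on the bot
# environment variables being set up beforehand:
#
#     return_code, duration, output = run_process(
#         './app --some-flag /path/to/testcase', timeout=25)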
def cleanup_stale_processes():
"""Kill stale processes left behind by a job."""
terminate_multiprocessing_children()
terminate_stale_application_instances()
cleanup_defunct_processes()
def close_queue(queue_to_close):
"""Close the queue."""
if environment.is_trusted_host():
# We don't use multiprocessing.Queue on trusted hosts.
return
try:
queue_to_close.close()
except:
logs.log_error('Unable to close queue.')
def get_process():
"""Return a multiprocessing process object (with bug fixes)."""
if environment.is_trusted_host():
# forking/multiprocessing is unsupported because of the RPC connection.
return threading.Thread
# FIXME(unassigned): Remove this hack after real bug is fixed.
# pylint: disable=protected-access
multiprocessing.current_process()._identity = ()
return multiprocessing.Process
def get_runtime_snapshot():
"""Return a list of current processes and their command lines as string."""
process_strings = []
for process in psutil.process_iter():
try:
process_info = process.as_dict(attrs=['name', 'cmdline', 'pid', 'ppid'])
process_string = '{name} ({pid}, {ppid})'.format(
name=process_info['name'],
pid=process_info['pid'],
ppid=process_info['ppid'])
process_cmd_line = process_info['cmdline']
if process_cmd_line:
process_string += ': {cmd_line}'.format(
cmd_line=(' '.join(process_cmd_line)))
process_strings.append(process_string)
except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
# Ignore the error, use whatever info is available for access.
pass
return '\n'.join(sorted(process_strings))
def get_queue():
"""Return a multiprocessing queue object."""
if environment.is_trusted_host():
# We don't use multiprocessing.Process on trusted hosts. No need to use
# multiprocessing.Queue.
return queue.Queue()
try:
result_queue = multiprocessing.Queue()
except:
# FIXME: Invalid cross-device link error. Happens sometimes with
# chroot jobs even though /dev/shm and /run/shm are mounted.
logs.log_error('Unable to get multiprocessing queue.')
return None
return result_queue
def terminate_hung_threads(threads):
"""Terminate hung threads."""
start_time = time.time()
while time.time() - start_time < THREAD_FINISH_WAIT_TIME:
if not any([thread.is_alive() for thread in threads]):
# No threads are alive, so we're done.
return
time.sleep(0.1)
logs.log_warn('Hang detected.', snapshot=get_runtime_snapshot())
if environment.is_trusted_host():
from bot.untrusted_runner import host
# Bail out on trusted hosts since we're using threads and can't clean up.
host.host_exit_no_return()
# Terminate all threads that are still alive.
try:
[thread.terminate() for thread in threads if thread.is_alive()]
except:
pass
def terminate_root_and_child_processes(root_pid):
"""Terminate the root process along with any children it spawned."""
app_name = environment.get_value('APP_NAME')
direct_children = utils.get_process_ids(root_pid, recursive=False)
for child_pid in direct_children:
# utils.get_process_ids also returns the parent pid.
if child_pid == root_pid:
continue
try:
child = psutil.Process(child_pid)
except Exception:
# Process doesn't exist anymore.
continue
if child.name == app_name:
# Send SIGTERM to the root APP_NAME process only, and none of its children
# so that coverage data will be dumped properly (e.g. the browser process
# of chrome).
# TODO(ochang): Figure out how windows coverage is dumped since there is
# no equivalent of SIGTERM.
terminate_process(child_pid, kill=False)
continue
child_and_grand_children_pids = utils.get_process_ids(
child_pid, recursive=True)
for pid in child_and_grand_children_pids:
terminate_process(pid, kill=True)
terminate_process(root_pid, kill=True)
def terminate_multiprocessing_children():
"""Terminate all children created with multiprocessing module."""
child_list = multiprocessing.active_children()
for child in child_list:
try:
child.terminate()
except:
# Unable to terminate multiprocessing child or was not needed.
pass
def terminate_stale_application_instances():
"""Kill stale instances of the application running for this command."""
if environment.is_trusted_host():
from bot.untrusted_runner import remote_process_host
remote_process_host.terminate_stale_application_instances()
return
# Stale instance cleanup is sometimes disabled for local testing.
if not environment.get_value('KILL_STALE_INSTANCES', True):
return
additional_process_to_kill = environment.get_value(
'ADDITIONAL_PROCESSES_TO_KILL')
builds_directory = environment.get_value('BUILDS_DIR')
llvm_symbolizer_filename = environment.get_executable_filename(
'llvm-symbolizer')
platform = environment.platform()
start_time = time.time()
processes_to_kill = []
# Avoid killing the test binary when running the reproduce tool. It is
  # commonly in use on developer workstations.
if not environment.get_value('REPRODUCE_TOOL'):
app_name = environment.get_value('APP_NAME')
processes_to_kill += [app_name]
if additional_process_to_kill:
processes_to_kill += additional_process_to_kill.split(' ')
processes_to_kill = [x for x in processes_to_kill if x]
if platform == 'ANDROID':
# Cleanup any stale adb connections.
device_serial = environment.get_value('ANDROID_SERIAL')
adb_search_string = 'adb -s %s' % device_serial
# Terminate llvm symbolizer processes matching exact path. This is important
# for Android where multiple device instances run on same host.
llvm_symbolizer_path = environment.get_llvm_symbolizer_path()
terminate_processes_matching_cmd_line(
[adb_search_string, llvm_symbolizer_path], kill=True)
# Make sure device is online and rooted.
android.adb.run_as_root()
# Make sure to reset SE Linux Permissive Mode (might be lost in reboot).
android.settings.change_se_linux_to_permissive_mode()
# Make sure that device forwarder is running (might be lost in reboot or
# process crash).
android.device.setup_host_and_device_forwarder_if_needed()
# Make sure that package optimization is complete (might be triggered due to
# unexpected circumstances).
android.app.wait_until_optimization_complete()
# Reset application state, which kills its pending instances and re-grants
# the storage permissions.
android.app.reset()
elif platform == 'WINDOWS':
processes_to_kill += [
'cdb.exe',
'handle.exe',
'msdt.exe',
'openwith.exe',
'WerFault.exe',
llvm_symbolizer_filename,
]
terminate_processes_matching_names(processes_to_kill, kill=True)
terminate_processes_matching_cmd_line(builds_directory, kill=True)
    # Artificial sleep to let the processes get terminated.
time.sleep(1)
else:
# Handle Linux and Mac platforms.
processes_to_kill += [
'addr2line',
'atos',
'chrome-devel-sandbox',
'gdb',
'nacl_helper',
'xdotool',
llvm_symbolizer_filename,
]
terminate_processes_matching_names(processes_to_kill, kill=True)
terminate_processes_matching_cmd_line(builds_directory, kill=True)
duration = int(time.time() - start_time)
if duration >= 5:
logs.log('Process kill took longer than usual - %s.' % str(
datetime.timedelta(seconds=duration)))
def terminate_process(process_id, kill=False):
"""Terminates a process by its process id."""
try:
process = psutil.Process(process_id)
if kill:
process.kill()
else:
process.terminate()
except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
logs.log_warn('Failed to terminate process.')
def terminate_processes_matching_names(match_strings, kill=False):
"""Terminates processes matching particular names (case sensitive)."""
if isinstance(match_strings, basestring):
match_strings = [match_strings]
for process in psutil.process_iter():
try:
process_info = process.as_dict(attrs=['name', 'pid'])
process_name = process_info['name']
except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
continue
if any(x == process_name for x in match_strings):
terminate_process(process_info['pid'], kill)
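# Editor's note: an illustrative (hypothetical) call sketch, not part of the original
# module; the process names are examples drawn from the lists used elsewhere in this
# file:
#
#     terminate_processes_matching_names(['gdb', 'addr2line'], kill=True)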
def terminate_processes_matching_cmd_line(match_strings,
kill=False,
exclude_strings=None):
"""Terminates processes matching particular command line (case sensitive)."""
if exclude_strings is None:
# By default, do not terminate processes containing butler.py. This is
# important so that the reproduce tool does not terminate itself, as the
# rest of its command line may contain strings we usually terminate such
# as paths to build directories.
exclude_strings = ['butler.py', 'reproduce.sh']
if isinstance(match_strings, basestring):
match_strings = [match_strings]
for process in psutil.process_iter():
try:
process_info = process.as_dict(attrs=['cmdline', 'pid'])
process_cmd_line = process_info['cmdline']
if not process_cmd_line:
continue
process_path = ' '.join(process_cmd_line)
except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
continue
if any(x in process_path for x in match_strings):
if not any([x in process_path for x in exclude_strings]):
terminate_process(process_info['pid'], kill)
| 34.61215
| 80
| 0.703119
|
c984a2bd1d52a4c5957318edacb37409258e5ac8
| 7,286
|
py
|
Python
|
src/recommender.py
|
s0umitra/Recommendation-System-101-MovieTweetings
|
e585e6170588fa534747ab849dfdd89634cdcb6d
|
[
"MIT"
] | 1
|
2020-09-16T14:23:15.000Z
|
2020-09-16T14:23:15.000Z
|
src/recommender.py
|
s0umitra/Recommendation-System-101-MovieTweetings
|
e585e6170588fa534747ab849dfdd89634cdcb6d
|
[
"MIT"
] | null | null | null |
src/recommender.py
|
s0umitra/Recommendation-System-101-MovieTweetings
|
e585e6170588fa534747ab849dfdd89634cdcb6d
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import lib
class Recommender:
"""
This Recommender uses FunkSVD to make predictions of exact ratings and uses either FunkSVD or a Knowledge Based
recommendation (highest ranked) to make recommendations for users. Finally, if given a movie, the recommender
will provide movies that are most similar as a Content Based Recommender.
"""
def __init__(self):
"""
no required attributes to initiate
"""
self.movies = 0
self.reviews = 0
self.user_item_df = 0
self.user_item_mat = 0
self.latent_features = 0
self.learning_rate = 0
self.iterators = 0
self.n_users = 0
self.n_movies = 0
self.num_ratings = 0
self.user_ids_series = 0
self.movie_ids_series = 0
self.user_mat = 0
self.movie_mat = 0
self.ranked_movies = 0
def fit(self, reviews_pth, movies_pth, latent_features=12, learning_rate=0.0001, iterators=100):
"""This function performs matrix factorization using a basic form of FunkSVD with no regularization
:param reviews_pth: path to csv with at least the four columns: 'user_id', 'movie_id', 'rating', 'timestamp'
:param movies_pth: path to csv with each movie and movie information in each row
:param latent_features: (int) the number of latent features used
:param learning_rate: (float) the learning rate
:param iterators: (int) the number of iterations
:return: None
"""
self.reviews = pd.read_csv(reviews_pth)
self.movies = pd.read_csv(movies_pth)
# Create user-item matrix
user_vs_item = self.reviews[['user_id', 'movie_id', 'rating', 'timestamp']]
self.user_item_df = user_vs_item.groupby(['user_id', 'movie_id'])['rating'].max().unstack()
self.user_item_mat = np.array(self.user_item_df)
# Store inputs
self.latent_features = latent_features
self.learning_rate = learning_rate
self.iterators = iterators
self.n_users = self.user_item_mat.shape[0]
self.n_movies = self.user_item_mat.shape[1]
self.num_ratings = np.count_nonzero(~np.isnan(self.user_item_mat))
self.user_ids_series = np.array(self.user_item_df.index)
self.movie_ids_series = np.array(self.user_item_df.columns)
# initialize the user and movie matrices with random values
user_mat = np.random.rand(self.n_users, self.latent_features)
movie_mat = np.random.rand(self.latent_features, self.n_movies)
# keep track of iteration and MSE
print("Optimization Statistics")
print("Iterations | Mean Squared Error ")
# for each iteration
for iteration in range(self.iterators):
# update our sse
sse_accum = 0
# For each user-movie pair
for i in range(self.n_users):
for j in range(self.n_movies):
# if the rating exists
if self.user_item_mat[i, j] > 0:
# compute the error as the actual minus the dot product of the user and movie latent features
diff = self.user_item_mat[i, j] - np.dot(user_mat[i, :], movie_mat[:, j])
# Keep track of the sum of squared errors for the matrix
sse_accum += diff ** 2
# update the values in each matrix in the direction of the gradient
for k in range(self.latent_features):
user_mat[i, k] += self.learning_rate * (2 * diff * movie_mat[k, j])
movie_mat[k, j] += self.learning_rate * (2 * diff * user_mat[i, k])
# print results
print("%d \t\t %f" % (iteration + 1, sse_accum / self.num_ratings))
# SVD based fit
# Keep user_mat and movie_mat for safe keeping
self.user_mat = user_mat
self.movie_mat = movie_mat
# Knowledge based fit
self.ranked_movies = lib.create_ranked_df(self.movies, self.reviews)
def predict_rating(self, user_id, movie_id):
"""
:param user_id: the user_id from the reviews df
        :param movie_id: the movie_id according to the movies df
:return pred: the predicted rating for user_id-movie_id according to FunkSVD
"""
try:
# User row and Movie Column
user_row = np.where(self.user_ids_series == user_id)[0][0]
movie_col = np.where(self.movie_ids_series == movie_id)[0][0]
# Take dot product of that row and column in U and V to make prediction
pred = np.dot(self.user_mat[user_row, :], self.movie_mat[:, movie_col])
movie_name = str(self.movies[self.movies['movie_id'] == movie_id]['movie'])[5:]
movie_name = movie_name.replace('\nName: movie, dtype: object', '')
print("For user {} we predict a {} rating for the movie {}.".format(user_id, round(float(pred), 2),
str(movie_name)))
return pred
        except IndexError:
            # the user or movie id was not found in the factorized matrices
            print("I'm sorry, but a prediction cannot be made for this user-movie pair. It looks like one of these "
                  "items does not exist in our current database.")
return None
def make_recommendations(self, _id, _id_type='movie', rec_num=5):
"""
:param _id: (int) either a user or movie id
:param _id_type: (str) "movie" or "user"
:param rec_num: (int) number of recommendations to return
:return recs: (array) a list or numpy array of recommended movies like the
given movie, or recs for a user_id given
"""
# if the user is available from the matrix factorization data
rec_ids, rec_names = None, None
if _id_type == 'user':
if _id in self.user_ids_series:
# Get the index of which row the user is in for use in U matrix
idx = np.where(self.user_ids_series == _id)[0][0]
# take the dot product of that row and the V matrix
preds = np.dot(self.user_mat[idx, :], self.movie_mat)
# pull the top movies according to the prediction
indices = preds.argsort()[-rec_num:][::-1] # indices
rec_ids = self.movie_ids_series[indices]
rec_names = lib.get_movie_names(rec_ids, self.movies)
else:
# if we don't have this user, give just top ratings back
rec_names = lib.popular_recommendations(rec_num, self.ranked_movies)
print("Because this user wasn't in our database,"
"we are giving back the top movie recommendations for all users.")
# Find similar movies if it is a movie that is passed
else:
if _id in self.movie_ids_series:
rec_names = list(lib.find_similar_movies(_id, self.movies))[:rec_num]
else:
print("That movie doesn't exist in our database. Sorry, we don't have any recommendations for you.")
return rec_ids, rec_names
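# Editor's note: an illustrative (hypothetical) usage sketch, not part of the original
# module; the csv paths and ids below are placeholders:
#
#     rec = Recommender()
#     rec.fit(reviews_pth='train_data.csv', movies_pth='movies_clean.csv',
#             latent_features=12, learning_rate=0.0001, iterators=100)
#     rec.predict_rating(user_id=8, movie_id=2844)
#     rec.make_recommendations(8, 'user', rec_num=5)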
| 42.115607
| 117
| 0.601702
|
b9dc3563acb91688180ab86fe156fb71bd6f7d1a
| 1,171
|
py
|
Python
|
src/fuzzingtool/core/defaults/encoders/__init__.py
|
NESCAU-UFLA/FuzzingTool
|
d0dbe3ee4c17ec8ee72423bf7fabce6849e01807
|
[
"MIT"
] | 131
|
2020-12-14T18:45:29.000Z
|
2022-03-31T03:00:21.000Z
|
src/fuzzingtool/core/defaults/encoders/__init__.py
|
NESCAU-UFLA/FuzzingTool
|
d0dbe3ee4c17ec8ee72423bf7fabce6849e01807
|
[
"MIT"
] | 51
|
2020-12-14T16:02:38.000Z
|
2022-03-31T18:47:12.000Z
|
src/fuzzingtool/core/defaults/encoders/__init__.py
|
NESCAU-UFLA/FuzzingTool
|
d0dbe3ee4c17ec8ee72423bf7fabce6849e01807
|
[
"MIT"
] | 38
|
2020-12-14T21:12:18.000Z
|
2022-03-29T18:23:20.000Z
|
# Copyright (c) 2020 - present Vitor Oriel <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .ChainEncoder import ChainEncoder
| 55.761905
| 80
| 0.781383
|
7e922aacf3502cb9c2d0af4b8790d911112c413e
| 4,331
|
py
|
Python
|
tests/__init__.py
|
alseambusher/python-libarchive-c
|
a076378728fcc54a79bd3bc15f41365d5a740d6d
|
[
"CC0-1.0"
] | null | null | null |
tests/__init__.py
|
alseambusher/python-libarchive-c
|
a076378728fcc54a79bd3bc15f41365d5a740d6d
|
[
"CC0-1.0"
] | null | null | null |
tests/__init__.py
|
alseambusher/python-libarchive-c
|
a076378728fcc54a79bd3bc15f41365d5a740d6d
|
[
"CC0-1.0"
] | 1
|
2019-11-05T17:08:43.000Z
|
2019-11-05T17:08:43.000Z
|
from __future__ import division, print_function, unicode_literals
from contextlib import closing, contextmanager
from copy import copy
from os import chdir, getcwd, stat, walk
from os.path import abspath, dirname, join
from stat import S_ISREG
import tarfile
from libarchive import file_reader
from . import surrogateescape
data_dir = join(dirname(__file__), 'data')
surrogateescape.register()
def check_archive(archive, tree):
tree2 = copy(tree)
for e in archive:
epath = str(e).rstrip('/')
assert epath in tree2
estat = tree2.pop(epath)
assert e.mtime == int(estat['mtime'])
if not e.isdir:
size = e.size
if size is not None:
assert size == estat['size']
with open(epath, 'rb') as f:
for block in e.get_blocks():
assert f.read(len(block)) == block
leftover = f.read()
assert not leftover
# Check that there are no missing directories or files
assert len(tree2) == 0
def get_entries(location):
"""
Using the archive file at `location`, return an iterable of name->value
mappings for each libarchive.ArchiveEntry objects essential attributes.
Paths are base64-encoded because JSON is UTF-8 and cannot handle
arbitrary binary pathdata.
"""
with file_reader(location) as arch:
for entry in arch:
# libarchive introduces prefixes such as h prefix for
# hardlinks: tarfile does not, so we ignore the first char
mode = entry.strmode[1:].decode('ascii')
yield {
'path': surrogate_decode(entry.pathname),
'mtime': entry.mtime,
'size': entry.size,
'mode': mode,
'isreg': entry.isreg,
'isdir': entry.isdir,
'islnk': entry.islnk,
'issym': entry.issym,
'linkpath': surrogate_decode(entry.linkpath),
'isblk': entry.isblk,
'ischr': entry.ischr,
'isfifo': entry.isfifo,
'isdev': entry.isdev,
'uid': entry.uid,
'gid': entry.gid
}
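# Editor's note: an illustrative (hypothetical) usage sketch, not part of the original
# test helpers; 'archive.tar' is a placeholder path:
#
#     for info in get_entries('archive.tar'):
#         print(info['path'], info['size'], info['mode'])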
def get_tarinfos(location):
"""
Using the tar archive file at `location`, return an iterable of
name->value mappings for each tarfile.TarInfo objects essential
attributes.
Paths are base64-encoded because JSON is UTF-8 and cannot handle
arbitrary binary pathdata.
"""
with closing(tarfile.open(location)) as tar:
for entry in tar:
path = surrogate_decode(entry.path or '')
if entry.isdir() and not path.endswith('/'):
path += '/'
# libarchive introduces prefixes such as h prefix for
# hardlinks: tarfile does not, so we ignore the first char
mode = tarfile.filemode(entry.mode)[1:]
yield {
'path': path,
'mtime': entry.mtime,
'size': entry.size,
'mode': mode,
'isreg': entry.isreg(),
'isdir': entry.isdir(),
'islnk': entry.islnk(),
'issym': entry.issym(),
'linkpath': surrogate_decode(entry.linkpath or None),
'isblk': entry.isblk(),
'ischr': entry.ischr(),
'isfifo': entry.isfifo(),
'isdev': entry.isdev(),
'uid': entry.uid,
'gid': entry.gid
}
@contextmanager
def in_dir(dirpath):
prev = abspath(getcwd())
chdir(dirpath)
try:
yield
finally:
chdir(prev)
def stat_dict(path):
keys = set(('uid', 'gid', 'mtime'))
mode, _, _, _, uid, gid, size, _, mtime, _ = stat(path)
if S_ISREG(mode):
keys.add('size')
return {k: v for k, v in locals().items() if k in keys}
def treestat(d, stat_dict=stat_dict):
r = {}
for dirpath, dirnames, filenames in walk(d):
r[dirpath] = stat_dict(dirpath)
for fname in filenames:
fpath = join(dirpath, fname)
r[fpath] = stat_dict(fpath)
return r
def surrogate_decode(o):
if isinstance(o, bytes):
return o.decode('utf8', errors='surrogateescape')
return o
| 31.384058
| 75
| 0.558762
|
3bdd5fcf9075bf03032ca2c0d5f953b74b68365e
| 2,375
|
py
|
Python
|
tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[pl_PL-2019] 1.py
|
gour/holidata
|
89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13
|
[
"MIT"
] | 32
|
2019-04-12T08:01:34.000Z
|
2022-02-28T04:41:50.000Z
|
tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[pl_PL-2019] 1.py
|
gour/holidata
|
89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13
|
[
"MIT"
] | 74
|
2019-07-09T16:35:20.000Z
|
2022-03-09T16:41:34.000Z
|
tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[pl_PL-2019] 1.py
|
gour/holidata
|
89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13
|
[
"MIT"
] | 20
|
2019-01-28T07:41:02.000Z
|
2022-02-16T02:38:57.000Z
|
[
{
'date': '2019-01-01',
'description': 'Nowy Rok',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-01-06',
'description': 'Trzech Króli',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-04-21',
'description': 'Wielkanoc',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-04-22',
'description': 'Poniedziałek Wielkanocny',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-05-01',
'description': 'Święto Pracy',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-05-03',
'description': 'Święto Konstytucji Trzeciego Maja',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-06-09',
'description': 'Zielone Świątki',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-06-20',
'description': 'Boże Ciało',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-08-15',
'description': 'Wniebowzięcie Najświętszej Maryi Panny',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-11-01',
'description': 'Wszystkich Świętych',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-11-11',
'description': 'Narodowe Święto Niepodległości',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-12-25',
'description': 'Boże Narodzenie (pierwszy dzień)',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-26',
'description': 'Boże Narodzenie (drugi dzień)',
'locale': 'pl-PL',
'notes': '',
'region': '',
'type': 'NRF'
}
]
| 22.40566
| 64
| 0.381053
|
0309e26fccd1610c9e2e5c8951cc88195663f233
| 968
|
py
|
Python
|
extras/metrics.py
|
FreedomBen/banshee
|
55a6ff7a7d479b3ab64f1a5394609f82341dfa59
|
[
"MIT"
] | 8
|
2017-08-21T14:24:49.000Z
|
2020-08-24T00:04:59.000Z
|
extras/metrics.py
|
FreedomBen/banshee
|
55a6ff7a7d479b3ab64f1a5394609f82341dfa59
|
[
"MIT"
] | 67
|
2017-04-16T08:29:23.000Z
|
2021-01-09T13:58:08.000Z
|
extras/metrics.py
|
FreedomBen/banshee
|
55a6ff7a7d479b3ab64f1a5394609f82341dfa59
|
[
"MIT"
] | 8
|
2017-06-13T17:51:07.000Z
|
2021-12-20T02:37:07.000Z
|
#!/usr/bin/env python
import cgi
import os
# INITIAL SETUP:
# 1. mkdir data
# 2. chmod o-xr data
# 3. echo 0 > data/count
# 4. change data_dir below
data_dir = '/home/bansheeweb/download.banshee-project.org/metrics/data/';
uploaded = False
form = cgi.FieldStorage()
if form.file:
# Read the current count
f = open(data_dir + 'count', 'r')
count = f.read ()
count = int(count)
f.close ()
# Increment it and write it out
f = open(data_dir + 'count', 'w')
count = count + 1
f.write (str(count));
f.close ();
# Save the POSTed file
filename = data_dir + str(count) + '.json'
f = open(filename, 'w')
while 1:
line = form.file.readline()
if not line: break
f.write (line)
f.close ();
# gzip it
os.system ('gzip ' + filename)
uploaded = True
if uploaded:
print "Status-Code: 200"
print "Content-type: text/html"
print
else:
print "Status-Code: 500"
| 19.36
| 73
| 0.594008
|
e478d65a6fcafafb5c7d6f96cc7789890297d309
| 52,872
|
py
|
Python
|
mypy/main.py
|
SwagatSBhuyan/mypy
|
218b91c5576a69da51e0813abd0fc7c5fd2d627e
|
[
"PSF-2.0"
] | 2
|
2017-03-21T21:27:44.000Z
|
2020-12-06T19:13:11.000Z
|
mypy/main.py
|
SwagatSBhuyan/mypy
|
218b91c5576a69da51e0813abd0fc7c5fd2d627e
|
[
"PSF-2.0"
] | null | null | null |
mypy/main.py
|
SwagatSBhuyan/mypy
|
218b91c5576a69da51e0813abd0fc7c5fd2d627e
|
[
"PSF-2.0"
] | 1
|
2021-05-21T08:25:52.000Z
|
2021-05-21T08:25:52.000Z
|
"""Mypy type checker command line tool."""
import argparse
from gettext import gettext
import os
import subprocess
import sys
import time
from typing import Any, Dict, IO, List, Optional, Sequence, Tuple, TextIO, Union
from typing_extensions import Final, NoReturn
from mypy import build
from mypy import defaults
from mypy import state
from mypy import util
from mypy.modulefinder import (
BuildSource, FindModuleCache, SearchPaths,
get_site_packages_dirs, mypy_path,
)
from mypy.find_sources import create_source_list, InvalidSourceList
from mypy.fscache import FileSystemCache
from mypy.errors import CompileError
from mypy.errorcodes import error_codes
from mypy.options import Options, BuildType
from mypy.config_parser import get_config_module_names, parse_version, parse_config_file
from mypy.split_namespace import SplitNamespace
from mypy.version import __version__
orig_stat: Final = os.stat
MEM_PROFILE: Final = False # If True, dump memory profile
def stat_proxy(path: str) -> os.stat_result:
try:
st = orig_stat(path)
except os.error as err:
print("stat(%r) -> %s" % (path, err))
raise
else:
print("stat(%r) -> (st_mode=%o, st_mtime=%d, st_size=%d)" %
(path, st.st_mode, st.st_mtime, st.st_size))
return st
def main(script_path: Optional[str],
stdout: TextIO,
stderr: TextIO,
args: Optional[List[str]] = None,
) -> None:
"""Main entry point to the type checker.
Args:
script_path: Path to the 'mypy' script (used for finding data files).
args: Custom command-line arguments. If not given, sys.argv[1:] will
be used.
"""
util.check_python_version('mypy')
t0 = time.time()
# To log stat() calls: os.stat = stat_proxy
sys.setrecursionlimit(2 ** 14)
if args is None:
args = sys.argv[1:]
fscache = FileSystemCache()
sources, options = process_options(args, stdout=stdout, stderr=stderr,
fscache=fscache)
formatter = util.FancyFormatter(stdout, stderr, options.show_error_codes)
if options.install_types and (stdout is not sys.stdout or stderr is not sys.stderr):
# Since --install-types performs user input, we want regular stdout and stderr.
fail("error: --install-types not supported in this mode of running mypy", stderr, options)
if options.non_interactive and not options.install_types:
fail("error: --non-interactive is only supported with --install-types", stderr, options)
if options.install_types and not options.incremental:
fail("error: --install-types not supported with incremental mode disabled",
stderr, options)
if options.install_types and not sources:
install_types(options.cache_dir, formatter, non_interactive=options.non_interactive)
return
res, messages, blockers = run_build(sources, options, fscache, t0, stdout, stderr)
if options.non_interactive:
missing_pkgs = read_types_packages_to_install(options.cache_dir, after_run=True)
if missing_pkgs:
# Install missing type packages and rerun build.
install_types(options.cache_dir, formatter, after_run=True, non_interactive=True)
fscache.flush()
print()
res, messages, blockers = run_build(sources, options, fscache, t0, stdout, stderr)
show_messages(messages, stderr, formatter, options)
if MEM_PROFILE:
from mypy.memprofile import print_memory_profile
print_memory_profile()
code = 0
if messages:
code = 2 if blockers else 1
if options.error_summary:
if messages:
n_errors, n_files = util.count_stats(messages)
if n_errors:
summary = formatter.format_error(
n_errors, n_files, len(sources), blockers=blockers,
use_color=options.color_output
)
stdout.write(summary + '\n')
else:
stdout.write(formatter.format_success(len(sources), options.color_output) + '\n')
stdout.flush()
if options.install_types and not options.non_interactive:
result = install_types(options.cache_dir, formatter, after_run=True,
non_interactive=False)
if result:
print()
print("note: Run mypy again for up-to-date results with installed types")
code = 2
if options.fast_exit:
# Exit without freeing objects -- it's faster.
#
# NOTE: We don't flush all open files on exit (or run other destructors)!
util.hard_exit(code)
elif code:
sys.exit(code)
# HACK: keep res alive so that mypyc won't free it before the hard_exit
list([res])
def run_build(sources: List[BuildSource],
options: Options,
fscache: FileSystemCache,
t0: float,
stdout: TextIO,
stderr: TextIO) -> Tuple[Optional[build.BuildResult], List[str], bool]:
formatter = util.FancyFormatter(stdout, stderr, options.show_error_codes)
messages = []
def flush_errors(new_messages: List[str], serious: bool) -> None:
if options.pretty:
new_messages = formatter.fit_in_terminal(new_messages)
messages.extend(new_messages)
if options.non_interactive:
# Collect messages and possibly show them later.
return
f = stderr if serious else stdout
show_messages(new_messages, f, formatter, options)
serious = False
blockers = False
res = None
try:
# Keep a dummy reference (res) for memory profiling afterwards, as otherwise
# the result could be freed.
res = build.build(sources, options, None, flush_errors, fscache, stdout, stderr)
except CompileError as e:
blockers = True
if not e.use_stdout:
serious = True
if (options.warn_unused_configs
and options.unused_configs
and not options.incremental
and not options.non_interactive):
print("Warning: unused section(s) in %s: %s" %
(options.config_file,
get_config_module_names(options.config_file,
[glob for glob in options.per_module_options.keys()
if glob in options.unused_configs])),
file=stderr)
maybe_write_junit_xml(time.time() - t0, serious, messages, options)
return res, messages, blockers
def show_messages(messages: List[str],
f: TextIO,
formatter: util.FancyFormatter,
options: Options) -> None:
for msg in messages:
if options.color_output:
msg = formatter.colorize(msg)
f.write(msg + '\n')
f.flush()
# Make the help output a little less jarring.
class AugmentedHelpFormatter(argparse.RawDescriptionHelpFormatter):
def __init__(self, prog: str) -> None:
super().__init__(prog=prog, max_help_position=28)
def _fill_text(self, text: str, width: int, indent: str) -> str:
if '\n' in text:
# Assume we want to manually format the text
return super()._fill_text(text, width, indent)
else:
# Assume we want argparse to manage wrapping, indenting, and
# formatting the text for us.
return argparse.HelpFormatter._fill_text(self, text, width, indent)
# Define pairs of flag prefixes with inverse meaning.
flag_prefix_pairs: Final = [
('allow', 'disallow'),
('show', 'hide'),
]
flag_prefix_map: Final[Dict[str, str]] = {}
for a, b in flag_prefix_pairs:
flag_prefix_map[a] = b
flag_prefix_map[b] = a
def invert_flag_name(flag: str) -> str:
split = flag[2:].split('-', 1)
if len(split) == 2:
prefix, rest = split
if prefix in flag_prefix_map:
return '--{}-{}'.format(flag_prefix_map[prefix], rest)
elif prefix == 'no':
return '--{}'.format(rest)
return '--no-{}'.format(flag[2:])
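# Hedged illustration (not part of upstream mypy): how invert_flag_name() maps
# flags to their inverses, given the prefix pairs registered above.
#
#     invert_flag_name('--allow-untyped-globals')  # -> '--disallow-untyped-globals'
#     invert_flag_name('--no-warn-no-return')      # -> '--warn-no-return'
#     invert_flag_name('--strict-equality')        # -> '--no-strict-equality'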
class PythonExecutableInferenceError(Exception):
"""Represents a failure to infer the version or executable while searching."""
def python_executable_prefix(v: str) -> List[str]:
if sys.platform == 'win32':
# on Windows, all Python executables are named `python`. To handle this, there
# is the `py` launcher, which can be passed a version e.g. `py -3.8`, and it will
# execute an installed Python 3.8 interpreter. See also:
# https://docs.python.org/3/using/windows.html#python-launcher-for-windows
return ['py', '-{}'.format(v)]
else:
return ['python{}'.format(v)]
def _python_executable_from_version(python_version: Tuple[int, int]) -> str:
if sys.version_info[:2] == python_version:
return sys.executable
str_ver = '.'.join(map(str, python_version))
try:
sys_exe = subprocess.check_output(python_executable_prefix(str_ver) +
['-c', 'import sys; print(sys.executable)'],
stderr=subprocess.STDOUT).decode().strip()
return sys_exe
except (subprocess.CalledProcessError, FileNotFoundError) as e:
raise PythonExecutableInferenceError(
'failed to find a Python executable matching version {},'
' perhaps try --python-executable, or --no-site-packages?'.format(python_version)
) from e
def infer_python_executable(options: Options,
special_opts: argparse.Namespace) -> None:
"""Infer the Python executable from the given version.
This function mutates options based on special_opts to infer the correct Python executable
to use.
"""
# TODO: (ethanhs) Look at folding these checks and the site packages subprocess calls into
# one subprocess call for speed.
# Use the command line specified executable, or fall back to one set in the
# config file. If an executable is not specified, infer it from the version
# (unless no_executable is set)
python_executable = special_opts.python_executable or options.python_executable
if python_executable is None:
if not special_opts.no_executable and not options.no_site_packages:
python_executable = _python_executable_from_version(options.python_version)
options.python_executable = python_executable
HEADER: Final = """%(prog)s [-h] [-v] [-V] [more options; see below]
[-m MODULE] [-p PACKAGE] [-c PROGRAM_TEXT] [files ...]"""
DESCRIPTION: Final = """
Mypy is a program that will type check your Python code.
Pass in any files or folders you want to type check. Mypy will
recursively traverse any provided folders to find .py files:
$ mypy my_program.py my_src_folder
For more information on getting started, see:
- https://mypy.readthedocs.io/en/stable/getting_started.html
For more details on both running mypy and using the flags below, see:
- https://mypy.readthedocs.io/en/stable/running_mypy.html
- https://mypy.readthedocs.io/en/stable/command_line.html
You can also use a config file to configure mypy instead of using
command line flags. For more details, see:
- https://mypy.readthedocs.io/en/stable/config_file.html
"""
FOOTER: Final = """Environment variables:
Define MYPYPATH for additional module search path entries.
Define MYPY_CACHE_DIR to override configuration cache_dir path."""
class CapturableArgumentParser(argparse.ArgumentParser):
"""Override ArgumentParser methods that use sys.stdout/sys.stderr directly.
This is needed because hijacking sys.std* is not thread-safe,
yet output must be captured to properly support mypy.api.run.
"""
def __init__(self, *args: Any, **kwargs: Any):
self.stdout = kwargs.pop('stdout', sys.stdout)
self.stderr = kwargs.pop('stderr', sys.stderr)
super().__init__(*args, **kwargs)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file: Optional[IO[str]] = None) -> None:
if file is None:
file = self.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file: Optional[IO[str]] = None) -> None:
if file is None:
file = self.stdout
self._print_message(self.format_help(), file)
def _print_message(self, message: str, file: Optional[IO[str]] = None) -> None:
if message:
if file is None:
file = self.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status: int = 0, message: Optional[str] = None) -> NoReturn:
if message:
self._print_message(message, self.stderr)
sys.exit(status)
def error(self, message: str) -> NoReturn:
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(self.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, gettext('%(prog)s: error: %(message)s\n') % args)
class CapturableVersionAction(argparse.Action):
"""Supplement CapturableArgumentParser to handle --version.
This is nearly identical to argparse._VersionAction except,
like CapturableArgumentParser, it allows output to be captured.
Another notable difference is that version is mandatory.
This allows removing a line in __call__ that falls back to parser.version
(which does not appear to exist).
"""
def __init__(self,
option_strings: Sequence[str],
version: str,
dest: str = argparse.SUPPRESS,
default: str = argparse.SUPPRESS,
help: str = "show program's version number and exit",
stdout: Optional[IO[str]] = None):
super().__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
self.stdout = stdout or sys.stdout
def __call__(self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None) -> NoReturn:
formatter = parser._get_formatter()
formatter.add_text(self.version)
parser._print_message(formatter.format_help(), self.stdout)
parser.exit()
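# Hedged sketch (not part of upstream mypy): both classes above exist so parser
# output can be captured rather than written straight to sys.std*; for example,
# help text can be collected into an in-memory buffer.
#
#     import io
#     buf = io.StringIO()
#     demo = CapturableArgumentParser(prog='demo', stdout=buf, stderr=buf)
#     demo.add_argument('--flag', action='store_true')
#     demo.print_help()            # written into buf, not sys.stdout
#     help_text = buf.getvalue()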
def process_options(args: List[str],
stdout: Optional[TextIO] = None,
stderr: Optional[TextIO] = None,
require_targets: bool = True,
server_options: bool = False,
fscache: Optional[FileSystemCache] = None,
program: str = 'mypy',
header: str = HEADER,
) -> Tuple[List[BuildSource], Options]:
"""Parse command line arguments.
If a FileSystemCache is passed in, and package_root options are given,
call fscache.set_package_root() to set the cache's package root.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
parser = CapturableArgumentParser(prog=program,
usage=header,
description=DESCRIPTION,
epilog=FOOTER,
fromfile_prefix_chars='@',
formatter_class=AugmentedHelpFormatter,
add_help=False,
stdout=stdout,
stderr=stderr)
strict_flag_names: List[str] = []
strict_flag_assignments: List[Tuple[str, bool]] = []
def add_invertible_flag(flag: str,
*,
inverse: Optional[str] = None,
default: bool,
dest: Optional[str] = None,
help: str,
strict_flag: bool = False,
group: Optional[argparse._ActionsContainer] = None
) -> None:
if inverse is None:
inverse = invert_flag_name(flag)
if group is None:
group = parser
if help is not argparse.SUPPRESS:
help += " (inverse: {})".format(inverse)
arg = group.add_argument(flag,
action='store_false' if default else 'store_true',
dest=dest,
help=help)
dest = arg.dest
arg = group.add_argument(inverse,
action='store_true' if default else 'store_false',
dest=dest,
help=argparse.SUPPRESS)
if strict_flag:
assert dest is not None
strict_flag_names.append(flag)
strict_flag_assignments.append((dest, not default))
# Unless otherwise specified, arguments will be parsed directly onto an
# Options object. Options that require further processing should have
# their `dest` prefixed with `special-opts:`, which will cause them to be
# parsed into the separate special_opts namespace object.
# Note: we have a style guide for formatting the mypy --help text. See
# https://github.com/python/mypy/wiki/Documentation-Conventions
general_group = parser.add_argument_group(
title='Optional arguments')
general_group.add_argument(
'-h', '--help', action='help',
help="Show this help message and exit")
general_group.add_argument(
'-v', '--verbose', action='count', dest='verbosity',
help="More verbose messages")
general_group.add_argument(
'-V', '--version', action=CapturableVersionAction,
version='%(prog)s ' + __version__,
help="Show program's version number and exit",
stdout=stdout)
config_group = parser.add_argument_group(
title='Config file',
description="Use a config file instead of command line arguments. "
"This is useful if you are using many flags or want "
"to set different options per each module.")
config_group.add_argument(
'--config-file',
help="Configuration file, must have a [mypy] section "
"(defaults to {})".format(', '.join(defaults.CONFIG_FILES)))
add_invertible_flag('--warn-unused-configs', default=False, strict_flag=True,
help="Warn about unused '[mypy-<pattern>]' or '[[tool.mypy.overrides]]' "
"config sections",
group=config_group)
imports_group = parser.add_argument_group(
title='Import discovery',
description="Configure how imports are discovered and followed.")
add_invertible_flag(
'--namespace-packages', default=False,
help="Support namespace packages (PEP 420, __init__.py-less)",
group=imports_group)
imports_group.add_argument(
'--ignore-missing-imports', action='store_true',
help="Silently ignore imports of missing modules")
imports_group.add_argument(
'--follow-imports', choices=['normal', 'silent', 'skip', 'error'],
default='normal', help="How to treat imports (default normal)")
imports_group.add_argument(
'--python-executable', action='store', metavar='EXECUTABLE',
help="Python executable used for finding PEP 561 compliant installed"
" packages and stubs",
dest='special-opts:python_executable')
imports_group.add_argument(
'--no-site-packages', action='store_true',
dest='special-opts:no_executable',
help="Do not search for installed PEP 561 compliant packages")
imports_group.add_argument(
'--no-silence-site-packages', action='store_true',
help="Do not silence errors in PEP 561 compliant installed packages")
platform_group = parser.add_argument_group(
title='Platform configuration',
description="Type check code assuming it will be run under certain "
"runtime conditions. By default, mypy assumes your code "
"will be run using the same operating system and Python "
"version you are using to run mypy itself.")
platform_group.add_argument(
'--python-version', type=parse_version, metavar='x.y',
help='Type check code assuming it will be running on Python x.y',
dest='special-opts:python_version')
platform_group.add_argument(
'-2', '--py2', dest='special-opts:python_version', action='store_const',
const=defaults.PYTHON2_VERSION,
help="Use Python 2 mode (same as --python-version 2.7)")
platform_group.add_argument(
'--platform', action='store', metavar='PLATFORM',
help="Type check special-cased code for the given OS platform "
"(defaults to sys.platform)")
platform_group.add_argument(
'--always-true', metavar='NAME', action='append', default=[],
help="Additional variable to be considered True (may be repeated)")
platform_group.add_argument(
'--always-false', metavar='NAME', action='append', default=[],
help="Additional variable to be considered False (may be repeated)")
disallow_any_group = parser.add_argument_group(
title='Disallow dynamic typing',
description="Disallow the use of the dynamic 'Any' type under certain conditions.")
disallow_any_group.add_argument(
'--disallow-any-unimported', default=False, action='store_true',
help="Disallow Any types resulting from unfollowed imports")
disallow_any_group.add_argument(
'--disallow-any-expr', default=False, action='store_true',
help='Disallow all expressions that have type Any')
disallow_any_group.add_argument(
'--disallow-any-decorated', default=False, action='store_true',
help='Disallow functions that have Any in their signature '
'after decorator transformation')
disallow_any_group.add_argument(
'--disallow-any-explicit', default=False, action='store_true',
help='Disallow explicit Any in type positions')
add_invertible_flag('--disallow-any-generics', default=False, strict_flag=True,
help='Disallow usage of generic types that do not specify explicit type '
'parameters', group=disallow_any_group)
add_invertible_flag('--disallow-subclassing-any', default=False, strict_flag=True,
help="Disallow subclassing values of type 'Any' when defining classes",
group=disallow_any_group)
untyped_group = parser.add_argument_group(
title='Untyped definitions and calls',
description="Configure how untyped definitions and calls are handled. "
"Note: by default, mypy ignores any untyped function definitions "
"and assumes any calls to such functions have a return "
"type of 'Any'.")
add_invertible_flag('--disallow-untyped-calls', default=False, strict_flag=True,
help="Disallow calling functions without type annotations"
" from functions with type annotations",
group=untyped_group)
add_invertible_flag('--disallow-untyped-defs', default=False, strict_flag=True,
help="Disallow defining functions without type annotations"
" or with incomplete type annotations",
group=untyped_group)
add_invertible_flag('--disallow-incomplete-defs', default=False, strict_flag=True,
help="Disallow defining functions with incomplete type annotations",
group=untyped_group)
add_invertible_flag('--check-untyped-defs', default=False, strict_flag=True,
help="Type check the interior of functions without type annotations",
group=untyped_group)
add_invertible_flag('--disallow-untyped-decorators', default=False, strict_flag=True,
help="Disallow decorating typed functions with untyped decorators",
group=untyped_group)
none_group = parser.add_argument_group(
title='None and Optional handling',
description="Adjust how values of type 'None' are handled. For more context on "
"how mypy handles values of type 'None', see: "
"https://mypy.readthedocs.io/en/stable/kinds_of_types.html#no-strict-optional")
add_invertible_flag('--no-implicit-optional', default=False, strict_flag=True,
help="Don't assume arguments with default values of None are Optional",
group=none_group)
none_group.add_argument(
'--strict-optional', action='store_true',
help=argparse.SUPPRESS)
none_group.add_argument(
'--no-strict-optional', action='store_false', dest='strict_optional',
help="Disable strict Optional checks (inverse: --strict-optional)")
none_group.add_argument(
'--strict-optional-whitelist', metavar='GLOB', nargs='*',
help=argparse.SUPPRESS)
lint_group = parser.add_argument_group(
title='Configuring warnings',
description="Detect code that is sound but redundant or problematic.")
add_invertible_flag('--warn-redundant-casts', default=False, strict_flag=True,
help="Warn about casting an expression to its inferred type",
group=lint_group)
add_invertible_flag('--warn-unused-ignores', default=False, strict_flag=True,
help="Warn about unneeded '# type: ignore' comments",
group=lint_group)
add_invertible_flag('--no-warn-no-return', dest='warn_no_return', default=True,
help="Do not warn about functions that end without returning",
group=lint_group)
add_invertible_flag('--warn-return-any', default=False, strict_flag=True,
help="Warn about returning values of type Any"
" from non-Any typed functions",
group=lint_group)
add_invertible_flag('--warn-unreachable', default=False, strict_flag=False,
help="Warn about statements or expressions inferred to be"
" unreachable",
group=lint_group)
# Note: this group is intentionally added here even though we don't add
# --strict to this group near the end.
#
# That way, this group will appear after the various strictness groups
# but before the remaining flags.
# We add `--strict` near the end so we don't accidentally miss any strictness
# flags that are added after this group.
strictness_group = parser.add_argument_group(
title='Miscellaneous strictness flags')
add_invertible_flag('--allow-untyped-globals', default=False, strict_flag=False,
help="Suppress toplevel errors caused by missing annotations",
group=strictness_group)
add_invertible_flag('--allow-redefinition', default=False, strict_flag=False,
help="Allow unconditional variable redefinition with a new type",
group=strictness_group)
add_invertible_flag('--no-implicit-reexport', default=True, strict_flag=True,
dest='implicit_reexport',
help="Treat imports as private unless aliased",
group=strictness_group)
add_invertible_flag('--strict-equality', default=False, strict_flag=True,
help="Prohibit equality, identity, and container checks for"
" non-overlapping types",
group=strictness_group)
strict_help = "Strict mode; enables the following flags: {}".format(
", ".join(strict_flag_names))
strictness_group.add_argument(
'--strict', action='store_true', dest='special-opts:strict',
help=strict_help)
strictness_group.add_argument(
'--disable-error-code', metavar='NAME', action='append', default=[],
help="Disable a specific error code")
strictness_group.add_argument(
'--enable-error-code', metavar='NAME', action='append', default=[],
help="Enable a specific error code"
)
error_group = parser.add_argument_group(
title='Configuring error messages',
description="Adjust the amount of detail shown in error messages.")
add_invertible_flag('--show-error-context', default=False,
dest='show_error_context',
help='Precede errors with "note:" messages explaining context',
group=error_group)
add_invertible_flag('--show-column-numbers', default=False,
help="Show column numbers in error messages",
group=error_group)
add_invertible_flag('--show-error-codes', default=False,
help="Show error codes in error messages",
group=error_group)
add_invertible_flag('--pretty', default=False,
help="Use visually nicer output in error messages:"
" Use soft word wrap, show source code snippets,"
" and show error location markers",
group=error_group)
add_invertible_flag('--no-color-output', dest='color_output', default=True,
help="Do not colorize error messages",
group=error_group)
add_invertible_flag('--no-error-summary', dest='error_summary', default=True,
help="Do not show error stats summary",
group=error_group)
add_invertible_flag('--show-absolute-path', default=False,
help="Show absolute paths to files",
group=error_group)
error_group.add_argument('--soft-error-limit', default=defaults.MANY_ERRORS_THRESHOLD,
type=int, dest="many_errors_threshold", help=argparse.SUPPRESS)
incremental_group = parser.add_argument_group(
title='Incremental mode',
description="Adjust how mypy incrementally type checks and caches modules. "
"Mypy caches type information about modules into a cache to "
"let you speed up future invocations of mypy. Also see "
"mypy's daemon mode: "
"mypy.readthedocs.io/en/stable/mypy_daemon.html#mypy-daemon")
incremental_group.add_argument(
'-i', '--incremental', action='store_true',
help=argparse.SUPPRESS)
incremental_group.add_argument(
'--no-incremental', action='store_false', dest='incremental',
help="Disable module cache (inverse: --incremental)")
incremental_group.add_argument(
'--cache-dir', action='store', metavar='DIR',
help="Store module cache info in the given folder in incremental mode "
"(defaults to '{}')".format(defaults.CACHE_DIR))
add_invertible_flag('--sqlite-cache', default=False,
help="Use a sqlite database to store the cache",
group=incremental_group)
incremental_group.add_argument(
'--cache-fine-grained', action='store_true',
help="Include fine-grained dependency information in the cache for the mypy daemon")
incremental_group.add_argument(
'--skip-version-check', action='store_true',
help="Allow using cache written by older mypy version")
incremental_group.add_argument(
'--skip-cache-mtime-checks', action='store_true',
help="Skip cache internal consistency checks based on mtime")
internals_group = parser.add_argument_group(
title='Advanced options',
description="Debug and customize mypy internals.")
internals_group.add_argument(
'--pdb', action='store_true', help="Invoke pdb on fatal error")
internals_group.add_argument(
'--show-traceback', '--tb', action='store_true',
help="Show traceback on fatal error")
internals_group.add_argument(
'--raise-exceptions', action='store_true', help="Raise exception on fatal error"
)
internals_group.add_argument(
'--custom-typing-module', metavar='MODULE', dest='custom_typing_module',
help="Use a custom typing module")
internals_group.add_argument(
'--custom-typeshed-dir', metavar='DIR',
help="Use the custom typeshed in DIR")
add_invertible_flag('--warn-incomplete-stub', default=False,
help="Warn if missing type annotation in typeshed, only relevant with"
" --disallow-untyped-defs or --disallow-incomplete-defs enabled",
group=internals_group)
internals_group.add_argument(
'--shadow-file', nargs=2, metavar=('SOURCE_FILE', 'SHADOW_FILE'),
dest='shadow_file', action='append',
help="When encountering SOURCE_FILE, read and type check "
"the contents of SHADOW_FILE instead.")
add_invertible_flag('--fast-exit', default=False, help=argparse.SUPPRESS,
group=internals_group)
report_group = parser.add_argument_group(
title='Report generation',
description='Generate a report in the specified format.')
for report_type in sorted(defaults.REPORTER_NAMES):
if report_type not in {'memory-xml'}:
report_group.add_argument('--%s-report' % report_type.replace('_', '-'),
metavar='DIR',
dest='special-opts:%s_report' % report_type)
other_group = parser.add_argument_group(
title='Miscellaneous')
other_group.add_argument(
'--quickstart-file', help=argparse.SUPPRESS)
other_group.add_argument(
'--junit-xml', help="Write junit.xml to the given file")
other_group.add_argument(
'--find-occurrences', metavar='CLASS.MEMBER',
dest='special-opts:find_occurrences',
help="Print out all usages of a class member (experimental)")
other_group.add_argument(
'--scripts-are-modules', action='store_true',
help="Script x becomes module x instead of __main__")
add_invertible_flag('--install-types', default=False, strict_flag=False,
help="Install detected missing library stub packages using pip",
group=other_group)
add_invertible_flag('--non-interactive', default=False, strict_flag=False,
help=("Install stubs without asking for confirmation and hide " +
"errors, with --install-types"),
group=other_group, inverse="--interactive")
if server_options:
# TODO: This flag is superfluous; remove after a short transition (2018-03-16)
other_group.add_argument(
'--experimental', action='store_true', dest='fine_grained_incremental',
help="Enable fine-grained incremental mode")
other_group.add_argument(
'--use-fine-grained-cache', action='store_true',
help="Use the cache in fine-grained incremental mode")
# hidden options
parser.add_argument(
'--stats', action='store_true', dest='dump_type_stats', help=argparse.SUPPRESS)
parser.add_argument(
'--inferstats', action='store_true', dest='dump_inference_stats',
help=argparse.SUPPRESS)
parser.add_argument(
'--dump-build-stats', action='store_true',
help=argparse.SUPPRESS)
# --debug-cache will disable any cache-related compressions/optimizations,
# which will make the cache writing process output pretty-printed JSON (which
# is easier to debug).
parser.add_argument('--debug-cache', action='store_true', help=argparse.SUPPRESS)
# --dump-deps will dump all fine-grained dependencies to stdout
parser.add_argument('--dump-deps', action='store_true', help=argparse.SUPPRESS)
# --dump-graph will dump the contents of the graph of SCCs and exit.
parser.add_argument('--dump-graph', action='store_true', help=argparse.SUPPRESS)
# --semantic-analysis-only does exactly that.
parser.add_argument('--semantic-analysis-only', action='store_true', help=argparse.SUPPRESS)
# --local-partial-types disallows partial types spanning module top level and a function
# (implicitly defined in fine-grained incremental mode)
parser.add_argument('--local-partial-types', action='store_true', help=argparse.SUPPRESS)
# --logical-deps adds some more dependencies that are not semantically needed, but
# may be helpful to determine relative importance of classes and functions for overall
    # type precision in a code base. It also _removes_ some deps, so this flag should never be
# used except for generating code stats. This also automatically enables --cache-fine-grained.
# NOTE: This is an experimental option that may be modified or removed at any time.
parser.add_argument('--logical-deps', action='store_true', help=argparse.SUPPRESS)
# --bazel changes some behaviors for use with Bazel (https://bazel.build).
parser.add_argument('--bazel', action='store_true', help=argparse.SUPPRESS)
# --package-root adds a directory below which directories are considered
# packages even without __init__.py. May be repeated.
parser.add_argument('--package-root', metavar='ROOT', action='append', default=[],
help=argparse.SUPPRESS)
# --cache-map FILE ... gives a mapping from source files to cache files.
# Each triple of arguments is a source file, a cache meta file, and a cache data file.
# Modules not mentioned in the file will go through cache_dir.
# Must be followed by another flag or by '--' (and then only file args may follow).
parser.add_argument('--cache-map', nargs='+', dest='special-opts:cache_map',
help=argparse.SUPPRESS)
# options specifying code to check
code_group = parser.add_argument_group(
title="Running code",
description="Specify the code you want to type check. For more details, see "
"mypy.readthedocs.io/en/stable/running_mypy.html#running-mypy")
add_invertible_flag(
'--explicit-package-bases', default=False,
help="Use current directory and MYPYPATH to determine module names of files passed",
group=code_group)
code_group.add_argument(
"--exclude",
action="append",
metavar="PATTERN",
default=[],
        help=(
            "Regular expression to match file names, directory names or paths which mypy should "
            "ignore while recursively discovering files to check, e.g. --exclude '/setup\\.py$'. "
            "May be specified more than once, e.g. --exclude a --exclude b"
)
)
code_group.add_argument(
'-m', '--module', action='append', metavar='MODULE',
default=[],
dest='special-opts:modules',
help="Type-check module; can repeat for more modules")
code_group.add_argument(
'-p', '--package', action='append', metavar='PACKAGE',
default=[],
dest='special-opts:packages',
help="Type-check package recursively; can be repeated")
code_group.add_argument(
'-c', '--command', action='append', metavar='PROGRAM_TEXT',
dest='special-opts:command',
help="Type-check program passed in as string")
code_group.add_argument(
metavar='files', nargs='*', dest='special-opts:files',
help="Type-check given files or directories")
# Parse arguments once into a dummy namespace so we can get the
# filename for the config file and know if the user requested all strict options.
dummy = argparse.Namespace()
parser.parse_args(args, dummy)
config_file = dummy.config_file
# Don't explicitly test if "config_file is not None" for this check.
# This lets `--config-file=` (an empty string) be used to disable all config files.
if config_file and not os.path.exists(config_file):
parser.error("Cannot find config file '%s'" % config_file)
options = Options()
def set_strict_flags() -> None:
for dest, value in strict_flag_assignments:
setattr(options, dest, value)
# Parse config file first, so command line can override.
parse_config_file(options, set_strict_flags, config_file, stdout, stderr)
# Set strict flags before parsing (if strict mode enabled), so other command
# line options can override.
if getattr(dummy, 'special-opts:strict'): # noqa
set_strict_flags()
# Override cache_dir if provided in the environment
environ_cache_dir = os.getenv('MYPY_CACHE_DIR', '')
if environ_cache_dir.strip():
options.cache_dir = environ_cache_dir
options.cache_dir = os.path.expanduser(options.cache_dir)
# Parse command line for real, using a split namespace.
special_opts = argparse.Namespace()
parser.parse_args(args, SplitNamespace(options, special_opts, 'special-opts:'))
# The python_version is either the default, which can be overridden via a config file,
# or stored in special_opts and is passed via the command line.
options.python_version = special_opts.python_version or options.python_version
try:
infer_python_executable(options, special_opts)
except PythonExecutableInferenceError as e:
parser.error(str(e))
if special_opts.no_executable or options.no_site_packages:
options.python_executable = None
# Paths listed in the config file will be ignored if any paths, modules or packages
# are passed on the command line.
if options.files and not (special_opts.files or special_opts.packages or special_opts.modules):
special_opts.files = options.files
# Check for invalid argument combinations.
if require_targets:
code_methods = sum(bool(c) for c in [special_opts.modules + special_opts.packages,
special_opts.command,
special_opts.files])
if code_methods == 0 and not options.install_types:
parser.error("Missing target module, package, files, or command.")
elif code_methods > 1:
parser.error("May only specify one of: module/package, files, or command.")
if options.explicit_package_bases and not options.namespace_packages:
parser.error(
"Can only use --explicit-package-bases with --namespace-packages, since otherwise "
"examining __init__.py's is sufficient to determine module names for files"
)
# Check for overlapping `--always-true` and `--always-false` flags.
overlap = set(options.always_true) & set(options.always_false)
if overlap:
parser.error("You can't make a variable always true and always false (%s)" %
', '.join(sorted(overlap)))
# Process `--enable-error-code` and `--disable-error-code` flags
disabled_codes = set(options.disable_error_code)
enabled_codes = set(options.enable_error_code)
valid_error_codes = set(error_codes.keys())
invalid_codes = (enabled_codes | disabled_codes) - valid_error_codes
if invalid_codes:
parser.error("Invalid error code(s): %s" %
', '.join(sorted(invalid_codes)))
options.disabled_error_codes |= {error_codes[code] for code in disabled_codes}
options.enabled_error_codes |= {error_codes[code] for code in enabled_codes}
# Enabling an error code always overrides disabling
options.disabled_error_codes -= options.enabled_error_codes
# Set build flags.
if options.strict_optional_whitelist is not None:
# TODO: Deprecate, then kill this flag
options.strict_optional = True
if special_opts.find_occurrences:
state.find_occurrences = special_opts.find_occurrences.split('.')
assert state.find_occurrences is not None
if len(state.find_occurrences) < 2:
parser.error("Can only find occurrences of class members.")
if len(state.find_occurrences) != 2:
parser.error("Can only find occurrences of non-nested class members.")
# Set reports.
for flag, val in vars(special_opts).items():
if flag.endswith('_report') and val is not None:
report_type = flag[:-7].replace('_', '-')
report_dir = val
options.report_dirs[report_type] = report_dir
# Process --package-root.
if options.package_root:
process_package_roots(fscache, parser, options)
# Process --cache-map.
if special_opts.cache_map:
if options.sqlite_cache:
parser.error("--cache-map is incompatible with --sqlite-cache")
process_cache_map(parser, special_opts, options)
# An explicitly specified cache_fine_grained implies local_partial_types
    # (because otherwise the cache is not compatible with dmypy)
if options.cache_fine_grained:
options.local_partial_types = True
# Let logical_deps imply cache_fine_grained (otherwise the former is useless).
if options.logical_deps:
options.cache_fine_grained = True
# Set target.
if special_opts.modules + special_opts.packages:
options.build_type = BuildType.MODULE
egg_dirs, site_packages = get_site_packages_dirs(options.python_executable)
search_paths = SearchPaths((os.getcwd(),),
tuple(mypy_path() + options.mypy_path),
tuple(egg_dirs + site_packages),
())
targets = []
# TODO: use the same cache that the BuildManager will
cache = FindModuleCache(search_paths, fscache, options)
for p in special_opts.packages:
if os.sep in p or os.altsep and os.altsep in p:
fail("Package name '{}' cannot have a slash in it.".format(p),
stderr, options)
p_targets = cache.find_modules_recursive(p)
if not p_targets:
fail("Can't find package '{}'".format(p), stderr, options)
targets.extend(p_targets)
for m in special_opts.modules:
targets.append(BuildSource(None, m, None))
return targets, options
elif special_opts.command:
options.build_type = BuildType.PROGRAM_TEXT
targets = [BuildSource(None, None, '\n'.join(special_opts.command))]
return targets, options
else:
try:
targets = create_source_list(special_opts.files, options, fscache)
# Variable named e2 instead of e to work around mypyc bug #620
# which causes issues when using the same variable to catch
# exceptions of different types.
except InvalidSourceList as e2:
fail(str(e2), stderr, options)
return targets, options
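# Hedged usage sketch (not part of upstream mypy): process_options() is what
# main() uses to turn argv into build sources plus an Options object; the file
# name 'prog.py' below is hypothetical and must exist for the call to succeed.
#
#     sources, options = process_options(['--strict', 'prog.py'])
#     # sources is a list of BuildSource objects; options has the strict flags set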
def process_package_roots(fscache: Optional[FileSystemCache],
parser: argparse.ArgumentParser,
options: Options) -> None:
"""Validate and normalize package_root."""
if fscache is None:
parser.error("--package-root does not work here (no fscache)")
assert fscache is not None # Since mypy doesn't know parser.error() raises.
# Do some stuff with drive letters to make Windows happy (esp. tests).
current_drive, _ = os.path.splitdrive(os.getcwd())
dot = os.curdir
dotslash = os.curdir + os.sep
dotdotslash = os.pardir + os.sep
trivial_paths = {dot, dotslash}
package_root = []
for root in options.package_root:
if os.path.isabs(root):
parser.error("Package root cannot be absolute: %r" % root)
drive, root = os.path.splitdrive(root)
if drive and drive != current_drive:
parser.error("Package root must be on current drive: %r" % (drive + root))
# Empty package root is always okay.
if root:
root = os.path.relpath(root) # Normalize the heck out of it.
if not root.endswith(os.sep):
root = root + os.sep
if root.startswith(dotdotslash):
parser.error("Package root cannot be above current directory: %r" % root)
if root in trivial_paths:
root = ''
package_root.append(root)
options.package_root = package_root
    # Pass the package root on to the filesystem cache.
fscache.set_package_root(package_root)
def process_cache_map(parser: argparse.ArgumentParser,
special_opts: argparse.Namespace,
options: Options) -> None:
"""Validate cache_map and copy into options.cache_map."""
n = len(special_opts.cache_map)
if n % 3 != 0:
parser.error("--cache-map requires one or more triples (see source)")
for i in range(0, n, 3):
source, meta_file, data_file = special_opts.cache_map[i:i + 3]
if source in options.cache_map:
            parser.error("Duplicate --cache-map source %s" % source)
if not source.endswith('.py') and not source.endswith('.pyi'):
parser.error("Invalid --cache-map source %s (triple[0] must be *.py[i])" % source)
if not meta_file.endswith('.meta.json'):
parser.error("Invalid --cache-map meta_file %s (triple[1] must be *.meta.json)" %
meta_file)
if not data_file.endswith('.data.json'):
parser.error("Invalid --cache-map data_file %s (triple[2] must be *.data.json)" %
data_file)
options.cache_map[source] = (meta_file, data_file)
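# Hedged command-line example (not part of upstream mypy): per the comments
# earlier in this module, each --cache-map triple is <source> <meta json>
# <data json> and must be followed by another flag or by '--' before the file
# arguments, e.g.
#
#     mypy --cache-map a.py a.meta.json a.data.json -- a.py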
def maybe_write_junit_xml(td: float, serious: bool, messages: List[str], options: Options) -> None:
if options.junit_xml:
py_version = '{}_{}'.format(options.python_version[0], options.python_version[1])
util.write_junit_xml(
td, serious, messages, options.junit_xml, py_version, options.platform)
def fail(msg: str, stderr: TextIO, options: Options) -> None:
"""Fail with a serious error."""
stderr.write('%s\n' % msg)
maybe_write_junit_xml(0.0, serious=True, messages=[msg], options=options)
sys.exit(2)
def read_types_packages_to_install(cache_dir: str, after_run: bool) -> List[str]:
if not os.path.isdir(cache_dir):
if not after_run:
sys.stderr.write(
"error: Can't determine which types to install with no files to check " +
"(and no cache from previous mypy run)\n"
)
else:
sys.stderr.write(
"error: --install-types failed (no mypy cache directory)\n"
)
sys.exit(2)
fnam = build.missing_stubs_file(cache_dir)
if not os.path.isfile(fnam):
# No missing stubs.
return []
with open(fnam) as f:
return [line.strip() for line in f.readlines()]
def install_types(cache_dir: str,
formatter: util.FancyFormatter,
*,
after_run: bool = False,
non_interactive: bool = False) -> bool:
"""Install stub packages using pip if some missing stubs were detected."""
packages = read_types_packages_to_install(cache_dir, after_run)
if not packages:
# If there are no missing stubs, generate no output.
return False
if after_run and not non_interactive:
print()
print('Installing missing stub packages:')
cmd = [sys.executable, '-m', 'pip', 'install'] + packages
print(formatter.style(' '.join(cmd), 'none', bold=True))
print()
if not non_interactive:
x = input('Install? [yN] ')
if not x.strip() or not x.lower().startswith('y'):
print(formatter.style('mypy: Skipping installation', 'red', bold=True))
sys.exit(2)
print()
subprocess.run(cmd)
return True
| 45.074169
| 99
| 0.632944
|
18dfa4638b0a726c9d07757bd8976a11fd214c43
| 2,850
|
py
|
Python
|
st2common/st2common/constants/pack.py
|
ekhavana/st2
|
2b47b0e317a2dfd7d92d63ec6dcf706493148890
|
[
"Apache-2.0"
] | null | null | null |
st2common/st2common/constants/pack.py
|
ekhavana/st2
|
2b47b0e317a2dfd7d92d63ec6dcf706493148890
|
[
"Apache-2.0"
] | null | null | null |
st2common/st2common/constants/pack.py
|
ekhavana/st2
|
2b47b0e317a2dfd7d92d63ec6dcf706493148890
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'PACKS_PACK_NAME',
'PACK_REF_WHITELIST_REGEX',
'PACK_RESERVED_CHARACTERS',
'PACK_VERSION_SEPARATOR',
'PACK_VERSION_REGEX',
'NORMALIZE_PACK_VERSION',
'ST2_VERSION_REGEX',
'SYSTEM_PACK_NAME',
'PACKS_PACK_NAME',
'LINUX_PACK_NAME',
'SYSTEM_PACK_NAMES',
'CHATOPS_PACK_NAME',
'USER_PACK_NAME_BLACKLIST',
'BASE_PACK_REQUIREMENTS',
'MANIFEST_FILE_NAME',
'CONFIG_SCHEMA_FILE_NAME'
]
# A list of allowed characters for the pack name
PACK_REF_WHITELIST_REGEX = r'^[a-z0-9_]+$'
# Check for a valid semver string
PACK_VERSION_REGEX = r'^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[\da-z\-]+(?:\.[\da-z\-]+)*)?(?:\+[\da-z\-]+(?:\.[\da-z\-]+)*)?$' # noqa
# If True, non semver valid pack versions will be normalized on register (e.g. 0.2 -> 0.2.0)
NORMALIZE_PACK_VERSION = True
# Special characters which can't be used in pack names
PACK_RESERVED_CHARACTERS = [
'.'
]
# Version separator when a version is supplied in the pack name
# Example: libcloud=1.0.1
PACK_VERSION_SEPARATOR = '='
# Check for st2 version in engines
ST2_VERSION_REGEX = r'^((>?>|>=|=|<=|<?<)\s*[0-9]+\.[0-9]+\.[0-9]+?(\s*,)?\s*)+$'
# Name used for system pack
SYSTEM_PACK_NAME = 'core'
# Name used for pack management pack
PACKS_PACK_NAME = 'packs'
# Name used for linux pack
LINUX_PACK_NAME = 'linux'
# Name of the default pack
DEFAULT_PACK_NAME = 'default'
# Name of the chatops pack
CHATOPS_PACK_NAME = 'chatops'
# A list of system pack names
SYSTEM_PACK_NAMES = [
CHATOPS_PACK_NAME,
SYSTEM_PACK_NAME,
PACKS_PACK_NAME,
LINUX_PACK_NAME
]
# A list of pack names which can't be used by user-supplied packs
USER_PACK_NAME_BLACKLIST = [
SYSTEM_PACK_NAME,
PACKS_PACK_NAME
]
# Python requirements which are common to all the packs and are installed into the Python pack
# sandbox (virtualenv)
BASE_PACK_REQUIREMENTS = [
'six>=1.9.0,<2.0'
]
# Name of the pack manifest file
MANIFEST_FILE_NAME = 'pack.yaml'
# File name for the config schema file
CONFIG_SCHEMA_FILE_NAME = 'config.schema.yaml'
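# Hedged illustration (not part of the original st2 module): the patterns above
# are plain `re` regexes, e.g.
#
#     import re
#     re.match(PACK_REF_WHITELIST_REGEX, 'my_pack')   # matches
#     re.match(PACK_VERSION_REGEX, '0.2.0')           # matches (full semver)
#     re.match(PACK_VERSION_REGEX, '0.2')             # no match; see NORMALIZE_PACK_VERSION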
| 29.6875
| 147
| 0.711228
|
9c8ce207d5d6aed72c5813e0b8cd483171e6c4bb
| 1,764
|
py
|
Python
|
PowerTrack/get_stream.py
|
g4brielvs/enterprise-scripts-python
|
788d425a180ca5ec06511ee5a1304957ec4d28ee
|
[
"Apache-2.0"
] | null | null | null |
PowerTrack/get_stream.py
|
g4brielvs/enterprise-scripts-python
|
788d425a180ca5ec06511ee5a1304957ec4d28ee
|
[
"Apache-2.0"
] | null | null | null |
PowerTrack/get_stream.py
|
g4brielvs/enterprise-scripts-python
|
788d425a180ca5ec06511ee5a1304957ec4d28ee
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import json
import os
import ssl
import sys
import requests
from dotenv import load_dotenv
load_dotenv(verbose=True)  # Logs a warning if it can't find a .env file
# Argparse for cli options. Run `python get_stream.py -h` to see list of available arguments.
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--chunksize", type=int, help="Overrides default chunksize of '10000'.")
args = parser.parse_args()
USERNAME = os.getenv("USERNAME")
PASSWORD = os.getenv("PASSWORD")
ACCOUNT_NAME = os.getenv("ACCOUNT_NAME")
ENDPOINT_LABEL = os.getenv("POWERTRACK_LABEL")
domain = "https://gnip-stream.twitter.com/stream"
endpoint = f"{domain}/powertrack/accounts/{ACCOUNT_NAME}/publishers/twitter/{ENDPOINT_LABEL}.json"
headers = {
'connection': "keep-alive",
'accept': 'application/json',
'Accept-Encoding': 'gzip',
'gnipkeepalive': '30',
}
def main():
if args.chunksize:
chunksize = args.chunksize
else:
chunksize = 10000
try:
get_stream(endpoint, chunksize)
except ssl.SSLError as e:
sys.stderr.write(f"Connection failed: {e}\n")
def get_stream(endpoint, chunksize):
response = requests.get(url=endpoint, auth=(USERNAME, PASSWORD), stream=True, headers=headers)
while True:
for chunk in response.iter_content(chunksize, decode_unicode=True): # Content gets decoded
            if "\n" in chunk or "\r" in chunk:  # Handles keep-alive new lines
print(chunk) # Prints keep-alive signal to stdout
else:
try:
print(json.loads(chunk))
except ValueError:
sys.stderr.write(f"Error processing JSON: {ValueError} {chunk}\n")
if __name__ == '__main__':
main()
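# Hedged note (not part of the original script): the os.getenv() calls above
# imply a .env file along these lines; every value below is a placeholder.
#
#     USERNAME=your-gnip-username
#     PASSWORD=your-password
#     ACCOUNT_NAME=your-account-name
#     POWERTRACK_LABEL=prod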
| 29.4
| 100
| 0.668367
|
de79c50bcf2db093ce388c48ecf4f5cdef4ddb45
| 10,842
|
py
|
Python
|
pynmt/__init__.py
|
obrmmk/demo
|
b5deb85b2b2bf118b850f93c255ee88d055156a8
|
[
"MIT"
] | null | null | null |
pynmt/__init__.py
|
obrmmk/demo
|
b5deb85b2b2bf118b850f93c255ee88d055156a8
|
[
"MIT"
] | null | null | null |
pynmt/__init__.py
|
obrmmk/demo
|
b5deb85b2b2bf118b850f93c255ee88d055156a8
|
[
"MIT"
] | 1
|
2021-11-23T14:04:36.000Z
|
2021-11-23T14:04:36.000Z
|
import torch
import torch.nn as nn
from torch.nn import (TransformerEncoder, TransformerDecoder,
TransformerEncoderLayer, TransformerDecoderLayer)
from torch import Tensor
from typing import Iterable, List
import math
import os
import numpy as np
try:
from janome.tokenizer import Tokenizer
except ModuleNotFoundError:
import os
os.system('pip install janome')
from janome.tokenizer import Tokenizer
from google_drive_downloader import GoogleDriveDownloader
# Specify the device
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('DEVICE :', DEVICE)
# SRC (source) : original text
SRC_LANGUAGE = 'jpn'
# TGT (target) : translated text
TGT_LANGUAGE = 'py'
# special_token IDX
UNK_IDX, PAD_IDX, SOS_IDX, EOS_IDX = 0, 1, 2, 3
tokenizer = Tokenizer(os.path.join(os.path.dirname(
__file__), 'janomedic.csv'), udic_type="simpledic", udic_enc="utf8", wakati=True)
def jpn_tokenizer(text):
return [token for token in tokenizer.tokenize(text) if token != " " and len(token) != 0]
class Seq2SeqTransformer(nn.Module):
def __init__(self,
num_encoder_layers: int,
num_decoder_layers: int,
emb_size: int,
nhead: int,
src_vocab_size: int,
tgt_vocab_size: int,
dim_feedforward: int = 512,
dropout: float = 0.1):
super(Seq2SeqTransformer, self).__init__()
encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_encoder = TransformerEncoder(
encoder_layer, num_layers=num_encoder_layers)
decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_decoder = TransformerDecoder(
decoder_layer, num_layers=num_decoder_layers)
self.generator = nn.Linear(emb_size, tgt_vocab_size)
self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
self.positional_encoding = PositionalEncoding(
emb_size, dropout=dropout)
def forward(self,
src: Tensor,
tgt: Tensor,
src_mask: Tensor,
tgt_mask: Tensor,
src_padding_mask: Tensor,
tgt_padding_mask: Tensor,
memory_key_padding_mask: Tensor):
src_emb = self.positional_encoding(self.src_tok_emb(src))
tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))
memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)
outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,
tgt_padding_mask, memory_key_padding_mask)
return self.generator(outs)
def encode(self, src: Tensor, src_mask: Tensor):
return self.transformer_encoder(self.positional_encoding(
self.src_tok_emb(src)), src_mask)
def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
return self.transformer_decoder(self.positional_encoding(
self.tgt_tok_emb(tgt)), memory,
tgt_mask)
class PositionalEncoding(nn.Module):
def __init__(self,
emb_size: int,
dropout: float,
maxlen: int = 5000):
super(PositionalEncoding, self).__init__()
den = torch.exp(- torch.arange(0, emb_size, 2)
* math.log(10000) / emb_size)
pos = torch.arange(0, maxlen).reshape(maxlen, 1)
pos_embedding = torch.zeros((maxlen, emb_size))
pos_embedding[:, 0::2] = torch.sin(pos * den)
pos_embedding[:, 1::2] = torch.cos(pos * den)
pos_embedding = pos_embedding.unsqueeze(-2)
self.dropout = nn.Dropout(dropout)
self.register_buffer('pos_embedding', pos_embedding)
def forward(self, token_embedding: Tensor):
return self.dropout(token_embedding +
self.pos_embedding[:token_embedding.size(0), :])
class TokenEmbedding(nn.Module):
def __init__(self, vocab_size: int, emb_size):
super(TokenEmbedding, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.emb_size = emb_size
def forward(self, tokens: Tensor):
return self.embedding(tokens.long()) * math.sqrt(self.emb_size)
# Mask to prevent the model from looking at future tokens when making predictions
def generate_square_subsequent_mask(sz):
mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float(
'-inf')).masked_fill(mask == 1, float(0.0))
return mask
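# Hedged worked example (not part of the original module): for sz=3 the mask is
#
#     [[0.,  -inf, -inf],
#      [0.,   0.,  -inf],
#      [0.,   0.,   0. ]]
#
# i.e. position i may only attend to positions <= i.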
def sequential_transforms(*transforms):
def func(txt_input):
for transform in transforms:
txt_input = transform(txt_input)
return txt_input
return func
def tensor_transform(token_ids: List[int]):
return torch.cat((torch.tensor([SOS_IDX]),
torch.tensor(token_ids),
torch.tensor([EOS_IDX])))
def beam_topk(model, ys, memory, beamsize):
ys = ys.to(DEVICE)
tgt_mask = (generate_square_subsequent_mask(
ys.size(0)).type(torch.bool)).to(DEVICE)
out = model.decode(ys, memory, tgt_mask)
out = out.transpose(0, 1)
prob = model.generator(out[:, -1])
next_prob, next_word = prob.topk(k=beamsize, dim=1)
return next_prob, next_word
# Generate translation candidates (sequences) using beam search
def beam_decode(model, src, src_mask, max_len, beamsize, start_symbol):
src = src.to(DEVICE)
src_mask = src_mask.to(DEVICE)
ys_result = {}
    memory = model.encode(src, src_mask).to(DEVICE)  # encoder output (the context vectors)
    # Initial value (expanded into beamsize candidates below)
ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)
next_prob, next_word = beam_topk(model, ys, memory, beamsize)
next_prob = next_prob[0].tolist()
    # Candidates for <sos> + the first token (the list length equals beamsize)
ys = [torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(
next_word[:, idx].item())], dim=0) for idx in range(beamsize)]
for i in range(max_len-1):
prob_list = []
ys_list = []
        # For each candidate, compute the next predicted tokens and their probabilities
for ys_token in ys:
next_prob, next_word = beam_topk(model, ys_token, memory, len(ys))
            # Store the predicted probabilities in a list (next_prob)
next_prob = next_prob[0].tolist()
            # Merge into a single list
prob_list.extend(next_prob)
ys = [torch.cat([ys_token, torch.ones(1, 1).type_as(src.data).fill_(
next_word[:, idx].item())], dim=0) for idx in range(len(ys))]
ys_list.extend(ys)
        # Keep the indices of the top-k entries of prob_list in prob_topk_idx
prob_topk_idx = list(reversed(np.argsort(prob_list).tolist()))
prob_topk_idx = prob_topk_idx[:len(ys)]
# print('@@', prob_topk_idx)
        # Assign the new top-k candidates to ys
ys = [ys_list[idx] for idx in prob_topk_idx]
next_prob = [prob_list[idx] for idx in prob_topk_idx]
# print('@@orig', prob_list)
# print('@@next', next_prob)
pop_list = []
for j in range(len(ys)):
            # If the EOS token is at the end, append the candidate to ys_result (the return value)
if ys[j][-1].item() == EOS_IDX:
ys_result[ys[j]] = next_prob[j]
pop_list.append(j)
        # Once a candidate has entered ys_result, remove it from ys
        # (because the length of ys changes, len(ys) is used instead of beamsize in several places)
for l in sorted(pop_list, reverse=True):
del ys[l]
        # Finish once ys_result holds at least beamsize candidates
if len(ys_result) >= beamsize:
break
return ys_result
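# Note (added for clarity, based on the code above): beam_decode returns a dict mapping each finished
# candidate tensor (a sequence ending in EOS_IDX) to the score produced by beam_topk at its final step;
# translate_beam below sorts these scores to rank the candidates.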
class NMT(object):
vocab: object
def __init__(self, vocab_file):
self.vocab = torch.load(vocab_file)
self.SRC_VOCAB_SIZE = len(self.vocab[SRC_LANGUAGE])
self.TGT_VOCAB_SIZE = len(self.vocab[TGT_LANGUAGE])
self.src_transform = sequential_transforms(jpn_tokenizer, # Tokenization
# Numericalization
self.vocab[SRC_LANGUAGE],
tensor_transform) # Add SOS/EOS and create tensor
self.EMB_SIZE = 512
self.NHEAD = 8
self.FFN_HID_DIM = 512
self.BATCH_SIZE = 128
self.NUM_ENCODER_LAYERS = 3
self.NUM_DECODER_LAYERS = 3
self.transformer = Seq2SeqTransformer(self.NUM_ENCODER_LAYERS, self.NUM_DECODER_LAYERS,
self.EMB_SIZE, self.NHEAD, self.SRC_VOCAB_SIZE, self.TGT_VOCAB_SIZE,
self.FFN_HID_DIM)
for p in self.transformer.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self.transformer = self.transformer.to(DEVICE)
def load(self, trained_model):
self.transformer.load_state_dict(torch.load(trained_model))
def translate_beam(self, src_sentence: str, beamsize=5):
"""
        Return multiple translation candidates as a list.
"""
pred_list = []
self.transformer.eval()
src = self.src_transform(src_sentence).view(-1, 1)
num_tokens = src.shape[0]
src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)
tgt_tokens = beam_decode(
self.transformer, src, src_mask, max_len=num_tokens + 5, beamsize=beamsize, start_symbol=SOS_IDX)
prob_list = list(tgt_tokens.values())
tgt_tokens = list(tgt_tokens.keys())
for idx in list(reversed(np.argsort(prob_list).tolist())):
pred_list.append(" ".join(self.vocab[TGT_LANGUAGE].lookup_tokens(
list(tgt_tokens[idx].cpu().numpy()))).replace("<sos>", "").replace("<eos>", ""))
return pred_list, sorted(prob_list, reverse=True)
special_token = ['<A>', '<B>', '<C>', '<D>', '<E>']
def make_pynmt(model_id='1zMTrsmcyF2oXpWKe0bIZ7Ej1JBjVq7np', vocab_id='13C39jfdkkmE2mx-1K9PFXqGST84j-mz8', model_file='./model_DS.pt', vocab_file="./vocab_obj_DS.pth"):
GoogleDriveDownloader.download_file_from_google_drive(
file_id=model_id, dest_path=model_file, unzip=False)
GoogleDriveDownloader.download_file_from_google_drive(
file_id=vocab_id, dest_path=vocab_file, unzip=False)
nmt = NMT(vocab_file)
nmt.load(model_file)
def pynmt(sentence):
# candidate = re.findall(r'[a-zA-Z"\']+', sentence)
# for idx in range(len(candidate)):
# sentence = sentence.replace(candidate[idx], special_token[idx])
# print(sentence)
pred, prob = nmt.translate_beam(sentence)
return pred, prob
# print(pred)
# print(prob)
return pynmt
| 36.14
| 168
| 0.620365
|
c1de78b4426b8252cb0fbfcd0fad10e147eb59c5
| 426
|
py
|
Python
|
examples/getVersion.py
|
lelongfds/xym-ledger-app
|
5f2e1045abc4a2995cd62b1b13ed2b6c8dec66a3
|
[
"Apache-2.0"
] | 2
|
2019-06-05T10:42:07.000Z
|
2019-07-23T14:58:26.000Z
|
examples/getVersion.py
|
lelongfds/xym-ledger-app
|
5f2e1045abc4a2995cd62b1b13ed2b6c8dec66a3
|
[
"Apache-2.0"
] | null | null | null |
examples/getVersion.py
|
lelongfds/xym-ledger-app
|
5f2e1045abc4a2995cd62b1b13ed2b6c8dec66a3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from ledgerblue.comm import getDongle
# Create APDU message.
# CLA 0xE0
# INS 0x06 GET_APP_CONFIGURATION
# P1 0x00 NO USER CONFIRMATION
# P2 0x00 NO CHAIN CODE
apduMessage = "E0060000ff"
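# Byte-by-byte breakdown of the APDU above (illustrative; the trailing 0xff byte is presumably a
# length field, i.e. the maximum expected response length):
#   E0 = CLA, 06 = INS (GET_APP_CONFIGURATION), 00 = P1, 00 = P2, ff = length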
print("-= NEM Ledger =-")
print("Request app Version")
dongle = getDongle(True)
result = dongle.exchange(bytearray.fromhex(apduMessage))
print('Version={:d}.{:d}.{:d}'.format(result[1],result[2],result[3]))
| 22.421053
| 69
| 0.71831
|
2f6e9e8b7bf146aa6caa9144ea695188e32cd412
| 7,532
|
py
|
Python
|
scripts/automation/regression/astf_tests/astf_resilience_test.py
|
timgates42/trex-core
|
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
|
[
"Apache-2.0"
] | 956
|
2015-06-24T15:04:55.000Z
|
2022-03-30T06:25:04.000Z
|
scripts/automation/regression/astf_tests/astf_resilience_test.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 782
|
2015-09-20T15:19:00.000Z
|
2022-03-31T23:52:05.000Z
|
scripts/automation/regression/astf_tests/astf_resilience_test.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 429
|
2015-06-27T19:34:21.000Z
|
2022-03-23T11:02:51.000Z
|
import os, sys
import time
import random
import string
from .astf_general_test import CASTFGeneral_Test, CTRexScenario
from nose.tools import assert_raises
from trex.astf.api import *
from trex.stl.trex_stl_packet_builder_scapy import ip2int, int2ip
class ASTFResilience_Test(CASTFGeneral_Test):
"""Checking stability of ASTF in non-usual conditions """
def setUp(self):
CASTFGeneral_Test.setUp(self)
self.weak = self.is_VM
setup = CTRexScenario.setup_name
if 'no_resilience' in CTRexScenario.modes:
self.skip('not enough memory for this test')
if setup in ['trex12']:
self.weak = True
self.low_memory = self.weak
if setup in ['trex41']:
            self.low_memory = True  # trex-41 uses the memory for the driver and crashes
def ip_gen(self, client_base, server_base, client_ips, server_ips):
assert client_ips>0
assert server_ips>0
ip_gen_c = ASTFIPGenDist(ip_range = [client_base, int2ip(ip2int(client_base) + client_ips - 1)])
ip_gen_s = ASTFIPGenDist(ip_range = [server_base, int2ip(ip2int(server_base) + server_ips - 1)])
return ASTFIPGen(dist_client = ip_gen_c,
dist_server = ip_gen_s)
def progs_gen(self, msg_len = 16):
msg = 'x' * msg_len
prog_c = ASTFProgram(side = 'c')
prog_c.send(msg)
prog_c.recv(len(msg))
prog_s = ASTFProgram(side = 's')
prog_s.recv(len(msg))
#prog_s.delay(15000000)
prog_s.send(msg)
return prog_c, prog_s
def profile_gen(self, client_ips, server_ips, templates):
ip_gen = self.ip_gen('16.0.0.1', '48.0.0.1', client_ips, server_ips)
prog_c, prog_s = self.progs_gen()
templates_arr = []
for i in range(templates):
temp_c = ASTFTCPClientTemplate(program = prog_c, ip_gen = ip_gen, cps = i + 1)
temp_s = ASTFTCPServerTemplate(program = prog_s, assoc = ASTFAssociationRule(port = 80 + i))
template = ASTFTemplate(client_template = temp_c, server_template = temp_s)
templates_arr.append(template)
return ASTFProfile(default_ip_gen = ip_gen, templates = templates_arr)
def test_astf_params(self):
print('')
for client_ips in (1<<8, 1<<16):
for server_ips in (1<<8, 1<<16):
for templates in (1, 1<<8, 1<<12):
if self.weak and templates > 1<<8:
continue
if self.weak:
if (client_ips > (1<<8)) and (server_ips >(1<<8)) :
continue;
params = {
'client_ips': client_ips,
'server_ips': server_ips,
'templates': templates,
}
print('Creating profile with params: %s' % params)
profile = self.profile_gen(**params)
profile_str = profile.to_json_str()
print('Profile size: %s' % len(profile_str))
start_time = time.time()
self.astf_trex.load_profile(profile)
print('Load took: %g' % round(time.time() - start_time, 3))
start_time = time.time()
self.astf_trex.start(duration = 1, nc = True)
print('Start took: %g' % round(time.time() - start_time, 3))
self.astf_trex.stop()
def randomString(self, stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def test_astf_params_dynamic_profile(self):
print('')
for client_ips in (1<<8, 1<<16):
for server_ips in (1<<8, 1<<16):
for templates in (1, 1<<8, 1<<12):
if self.weak:
if ( (templates > 1<<8) or
(server_ips > 1<<8 ) or
(client_ips > 1<<8 ) ):
continue
params = {
'client_ips': client_ips,
'server_ips': server_ips,
'templates': templates,
}
print('Creating profile with params: %s' % params)
profile = self.profile_gen(**params)
profile_str = profile.to_json_str()
print('Profile size: %s' % len(profile_str))
start_time = time.time()
print('Creating random name for the dynamic profile')
random_profile = self.randomString()
print('Dynamic profile name : %s' % str(random_profile))
self.astf_trex.load_profile(profile, pid_input=str(random_profile))
print('Load took: %g' % round(time.time() - start_time, 3))
start_time = time.time()
self.astf_trex.start(duration = 1, nc = True, pid_input=str(random_profile))
print('Start took: %g' % round(time.time() - start_time, 3))
self.astf_trex.stop(pid_input=str(random_profile))
self.astf_trex.reset()
def test_double_start_stop(self):
print('')
c = self.astf_trex
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'))
c.start(duration = 20)
with assert_raises(TRexError):
c.start()
c.stop()
c.stop()
def test_double_start_stop_dynamic_profile(self):
print('')
c = self.astf_trex
random_profile_1 = self.randomString()
print('Dynamic profile(1) : %s' % str(random_profile_1))
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'), pid_input=str(random_profile_1))
c.start(duration = 20, pid_input=str(random_profile_1))
with assert_raises(TRexError):
random_profile_2 = self.randomString()
print('Dynamic profile(2) : %s' % str(random_profile_2))
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'), pid_input=str(random_profile_2))
c.start(pid_input=str(random_profile_2))
c.stop(pid_input=str(random_profile_1))
c.stop(pid_input=str(random_profile_2))
def test_stress_start_stop(self):
print('')
c = self.astf_trex
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'))
for _ in range(99):
c.start()
c.stop()
def test_stress_start_stop_dynamic_profile(self):
print('')
c = self.astf_trex
        profiles = 1000
if self.low_memory:
profiles = 100
for n in range(profiles):
profile_n = self.randomString()
port_n = 9000 + n
tunables = {'port': port_n}
print('Dynamic profile added : %s' % str(profile_n))
print('Port added : %s' % str(port_n))
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'http_simple_port_tunable.py'),
tunables=tunables,
pid_input=str(profile_n))
c.start(duration = 20, pid_input=str(profile_n))
c.wait_on_traffic()
c.stop(pid_input = "*")
c.reset()
| 37.849246
| 120
| 0.555762
|
8822ec0893e103a7aa1ed60c5fd17fd823b3295c
| 1,476
|
py
|
Python
|
translation.py
|
warrenregister/GEMSEC_DNA_Seq
|
78dba5da2378d78a13eb6ea0b9aa1f7da5d2104c
|
[
"MIT"
] | null | null | null |
translation.py
|
warrenregister/GEMSEC_DNA_Seq
|
78dba5da2378d78a13eb6ea0b9aa1f7da5d2104c
|
[
"MIT"
] | null | null | null |
translation.py
|
warrenregister/GEMSEC_DNA_Seq
|
78dba5da2378d78a13eb6ea0b9aa1f7da5d2104c
|
[
"MIT"
] | null | null | null |
""" Translation of DNA sequences into proteins from NGS"""
import numpy as np
import pandas as pd
import math
import itertools as it
import os
from time import time
#from time import time
def translate(seq):
"""
Translate DNA to protein
"""
table = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'Q',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',}
'''
ncontain = [''.join(x) for x in it.product('ACTGN',repeat=3)]
correct = [''.join(x) for x in it.product('ACTG',repeat=3)]
rejects=list(set(ncontain)-set(correct))
'''
peptide = ""
    # len(seq) - 1 accounts for the trailing newline character expected at the end of the sequence
    if (len(seq)-1)%3 == 0:
for i in range(0, len(seq), 3):
codon = seq[i : i+3]
if codon == '\n':
break
else:
peptide += table[codon]
return peptide
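# Hedged usage example (not part of the original script): the sequence is expected to end with a newline,
# so (len(seq) - 1) is a multiple of 3. "ATG" maps to M and "GCC" maps to A in the table above.
if __name__ == "__main__":
    print(translate("ATGGCC\n"))  # expected output: MA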
| 32.086957
| 65
| 0.440379
|
5afbcb2ed630511bd8040044c7e8043273f22b89
| 24,274
|
py
|
Python
|
Broca.py
|
vMarcelino/Broca
|
1fe56c85c9d0e08b943ddfb4bb92a10bda8a9440
|
[
"MIT"
] | null | null | null |
Broca.py
|
vMarcelino/Broca
|
1fe56c85c9d0e08b943ddfb4bb92a10bda8a9440
|
[
"MIT"
] | null | null | null |
Broca.py
|
vMarcelino/Broca
|
1fe56c85c9d0e08b943ddfb4bb92a10bda8a9440
|
[
"MIT"
] | null | null | null |
from enum import Enum
print("Projeto broca!")
showClass = 1 == 0
def MainCodeExecution():
# stringInput = "(" + input("Manual expression Override: ") + ")"
# print(stringInput)
ExpressionVariableDatabase() # Variable database initialization
e0 = ExpressionVariable("e0")
e1 = ExpressionVariable("e1")
e2 = ExpressionVariable("e2")
e3 = ExpressionVariable("e3")
e4 = ExpressionVariable("e4")
d0 = ExpressionVariable("d0")
d1 = ExpressionVariable("d1")
d2 = ExpressionVariable("d2")
d3 = ExpressionVariable("d3")
b0 = ExpressionVariable("b0")
b1 = ExpressionVariable("b1")
b2 = ExpressionVariable("b2")
b3 = ExpressionVariable("b3")
c0 = ExpressionBlock([e0, d0], Operators.XOR)
k0 = ExpressionBlock([ExpressionVariable("LITERAL", 0), ExpressionVariable("LITERAL", 0)], Operators.XOR)
k0 = k0.doMaths()
k1 = kTemplate(k0, c0, d0)
c1 = cTemplate(k1, e1, d1)
k2 = kTemplate(k1, c1, d1)
c2 = cTemplate(e2, d2, k2)
k3 = kTemplate(k2, d2, c2)
c3 = cTemplate(k3, d3, e3)
l0 = k0
a0 = ExpressionBlock([c0, b0], Operators.XOR)
l1 = kTemplate(l0, a0, b0)
a1 = cTemplate(l1, b1, c1)
l2 = kTemplate(l1, a1, b1)
a2 = cTemplate(l2, c2, b2)
l3 = kTemplate(l2, a2, b2)
a3 = cTemplate(l3, c3, b3)
functions = [c0, "c0", c1, "c1", c2, "c2", c3, "c3", k0, "k0", k1, "k1", k2, "k2", k3, "k3", a0, "a0", a1, "a1", a2, "a2", a3, "a3", l0, "l0", l1, "l1", l2,
"l2", l3, "l3"]
c0.optimize(True)
k1.optimize(True)
c1.optimize(True)
k2.optimize(True)
c2.optimize(True)
k3.optimize(True)
c3.optimize(True)
printThings(functions)
print("Running maths...")
doMaths(functions)
printThings(functions)
while True:
        ExpressionVariableDatabase.getVariableWithName(input("Variable name: ")).setValue(int(input("Variable value: ")))
doMaths(functions)
printThings(functions)
# a0 = a0.doMaths()
# a1 = a1.doMaths()
# a2 = a2.doMaths()
# a3 = a3.doMaths()
# print(a0.print())
# print(a1.print())
# print(a2.print())
# print(a3.print())
print("\n Funciona!!!")
def printThings(fncs):
for i in range(int(len(fncs) / 2)):
print(fncs[i * 2 + 1] + " = " + fncs[i * 2].print())
def doMaths(fncs):
for i in range(int(len(fncs) / 2)):
fncs[i * 2] = fncs[i * 2].doMaths()
def kTemplate(k, c, d):
kc = ExpressionBlock([k, c], Operators.AND)
kd = ExpressionBlock([k, d], Operators.AND)
cd = ExpressionBlock([c, d], Operators.AND)
return ExpressionBlock([kc, kd, cd], Operators.OR)
def cTemplate(e, d, k):
return ExpressionBlock([e, d, k], Operators.XOR)
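# Observation (added as a hedged note): kTemplate builds the majority function maj(k, c, d) = k*c + k*d + c*d
# and cTemplate builds the three-way XOR e ^ d ^ k; these are the classic carry/borrow and sum/difference
# building blocks of full-adder/full-subtractor logic, which appears to be what the chains in
# MainCodeExecution wire together bit by bit.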
class ExpressionBlock:
# expressionBlocks = [] # ExpBlock
# basicExpressionBlocks = [] # BasicExpBlock
# operator = ""
# tempName = ""
def __init__(self, expressions, op):
self.operator = op
self.expressionBlocks = []
self.basicExpressionBlocks = []
for expression in expressions:
if type(expression) is ExpressionBlock:
expression = expression.formatXor()
self.expressionBlocks.append(expression)
elif type(expression) is BasicExpressionBlock:
if expression.operator == Operators.XOR and len(expression.expressionVariables) > 2:
expression = expression.formatXor()
self.expressionBlocks.append(expression)
else:
self.basicExpressionBlocks.append(expression)
elif type(expression) is ExpressionVariable:
self.basicExpressionBlocks.append(BasicExpressionBlock([expression], Operators.NoOperator))
elif type(expression) is Not:
self.expressionBlocks.append(expression)
self.optimize()
newSelf = self.formatXor(checkChild=True)
self.basicExpressionBlocks = newSelf.basicExpressionBlocks
self.expressionBlocks = newSelf.expressionBlocks
self.operator = newSelf.operator
newSelf = self.convertXor()
self.basicExpressionBlocks = newSelf.basicExpressionBlocks
self.expressionBlocks = newSelf.expressionBlocks
self.operator = newSelf.operator
def __str__(self):
return self.print("")
def print(self, str=""):
str += "E(" if showClass else "("
i = 0
for expression in self.expressionBlocks + self.basicExpressionBlocks:
str += expression.print()
if i < (len(self.expressionBlocks) + len(self.basicExpressionBlocks) - 1):
str += " " + self.operator.value + " "
i += 1
str += ")"
return str
def optimize(self, fullOptimization=False): # Where maths are applied
if fullOptimization:
for block in self.expressionBlocks + self.basicExpressionBlocks:
block.optimize(fullOptimization)
# Transform Expression Blocks (EB or EBo) with one Bae into one Bae
for i in range(len(self.expressionBlocks)):
if type(self.expressionBlocks[i]) is not Not:
if len(self.expressionBlocks[i].expressionBlocks) == 0 and len(self.expressionBlocks[i].basicExpressionBlocks) == 1:
self.basicExpressionBlocks.append(self.expressionBlocks[i].basicExpressionBlocks[0])
self.expressionBlocks[i] = ExpressionBlock([], Operators.NoOperator)
# Remove empty Expression Blocks (EB or EBo)
r = 0
for i in range(len(self.expressionBlocks) - r):
if i - r < len(self.expressionBlocks):
if type(self.expressionBlocks[i - r]) is Not:
continue
if len(self.expressionBlocks[i - r].expressionBlocks) == 0 and len(self.expressionBlocks[i - r].basicExpressionBlocks) == 0:
self.expressionBlocks.pop(i - r)
r += 1
# Join Basic Expression Blocks (BEB or BaEB or Bae) with the same operator into one (WRONG!!!!!)
# for i in range(len(self.basicExpressionBlocks) - 1):
# if self.basicExpressionBlocks[i].operator == Operators.NOT:
# continue
# if self.basicExpressionBlocks[i].operator == Operators.NoOperator and len(self.basicExpressionBlocks[i].expressionVariables) > 0:
# for j in range(i + 1, len(self.basicExpressionBlocks)):
# if self.basicExpressionBlocks[j].operator == Operators.NoOperator:
# self.basicExpressionBlocks[i] = BasicExpressionBlock((
# self.basicExpressionBlocks[i].expressionVariables +
# self.basicExpressionBlocks[j].expressionVariables),
# self.operator)
# self.basicExpressionBlocks[j] = BasicExpressionBlock([], Operators.NoOperator)
# if self.basicExpressionBlocks[i].operator != Operators.NoOperator:
# for j in range(i + 1, len(self.basicExpressionBlocks)):
# if self.basicExpressionBlocks[i].operator == self.basicExpressionBlocks[j].operator:
# self.basicExpressionBlocks[i].expressionVariables += self.basicExpressionBlocks[j].expressionVariables
# self.basicExpressionBlocks[j] = BasicExpressionBlock([], Operators.NoOperator)
r = 0
# Remove empty Basic Expression Blocks (BEB or BaEB or Bae)
for i in range(len(self.basicExpressionBlocks)):
if i - r < len(self.basicExpressionBlocks):
if len(self.basicExpressionBlocks[i - r].expressionVariables) == 0:
self.basicExpressionBlocks.pop(i - r)
r += 1
if len(self.expressionBlocks) == 1 and len(self.basicExpressionBlocks) == 0 and self.operator != Operators.NOT:
holder = self.expressionBlocks[0]
if type(holder) is not Not:
self.expressionBlocks = holder.expressionBlocks
self.basicExpressionBlocks = holder.basicExpressionBlocks
self.operator = holder.operator
def formatXor(self, checkChild=False):
if checkChild:
newBae = []
newEB = []
for expression in self.basicExpressionBlocks:
exp = expression.formatXor()
if type(exp) is BasicExpressionBlock:
newBae.append(exp)
elif type(exp) is ExpressionBlock:
newEB.append(exp)
for expression in self.expressionBlocks:
exp = expression.formatXor()
if type(exp) is BasicExpressionBlock:
newBae.append(exp)
elif type(exp) is ExpressionBlock:
newEB.append(exp)
else:
newEB.append(exp)
self.basicExpressionBlocks = newBae
self.expressionBlocks = newEB
if self.operator == Operators.XOR and len(self.basicExpressionBlocks + self.expressionBlocks) > 2:
remainingEB = self.expressionBlocks + self.basicExpressionBlocks
exp = []
for i in range(1, len(remainingEB)):
exp.append(remainingEB[i])
remainingEB = list(set(remainingEB) - set(exp))
return ExpressionBlock(remainingEB + [ExpressionBlock(exp, Operators.XOR)], Operators.XOR)
else:
return self
def convertXor(self):
for i in range(len(self.expressionBlocks)):
self.expressionBlocks[i] = self.expressionBlocks[i].convertXor()
for i in range(len(self.basicExpressionBlocks)):
self.basicExpressionBlocks[i] = self.basicExpressionBlocks[i].convertXor()
if len(self.expressionBlocks + self.basicExpressionBlocks) != 2 and self.operator == Operators.XOR:
print("WRONG!!!!!!!!!!!!!!!!!!!!!")
self.optimize()
self.formatXor()
self.optimize()
if self.operator != Operators.XOR:
return self
else:
# A XOR B = (A + B) * !(A * B)
A = (self.expressionBlocks + self.basicExpressionBlocks)[0]
B = (self.expressionBlocks + self.basicExpressionBlocks)[1]
return ExpressionBlock([
ExpressionBlock([
A,
B],
Operators.OR),
Not(
ExpressionBlock([
A,
B],
Operators.AND))],
Operators.AND)
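    # Hedged note on the identity used above: A XOR B = (A + B) * !(A * B), i.e. (A OR B) AND NOT(A AND B).
    # Truth table check:
    #   A=0, B=0: (0) AND NOT(0) = 0    A=0, B=1: (1) AND NOT(0) = 1
    #   A=1, B=0: (1) AND NOT(0) = 1    A=1, B=1: (1) AND NOT(1) = 0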
def doMaths(self):
expressions = []
for expression in self.expressionBlocks + self.basicExpressionBlocks:
expressions.append(expression.doMaths())
if len(expressions) > 1:
if self.operator == Operators.XOR:
if expressions[0].isDefined() and expressions[1].isDefined():
v0, v1 = expressions[0].getValue(), expressions[1].getValue()
val = v0 + v1 - 2 * v0 * v1
return BasicExpressionBlock([ExpressionVariable("LITERAL", val)], Operators.NoOperator).doMaths()
elif expressions[1].isDefined():
if expressions[1].getValue() == 1:
return Not(expressions[0]).doMaths()
else:
return ExpressionBlock([expressions[0]], self.operator)
elif expressions[0].isDefined():
if expressions[0].getValue() == 1:
return Not(expressions[1]).doMaths()
else:
return ExpressionBlock([expressions[1]], self.operator)
else:
return ExpressionBlock(expressions, self.operator)
if self.operator == Operators.AND:
isZero = False
remainingVariables = []
for expressionVariable in expressions:
if expressionVariable.getValue() == 0:
isZero = True
break
elif not expressionVariable.isDefined():
remainingVariables.append(expressionVariable)
remainingVariables = list(set(remainingVariables))
if isZero:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 0)], Operators.NoOperator)
elif len(remainingVariables) == 0:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 1)], Operators.NoOperator)
else:
return ExpressionBlock(remainingVariables, Operators.AND)
if self.operator == Operators.OR:
isOne = False
remainingVariables = []
for expressionVariable in expressions:
if expressionVariable.isDefined():
if expressionVariable.getValue() == 1:
isOne = True
break
elif not expressionVariable.isDefined():
remainingVariables.append(expressionVariable)
remainingVariables = list(set(remainingVariables))
if isOne:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 1)], Operators.NoOperator)
elif len(remainingVariables) == 0:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 0)], Operators.NoOperator)
else:
return ExpressionBlock(remainingVariables, Operators.OR)
if self.operator == Operators.NOT:
return Not(expressions[0]).doMaths()
return ExpressionBlock(expressions, Operators.NoOperator)
def isDefined(self):
if len(self.expressionBlocks) == 0 and len(self.basicExpressionBlocks) == 1:
if len(self.basicExpressionBlocks[0].expressionVariables) == 1:
return self.basicExpressionBlocks[0].expressionVariables[0].isDefined()
return False
def getValue(self):
# afterMath = self.doMaths()
if len(self.expressionBlocks) == 0 and len(self.basicExpressionBlocks) == 1:
if len(self.basicExpressionBlocks[0].expressionVariables) == 1:
return self.basicExpressionBlocks[0].expressionVariables[0].getValue()
return -999
class Not:
def __init__(self, exp):
self.expression = exp
self.basic = type(exp) is ExpressionVariable
def __repr__(self):
return self.print("")
def __str__(self):
return self.print("")
def formatXor(self, checkChild=False):
self.expression = self.expression.formatXor(checkChild)
return self
def convertXor(self):
self.expression = self.expression.convertXor()
return self
def isDefined(self):
return self.expression.isDefined()
def getValue(self):
return self.expression.getValue()
def print(self, str=""):
str += "!" + self.expression.print("")
return str
def optimize(self, fullOptimize=False):
self.expression.optimize(fullOptimize)
def doMaths(self):
exp = optimize(self.expression.doMaths())
if type(exp) is Not:
return exp.expression
elif type(exp) is ExpressionVariable:
if exp.isDefined():
return BasicExpressionBlock([ExpressionVariable("LITERAL", 1 - exp.value)], Operators.NoOperator)
else:
return Not(exp)
elif type(exp) is BasicExpressionBlock:
if len(exp.expressionVariables) == 1 and exp.expressionVariables[0].isDefined():
return BasicExpressionBlock([ExpressionVariable("LITERAL", 1 - exp.expressionVariables[0].getValue())], Operators.NoOperator)
else:
return Not(exp)
else:
return Not(exp)
class BasicExpressionBlock:
# expressionVariables = [] # expVar
# operator = ""
def __init__(self, expvs, op):
self.expressionVariables = expvs
self.operator = op
self.optimize()
def __str__(self):
return self.print("")
def __repr__(self):
return self.print("")
def print(self, str=""):
if self.operator == Operators.NOT:
str += "!"
str += self.expressionVariables[0].print()
elif self.operator == Operators.NoOperator:
str += self.expressionVariables[0].print()
else:
str += "B(" if showClass else "("
for i in range(len(self.expressionVariables)):
str += self.expressionVariables[i].print()
if i < (len(self.expressionVariables) - 1):
str += " " + self.operator.value + " "
str += ")"
return str
def optimize(self, fullOptimize=False): # Where simple maths are applied
self.expressionVariables = list(set(self.expressionVariables))
def formatXor(self, checkChild=False):
if self.operator == Operators.XOR and len(self.expressionVariables) > 2:
remainingEV = self.expressionVariables
exp = []
for i in range(1, len(remainingEV)):
exp.append(remainingEV[i])
remainingEV = list(set(remainingEV) - set(exp))
remainingEV.append(ExpressionBlock(exp, Operators.XOR))
return ExpressionBlock(remainingEV, Operators.XOR)
else:
return self
def convertXor(self):
for i in range(len(self.expressionVariables)):
self.expressionVariables[i] = self.expressionVariables[i].convertXor()
if self.operator != Operators.XOR:
return self
else:
A = self.expressionVariables[0]
B = self.expressionVariables[1]
return ExpressionBlock([
ExpressionBlock([
A,
B],
Operators.OR),
Not(
ExpressionBlock([
A,
B],
Operators.AND))],
Operators.AND)
def doMaths(self):
if self.operator == Operators.XOR:
if self.expressionVariables[0].isDefined() and self.expressionVariables[1].isDefined():
v0, v1 = self.expressionVariables[0].value, self.expressionVariables[1].value
val = v0 + v1 - 2 * v0 * v1
return BasicExpressionBlock([ExpressionVariable("LITERAL", val)], Operators.NoOperator).doMaths()
elif self.expressionVariables[0].isDefined() or self.expressionVariables[1].isDefined():
v0, v1 = self.expressionVariables[0].value, self.expressionVariables[1].value
if v0 == -1:
if v1 == 1:
return Not(self.expressionVariables[0]).doMaths()
else:
return BasicExpressionBlock([self.expressionVariables[0]], Operators.NoOperator)
else:
if v0 == 1:
return Not(self.expressionVariables[1]).doMaths()
else:
return BasicExpressionBlock([self.expressionVariables[1]], Operators.NoOperator)
else:
return self
if self.operator == Operators.AND:
isZero = False
remainingVariables = []
for expressionVariable in self.expressionVariables:
if expressionVariable.value == 0:
isZero = True
break
elif not expressionVariable.isDefined():
remainingVariables.append(expressionVariable)
remainingVariables = list(set(remainingVariables))
if isZero:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 0)], Operators.NoOperator)
elif len(remainingVariables) == 0:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 1)], Operators.NoOperator)
else:
return BasicExpressionBlock(remainingVariables, Operators.AND)
if self.operator == Operators.OR:
isOne = False
remainingVariables = []
for expressionVariable in self.expressionVariables:
if expressionVariable.value == 1:
isOne = True
break
elif not expressionVariable.isDefined():
remainingVariables.append(expressionVariable)
remainingVariables = list(set(remainingVariables))
if isOne:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 1)], Operators.NoOperator)
elif len(remainingVariables) == 0:
return BasicExpressionBlock([ExpressionVariable("LITERAL", 0)], Operators.NoOperator)
else:
return BasicExpressionBlock(remainingVariables, Operators.OR)
if self.operator == Operators.NOT:
return Not(self.expressionVariables[0]).doMaths()
return self
def isDefined(self):
afterMath = self.doMaths()
if type(afterMath) is BasicExpressionBlock:
return len(afterMath.expressionVariables) == 1 and afterMath.expressionVariables[0].isDefined()
if type(afterMath) is Not:
return afterMath.expression.isDefined()
return False
def getValue(self):
afterMath = self.doMaths()
if type(afterMath) is BasicExpressionBlock:
if len(afterMath.expressionVariables) == 1 and afterMath.expressionVariables[0].isDefined():
return afterMath.expressionVariables[0].getValue()
if type(afterMath) is Not:
return afterMath.expression.getValue()
return -999
class ExpressionVariable:
# variableName = "" # string
# value = "" # bool?
def __init__(self, name, val=-1, toDb=True):
self.variableName = name
if name == "LITERAL":
toDb = False
self.value = val
self.index = len(ExpressionVariableDatabase.expressionVariables)
self.isdefined = val != -1
if toDb: ExpressionVariableDatabase.expressionVariables.append(self)
def __str__(self):
return self.print("")
def __repr__(self):
return self.print("")
def print(self, str=""):
if self.value == -1:
str += self.variableName
else:
str += self.value.__str__()
return str
def setValue(self, val):
self.value = val
self.isdefined = val != -1
def getValue(self):
return self.value
def optimize(self, fullOptimize=False):
return
def isDefined(self):
return self.isdefined
def convertXor(self):
return self
class ExpressionVariableDatabase:
expressionVariables = [] # expVar
    def __init__(self, expvs=None):
        # None as the default avoids the magic-number sentinel and the mutable-default pitfall
        if expvs is None:
            expvs = []
        self.expressionVariables = expvs
@staticmethod
def getVariableWithName(name):
for var in ExpressionVariableDatabase.expressionVariables:
if var.variableName == name: return var
class Operators(Enum):
NOT = "NOT"
AND = "AND"
OR = "OR"
XOR = "XOR"
NoOperator = ""
def optimize(expression):
if type(expression) is ExpressionBlock:
if len(expression.expressionBlocks) == 0 and len(expression.basicExpressionBlocks) == 1:
return expression.basicExpressionBlocks[0]
elif len(expression.expressionBlocks) == 1 and len(expression.basicExpressionBlocks) == 0:
return expression.expressionBlocks[0]
else:
return expression
if type(expression) is BasicExpressionBlock:
return expression
if type(expression) is Not:
return optimize(expression.expression)
if type(expression) is ExpressionVariable:
return expression
if __name__ == "__main__":
MainCodeExecution()
| 37.809969
| 160
| 0.582763
|
609223c0b5451eaeaf7df32ddd633c2b00c79cfa
| 3,108
|
py
|
Python
|
zombie/migrations/0003_auto_20180414_1357.py
|
davidpettersson/klimatbanta
|
ac963b8a0b0799eacbfe7c0b8cc58ec2433d4dc4
|
[
"MIT"
] | null | null | null |
zombie/migrations/0003_auto_20180414_1357.py
|
davidpettersson/klimatbanta
|
ac963b8a0b0799eacbfe7c0b8cc58ec2433d4dc4
|
[
"MIT"
] | null | null | null |
zombie/migrations/0003_auto_20180414_1357.py
|
davidpettersson/klimatbanta
|
ac963b8a0b0799eacbfe7c0b8cc58ec2433d4dc4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-14 11:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('zombie', '0002_auto_20180414_1356'),
]
operations = [
migrations.CreateModel(
name='AbstractEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('co2_cost', models.FloatField(verbose_name='carbondioxide cost')),
('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_zombie.abstractentry_set+', to='contenttypes.ContentType')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'manager_inheritance_from_future': True,
},
),
migrations.RemoveField(
model_name='foodentry',
name='co2_cost',
),
migrations.RemoveField(
model_name='foodentry',
name='created',
),
migrations.RemoveField(
model_name='foodentry',
name='id',
),
migrations.RemoveField(
model_name='foodentry',
name='polymorphic_ctype',
),
migrations.RemoveField(
model_name='foodentry',
name='user',
),
migrations.RemoveField(
model_name='travelentry',
name='co2_cost',
),
migrations.RemoveField(
model_name='travelentry',
name='created',
),
migrations.RemoveField(
model_name='travelentry',
name='id',
),
migrations.RemoveField(
model_name='travelentry',
name='polymorphic_ctype',
),
migrations.RemoveField(
model_name='travelentry',
name='user',
),
migrations.AddField(
model_name='foodentry',
name='abstractentry_ptr',
field=models.OneToOneField(auto_created=True, default=None, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='zombie.AbstractEntry'),
preserve_default=False,
),
migrations.AddField(
model_name='travelentry',
name='abstractentry_ptr',
field=models.OneToOneField(auto_created=True, default=None, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='zombie.AbstractEntry'),
preserve_default=False,
),
]
| 36.564706
| 214
| 0.600708
|
2473182eb2e998e9b548c7a46e8886e9df1edcfe
| 12,171
|
py
|
Python
|
pw_tokenizer/py/elf_reader_test.py
|
curtin-space/pigweed
|
fe2e1743e03fabd2676f01d9de0ac9d34a426076
|
[
"Apache-2.0"
] | 86
|
2021-03-09T23:49:40.000Z
|
2022-03-30T08:14:51.000Z
|
pw_tokenizer/py/elf_reader_test.py
|
curtin-space/pigweed
|
fe2e1743e03fabd2676f01d9de0ac9d34a426076
|
[
"Apache-2.0"
] | 4
|
2021-07-27T20:32:03.000Z
|
2022-03-08T10:39:07.000Z
|
pw_tokenizer/py/elf_reader_test.py
|
curtin-space/pigweed
|
fe2e1743e03fabd2676f01d9de0ac9d34a426076
|
[
"Apache-2.0"
] | 22
|
2021-03-11T15:15:47.000Z
|
2022-02-09T06:16:36.000Z
|
#!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests the ELF reader Python module."""
import io
import os
import re
import unittest
from pw_tokenizer import elf_reader
# Output from the following command:
#
# readelf -WS elf_reader_test_binary.elf
#
TEST_READELF_OUTPUT = ("""
There are 33 section headers, starting at offset 0x1758:
Section Headers:
[Nr] Name Type Address Off Size ES Flg Lk Inf Al
[ 0] NULL 0000000000000000 000000 000000 00 0 0 0
[ 1] .interp PROGBITS 0000000000000238 000238 00001c 00 A 0 0 1
[ 2] .note.ABI-tag NOTE 0000000000000254 000254 000020 00 A 0 0 4
[ 3] .note.gnu.build-id NOTE 0000000000000274 000274 000024 00 A 0 0 4
[ 4] .dynsym DYNSYM 0000000000000298 000298 0000a8 18 A 5 1 8
[ 5] .dynstr STRTAB 0000000000000340 000340 00009b 00 A 0 0 1
[ 6] .gnu.hash GNU_HASH 00000000000003e0 0003e0 00001c 00 A 4 0 8
[ 7] .gnu.version VERSYM 00000000000003fc 0003fc 00000e 02 A 4 0 2
[ 8] .gnu.version_r VERNEED 000000000000040c 00040c 000020 00 A 5 1 4
[ 9] .rela.dyn RELA 0000000000000430 000430 0000d8 18 A 4 0 8
[10] .rela.plt RELA 0000000000000508 000508 000018 18 AI 4 12 8
[11] .init PROGBITS 0000000000000520 000520 000017 00 AX 0 0 4
[12] .plt PROGBITS 0000000000000540 000540 000020 10 AX 0 0 16
[13] .text PROGBITS 0000000000000560 000560 000151 00 AX 0 0 16
[14] .fini PROGBITS 00000000000006b4 0006b4 000009 00 AX 0 0 4
[15] .rodata PROGBITS 00000000000006c0 0006c0 000004 04 AM 0 0 4
[16] .test_section_1 PROGBITS 00000000000006d0 0006d0 000010 00 A 0 0 16
[17] .test_section_2 PROGBITS 00000000000006e0 0006e0 000004 00 A 0 0 4
[18] .eh_frame X86_64_UNWIND 00000000000006e8 0006e8 0000d4 00 A 0 0 8
[19] .eh_frame_hdr X86_64_UNWIND 00000000000007bc 0007bc 00002c 00 A 0 0 4
[20] .fini_array FINI_ARRAY 0000000000001d80 000d80 000008 08 WA 0 0 8
[21] .init_array INIT_ARRAY 0000000000001d88 000d88 000008 08 WA 0 0 8
[22] .dynamic DYNAMIC 0000000000001d90 000d90 000220 10 WA 5 0 8
[23] .got PROGBITS 0000000000001fb0 000fb0 000030 00 WA 0 0 8
[24] .got.plt PROGBITS 0000000000001fe0 000fe0 000020 00 WA 0 0 8
[25] .data PROGBITS 0000000000002000 001000 000010 00 WA 0 0 8
[26] .tm_clone_table PROGBITS 0000000000002010 001010 000000 00 WA 0 0 8
[27] .bss NOBITS 0000000000002010 001010 000001 00 WA 0 0 1
[28] .comment PROGBITS 0000000000000000 001010 00001d 01 MS 0 0 1
[29] .note.gnu.gold-version NOTE 0000000000000000 001030 00001c 00 0 0 4
[30] .symtab SYMTAB 0000000000000000 001050 000390 18 31 21 8
[31] .strtab STRTAB 0000000000000000 0013e0 000227 00 0 0 1
[32] .shstrtab STRTAB 0000000000000000 001607 00014a 00 0 0 1
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings), I (info),
L (link order), O (extra OS processing required), G (group), T (TLS),
C (compressed), x (unknown), o (OS specific), E (exclude),
l (large), p (processor specific)
""")
TEST_ELF_PATH = os.path.join(os.path.dirname(__file__),
'elf_reader_test_binary.elf')
class ElfReaderTest(unittest.TestCase):
"""Tests the elf_reader.Elf class."""
def setUp(self):
super().setUp()
self._elf_file = open(TEST_ELF_PATH, 'rb')
self._elf = elf_reader.Elf(self._elf_file)
def tearDown(self):
super().tearDown()
self._elf_file.close()
def _section(self, name):
return next(self._elf.sections_with_name(name))
def test_readelf_comparison_using_the_readelf_binary(self):
"""Compares elf_reader to readelf's output."""
parse_readelf_output = re.compile(r'\s+'
r'\[\s*(?P<number>\d+)\]\s+'
r'(?P<name>\.\S*)?\s+'
r'(?P<type>\S+)\s+'
r'(?P<addr>[0-9a-fA-F]+)\s+'
r'(?P<offset>[0-9a-fA-F]+)\s+'
r'(?P<size>[0-9a-fA-F]+)\s+')
readelf_sections = []
for number, name, _, addr, offset, size in parse_readelf_output.findall(
TEST_READELF_OUTPUT):
readelf_sections.append((
int(number),
name or '',
int(addr, 16),
int(offset, 16),
int(size, 16),
))
self.assertEqual(len(readelf_sections), 33)
self.assertEqual(len(readelf_sections), len(self._elf.sections))
for (index,
section), readelf_section in zip(enumerate(self._elf.sections),
readelf_sections):
readelf_index, name, address, offset, size = readelf_section
self.assertEqual(index, readelf_index)
self.assertEqual(section.name, name)
self.assertEqual(section.address, address)
self.assertEqual(section.offset, offset)
self.assertEqual(section.size, size)
def test_dump_single_section(self):
self.assertEqual(self._elf.dump_section_contents(r'\.test_section_1'),
b'You cannot pass\0')
self.assertEqual(self._elf.dump_section_contents(r'\.test_section_2'),
b'\xef\xbe\xed\xfe')
def test_dump_multiple_sections(self):
if (self._section('.test_section_1').address <
self._section('.test_section_2').address):
contents = b'You cannot pass\0\xef\xbe\xed\xfe'
else:
contents = b'\xef\xbe\xed\xfeYou cannot pass\0'
self.assertIn(self._elf.dump_section_contents(r'.test_section_\d'),
contents)
def test_read_values(self):
address = self._section('.test_section_1').address
self.assertEqual(self._elf.read_value(address), b'You cannot pass')
int32_address = self._section('.test_section_2').address
self.assertEqual(self._elf.read_value(int32_address, 4),
b'\xef\xbe\xed\xfe')
def test_read_string(self):
bytes_io = io.BytesIO(
b'This is a null-terminated string\0No terminator!')
self.assertEqual(elf_reader.read_c_string(bytes_io),
b'This is a null-terminated string')
self.assertEqual(elf_reader.read_c_string(bytes_io), b'No terminator!')
self.assertEqual(elf_reader.read_c_string(bytes_io), b'')
def test_compatible_file_for_elf(self):
self.assertTrue(elf_reader.compatible_file(self._elf_file))
self.assertTrue(elf_reader.compatible_file(io.BytesIO(b'\x7fELF')))
def test_compatible_file_for_elf_start_at_offset(self):
self._elf_file.seek(13) # Seek ahead to get out of sync
self.assertTrue(elf_reader.compatible_file(self._elf_file))
self.assertEqual(13, self._elf_file.tell())
def test_compatible_file_for_invalid_elf(self):
self.assertFalse(elf_reader.compatible_file(io.BytesIO(b'\x7fELVESF')))
def _archive_file(data: bytes) -> bytes:
return ('FILE ID 90123456'
'MODIFIED 012'
'OWNER '
'GROUP '
'MODE 678'
f'{len(data):10}' # File size -- the only part that's needed.
'`\n'.encode() + data)
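# Note (added for clarity): the fields above mirror the classic Unix "ar" member header (60 bytes total):
# 16-byte name, 12-byte modification time, 6-byte owner, 6-byte group, 8-byte mode, 10-byte decimal size,
# and the 2-byte terminator "`\n". Per the inline comment, the size field is the only part the reader needs.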
class ArchiveTest(unittest.TestCase):
"""Tests reading from archive files."""
def setUp(self):
super().setUp()
with open(TEST_ELF_PATH, 'rb') as fd:
self._elf_data = fd.read()
self._archive_entries = b'blah', b'hello', self._elf_data
self._archive_data = elf_reader.ARCHIVE_MAGIC + b''.join(
_archive_file(f) for f in self._archive_entries)
self._archive = io.BytesIO(self._archive_data)
def test_compatible_file_for_archive(self):
self.assertTrue(elf_reader.compatible_file(io.BytesIO(b'!<arch>\n')))
self.assertTrue(elf_reader.compatible_file(self._archive))
def test_compatible_file_for_invalid_archive(self):
self.assertFalse(elf_reader.compatible_file(io.BytesIO(b'!<arch>')))
def test_iterate_over_files(self):
for expected, size in zip(self._archive_entries,
elf_reader.files_in_archive(self._archive)):
self.assertEqual(expected, self._archive.read(size))
def test_iterate_over_empty_archive(self):
with self.assertRaises(StopIteration):
next(iter(elf_reader.files_in_archive(io.BytesIO(b'!<arch>\n'))))
def test_iterate_over_invalid_archive(self):
with self.assertRaises(elf_reader.FileDecodeError):
for _ in elf_reader.files_in_archive(
io.BytesIO(b'!<arch>blah blahblah')):
pass
def test_extra_newline_after_entry_is_ignored(self):
archive = io.BytesIO(elf_reader.ARCHIVE_MAGIC +
_archive_file(self._elf_data) + b'\n' +
_archive_file(self._elf_data))
for size in elf_reader.files_in_archive(archive):
self.assertEqual(self._elf_data, archive.read(size))
def test_two_extra_newlines_parsing_fails(self):
archive = io.BytesIO(elf_reader.ARCHIVE_MAGIC +
_archive_file(self._elf_data) + b'\n\n' +
_archive_file(self._elf_data))
with self.assertRaises(elf_reader.FileDecodeError):
for size in elf_reader.files_in_archive(archive):
self.assertEqual(self._elf_data, archive.read(size))
def test_iterate_over_archive_with_invalid_size(self):
data = elf_reader.ARCHIVE_MAGIC + _archive_file(b'$' * 3210)
file = io.BytesIO(data)
# Iterate over the file normally.
for size in elf_reader.files_in_archive(file):
self.assertEqual(b'$' * 3210, file.read(size))
# Replace the size with a hex number, which is not valid.
with self.assertRaises(elf_reader.FileDecodeError):
for _ in elf_reader.files_in_archive(
io.BytesIO(data.replace(b'3210', b'0x99'))):
pass
def test_elf_reader_dump_single_section(self):
elf = elf_reader.Elf(self._archive)
self.assertEqual(elf.dump_section_contents(r'\.test_section_1'),
b'You cannot pass\0')
self.assertEqual(elf.dump_section_contents(r'\.test_section_2'),
b'\xef\xbe\xed\xfe')
def test_elf_reader_read_values(self):
elf = elf_reader.Elf(self._archive)
address = next(elf.sections_with_name('.test_section_1')).address
self.assertEqual(elf.read_value(address), b'You cannot pass')
int32_address = next(elf.sections_with_name('.test_section_2')).address
self.assertEqual(elf.read_value(int32_address, 4), b'\xef\xbe\xed\xfe')
if __name__ == '__main__':
unittest.main()
| 45.58427
| 93
| 0.608989
|
5ad94ae7519f6892ff7e98adad40c4f3917dd270
| 204
|
py
|
Python
|
get-sender-id.py
|
fakegit/trsh
|
f34dda6e6c9beec307fc04962e03bbe4e9d521a5
|
[
"MIT"
] | 68
|
2017-04-23T11:04:00.000Z
|
2022-02-04T22:23:53.000Z
|
get-sender-id.py
|
fakegit/trsh
|
f34dda6e6c9beec307fc04962e03bbe4e9d521a5
|
[
"MIT"
] | 2
|
2018-03-31T20:01:47.000Z
|
2021-11-18T20:48:30.000Z
|
get-sender-id.py
|
fakegit/trsh
|
f34dda6e6c9beec307fc04962e03bbe4e9d521a5
|
[
"MIT"
] | 27
|
2017-05-01T07:50:19.000Z
|
2021-11-29T05:19:57.000Z
|
#!/usr/bin/python
import telepot
from pprint import pprint
bot = telepot.Bot('TG-BOT-TOKEN')
response = bot.getUpdates()
# Print all raw messages with chat_id,text,type,username
pprint(response)
| 12
| 56
| 0.740196
|
da12fca16ee77391be661e1ed24115c186af6262
| 528
|
py
|
Python
|
h2o-hadoop-common/tests/python/pyunit_read_invalid_file.py
|
kernelrich/h2o-3
|
16bd6be6d0ac22b037cb55b4c647e63e2b112e1e
|
[
"Apache-2.0"
] | 6,098
|
2015-05-22T02:46:12.000Z
|
2022-03-31T16:54:51.000Z
|
h2o-hadoop-common/tests/python/pyunit_read_invalid_file.py
|
kernelrich/h2o-3
|
16bd6be6d0ac22b037cb55b4c647e63e2b112e1e
|
[
"Apache-2.0"
] | 2,517
|
2015-05-23T02:10:54.000Z
|
2022-03-30T17:03:39.000Z
|
h2o-hadoop-common/tests/python/pyunit_read_invalid_file.py
|
kernelrich/h2o-3
|
16bd6be6d0ac22b037cb55b4c647e63e2b112e1e
|
[
"Apache-2.0"
] | 2,199
|
2015-05-22T04:09:55.000Z
|
2022-03-28T22:20:45.000Z
|
#!/usr/bin/env python
import sys
import os
sys.path.insert(1, os.path.join("../../../h2o-py"))
from tests import pyunit_utils
import h2o
def read_invalid_file():
try:
hdfs_path = 'hdfs:///user/jenkins/tests/invalid'
h2o.import_file(hdfs_path)
assert False, "Read of file, which does not exists was sucessfull. This is impossible"
except ValueError as ve:
print(ve)
pass
if __name__ == "__main__":
pyunit_utils.standalone_test(read_invalid_file)
else:
read_invalid_file()
| 22.956522
| 94
| 0.67803
|
134dea980359beedd3ea29dedfba5d6e04be334e
| 2,916
|
py
|
Python
|
Month 01/Week 01/Day 06/d.py
|
KevinKnott/Coding-Review
|
6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb
|
[
"MIT"
] | null | null | null |
Month 01/Week 01/Day 06/d.py
|
KevinKnott/Coding-Review
|
6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb
|
[
"MIT"
] | null | null | null |
Month 01/Week 01/Day 06/d.py
|
KevinKnott/Coding-Review
|
6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb
|
[
"MIT"
] | null | null | null |
# Kth Largest Element in an Array: https://leetcode.com/problems/kth-largest-element-in-an-array/
# Given an integer array nums and an integer k, return the kth largest element in the array.
# Note that it is the kth largest element in the sorted order, not the kth distinct element.
import heapq
import random
# Note I have done this problem a number of times recently
class solution1:
    # The first solution sorts, because the kth largest number in a sorted array sits at index len(nums) - k
    # Note: combined with removing duplicates, this could also give the kth largest unique number
    # This also relies on k being guaranteed to be in range (1 -> len(nums)), although we could add checks
def findKthLargest(self, nums, k):
nums.sort()
return nums[len(nums)-k]
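    # Illustrative example (assuming nums = [3, 2, 1, 5, 6, 4], k = 2): sorted -> [1, 2, 3, 4, 5, 6],
    # index len(nums) - k = 4 -> returns 5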
    # Other than sorting, which is O(n log n) time and O(1) extra space, we could use a heap: a max-heap pops
    # the largest element in O(log n) per pop, and the heap structure is built in place, so extra space stays O(1)
def findKthLargestHeap(self, nums, k):
heapq._heapify_max(nums)
while k > 1:
heapq._heappop_max(nums)
k -= 1
return heapq._heappop_max(nums)
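    # Side note (hedged): _heapify_max/_heappop_max are private CPython helpers; the public
    # heapq.nlargest(k, nums)[-1] would give the same answer in O(n log k) without relying on them.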
    # The final option is quickselect.
    # It takes advantage of two things: partitioning around a random element leaves that element in its correct
    # sorted position, so if it lands at index len(nums) - k we have the answer; otherwise we recurse into only
    # the side that can contain the target index (a binary-search-like narrowing)
def findKthLargestQuickSort(self, nums, k):
def helper(start, end):
pivot = random.randint(start, end)
nums[pivot], nums[end] = nums[end], nums[pivot]
pivot = end
slow = start
for i in range(start, end):
if nums[i] < nums[pivot]:
nums[i], nums[slow] = nums[slow], nums[i]
slow += 1
nums[slow], nums[pivot] = nums[pivot], nums[slow]
return slow # (nums, slow)
# aka binary search
def selection(target, start=0, end=len(nums)-1):
if start == end:
return nums[start]
potential = helper(start, end)
if target > potential:
return selection(target, potential + 1, end)
elif target < potential:
return selection(target, start, potential - 1)
else:
return nums[potential]
return selection(len(nums) - k)
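# Minimal usage sketch (added for illustration; the input below is an assumed example, and each call is
# expected to print 5):
if __name__ == "__main__":
    s = solution1()
    nums = [3, 2, 1, 5, 6, 4]
    print(s.findKthLargest(list(nums), 2))
    print(s.findKthLargestHeap(list(nums), 2))
    print(s.findKthLargestQuickSort(list(nums), 2))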
# Score Card
# Did I need hints? N
# Did you finish within 30 min? N
# Was the solution optimal? Y
# Were there any bugs? Y
# Unfortunately I am tired and missed a couple of things in my partition: I forgot that the slow index needs to
# start at `start`, since we are already narrowing the range binary-search style and don't need to scan the whole list
# 5 3 5 3 = 4
| 39.945205
| 162
| 0.63786
|
f82d1c64b62dbeefbdff3bec3b3fa44b0e5d3a63
| 8,349
|
py
|
Python
|
pickydict/PickyDict.py
|
florian-huber/pickydict
|
b4138a4d1edc1257c4aed0691d915ef42b89ebc9
|
[
"MIT"
] | 4
|
2022-01-06T13:43:53.000Z
|
2022-02-10T01:41:04.000Z
|
pickydict/PickyDict.py
|
florian-huber/pickydict
|
b4138a4d1edc1257c4aed0691d915ef42b89ebc9
|
[
"MIT"
] | 8
|
2022-01-17T10:01:12.000Z
|
2022-03-07T21:10:32.000Z
|
pickydict/PickyDict.py
|
florian-huber/pickydict
|
b4138a4d1edc1257c4aed0691d915ef42b89ebc9
|
[
"MIT"
] | null | null | null |
import logging
import json
import re
logger = logging.getLogger('pickydict')
class PickyDict(dict):
"""More picky version of Python dictionary.
PickyDict objects will behave just like Python dictionaries, with a few
notable exceptions:
(1) PickyDict has a force_lower_case attribute. If set to True (default)
then dictionary keys will all be treated as lower-case.
    (2) PickyDict can contain two additional dictionaries named "key_replacements"
    and "key_regex_replacements" with mappings to enforce translating specific key words.
Examples:
.. code-block:: python
from pickydict import PickyDict
# per default force_lower_case is set to True:
my_dict = PickyDict({"A": 1, "B": 2})
print(my_dict) # => {'a': 1, 'b': 2}
# now also using a replacements dictionary
my_dict = PickyDict({"A": 1, "B": 2},
key_replacements={"a": "abc", "b": "bcd", "c": "cde"})
print(my_dict) # => {'abc': 1, 'bcd': 2}
# When adding a value using an undesired key, the key will automatically be fixed
my_dict["c"] = 100
print(my_dict) # => {'abc': 1, 'bcd': 2, 'cde': 100}
# Trying to add a value using an undesired key while the proper key already exists,
# will raise an exception.
my_dict["b"] = 5 # => ValueError: Key 'b' will be interpreted as 'bcd'...
It is also possible to add a dictionary with regex expression to replace parts of
key strings. This is done using the key_regex_replacements attribute.
Example:
.. code-block:: python
from pickydict import PickyDict
my_dict = PickyDict({"First Name": "Peter", "Last Name": "Petersson"},
key_replacements={"last_name": "surname"},
key_regex_replacements={r"\\s": "_"})
print(my_dict) # => {'first_name': 'Peter', 'surname': 'Petersson'}
    Whenever the pickyness is updated, whether through force_lower_case, key_replacements,
    or key_regex_replacements, the entire dictionary will be updated accordingly.
Example:
.. code-block:: python
from pickydict import PickyDict
my_dict = PickyDict({"First Name": "Peter", "Last Name": "Petersson"})
print(my_dict) # => {'first name': 'Peter', 'last name': 'Petersson'}
my_dict.set_pickyness(key_replacements={"last_name": "surname"},
key_regex_replacements={r"\\s": "_"})
print(my_dict) # => {'first_name': 'Peter', 'surname': 'Petersson'}
For the rest, PickyDict objects can be used just like regular Python dictionaries!
"""
_force_lower_case = True
_key_replacements = None
_key_regex_replacements = None
def __init__(self, input_dict: dict = None,
key_replacements: dict = None,
key_regex_replacements: dict = None,
force_lower_case: bool = True):
"""
Parameters
----------
input_dict : dict, optional
This is the actual dictionary within PickyDict.
key_replacements : dict, optional
This additional dictionary within PickyDict contains mappings of all
keys which the user wants to force into a specific form (see code example).
key_regex_replacements : dict, optional
This additional dictionary contains pairs of regex (regular expression) strings
and replacement strings to clean and harmonize the main dictionary keys.
An example would be {r"\\s": "_"} which will replace all spaces with underscores.
force_lower_case : bool, optional
If set to True (default) all dictionary keys will be forced to be lower case.
"""
self._force_lower_case = force_lower_case
self._key_replacements = key_replacements
self._key_regex_replacements = key_regex_replacements
if input_dict is not None:
super().__init__(input_dict)
self._apply_replacements()
else:
super().__init__()
def copy(self):
return PickyDict(self,
self._key_replacements,
self._key_regex_replacements,
self._force_lower_case)
def __setitem__(self, key, value):
proper_key = self._harmonize_key(key)
if key == proper_key:
super().__setitem__(key, value)
elif self.get(proper_key, None) is not None:
raise ValueError(f"Key '{key}' will be interpreted as '{proper_key}'. "
"But this entry already exists. "
f"Please use '{proper_key}' if you want to replace the entry.")
else:
super().__setitem__(proper_key, value)
def set_pickyness(self, key_replacements: dict = None,
key_regex_replacements: dict = None,
force_lower_case: bool = True):
"""
Function to set the pickyness of the dictionary.
Will automatically also run the new replacements if the dictionary already exists.
Parameters
----------
key_replacements : dict, optional
            This is the second dictionary within PickyDict containing mappings of all
keys which the user wants to force into a specific form (see code example).
key_regex_replacements : dict, optional
This additional dictionary contains pairs of regex (regular expression) strings
and replacement strings to clean and harmonize the main dictionary keys.
An example would be {r"\\s": "_"} which will replace all spaces with underscores.
force_lower_case : bool, optional
If set to True (default) all dictionary keys will be forced to be lower case.
"""
self._force_lower_case = force_lower_case
self._key_replacements = key_replacements
self._key_regex_replacements = key_regex_replacements
self._apply_replacements()
def _harmonize_key(self, key):
"""Applies lower-case, then regex replacements, then key replacements."""
if self._force_lower_case is True:
key = key.lower()
if self._key_regex_replacements is not None:
for regex_pattern, target in self._key_regex_replacements.items():
key = re.sub(regex_pattern, target, key)
if self._key_replacements is not None and key in self._key_replacements:
key = self._key_replacements[key]
return key
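    # Illustrative walk-through (mirrors the class docstring example): with force_lower_case=True,
    # key_regex_replacements={r"\s": "_"} and key_replacements={"last_name": "surname"},
    # "Last Name" -> "last name" -> "last_name" -> "surname".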
def _apply_replacements(self):
"""Harmonizes all keys in dictionary."""
keys_initial = self.keys()
for key in list(keys_initial).copy():
proper_key = self._harmonize_key(key)
if key != proper_key:
value = self.get(key)
if self.get(proper_key) is None:
super().__setitem__(proper_key, value)
elif self.get(proper_key) != value:
msg = f"Key '{key}' will be interpreted as '{proper_key}'. " \
"But this entry already exists. " \
f"Please use '{proper_key}' if you want to replace the entry."
logger.warning(msg)
self.pop(key)
def to_json(self):
return json.dumps(self, default=lambda x: x.data,
sort_keys=True, indent=4)
@property
def force_lower_case(self):
        return self._force_lower_case
@force_lower_case.setter
def force_lower_case(self, new_force_lower_case):
self._force_lower_case = new_force_lower_case
self._apply_replacements()
@property
def key_replacements(self):
return self._key_replacements.copy()
@key_replacements.setter
def key_replacements(self, new_key_replacements):
self._key_replacements = new_key_replacements
self._apply_replacements()
@property
def key_regex_replacements(self):
return self._key_regex_replacements.copy()
@key_regex_replacements.setter
def key_regex_replacements(self, new_key_regex_replacements):
self._key_regex_replacements = new_key_regex_replacements
self._apply_replacements()
| 39.947368
| 93
| 0.624745
|
226e48489a830f377fdc99306cbde1a6f45d2064
| 5,238
|
py
|
Python
|
tensorflow/lite/testing/op_tests/fully_connected.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 74
|
2020-07-06T17:11:39.000Z
|
2022-01-28T06:31:28.000Z
|
tensorflow/lite/testing/op_tests/fully_connected.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/lite/testing/op_tests/fully_connected.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 12
|
2020-07-08T07:27:17.000Z
|
2021-12-27T08:54:27.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for fully_connected."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_fully_connected_tests(options):
"""Make a set of tests to do fully_connected."""
test_parameters = [{
"shape1": [[3, 3]],
"shape2": [[3, 3]],
"transpose_a": [True, False],
"transpose_b": [True, False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[4, 4], [1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[40, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[40, 37]],
"shape2": [[40, 37]],
"transpose_a": [False],
"transpose_b": [True],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[5, 3]],
"shape2": [[5, 3]],
"transpose_a": [True],
"transpose_b": [False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[1, 3]],
"shape2": [[3, 3]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[1, 37], [2, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[1, 3], [2, 3]],
"shape2": [[3, 5], [3, 1]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[2, 3]],
"shape2": [[3, 5]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [True]
}]
def build_graph(parameters):
"""Build a matmul graph given `parameters`."""
input_tensor1 = tf.compat.v1.placeholder(
dtype=tf.float32, name="input1", shape=parameters["shape1"])
# Get input_tensor2 either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
input_tensor2 = create_tensor_data(
np.float32, parameters["shape2"], min_value=-1, max_value=1)
input_tensors = [input_tensor1]
else:
input_tensor2 = tf.compat.v1.placeholder(
dtype=tf.float32, name="input2", shape=parameters["shape2"])
input_tensors = [input_tensor1, input_tensor2]
out = tf.matmul(
input_tensor1,
input_tensor2,
transpose_a=parameters["transpose_a"],
transpose_b=parameters["transpose_b"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# pylint: disable=g-doc-return-or-yield, g-doc-args
"""Build list of input values.
It either contains 1 tensor (input_values1) or
2 tensors (input_values1, input_values2) based on whether the second input
is a constant or variable input.
"""
values = [
create_tensor_data(
np.float32, shape=parameters["shape1"], min_value=-1, max_value=1)
]
if not parameters["constant_filter"]:
values.append(
create_tensor_data(
np.float32, parameters["shape2"], min_value=-1, max_value=1))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=14)
| 32.534161
| 80
| 0.605002
|
1fdf1dfba2932cefb0179fc976d440e06c12de9d
| 6,061
|
py
|
Python
|
ASC_Project/analyses/solver/Solver_Hub.py
|
nasserarbabi/ASC_Challenge
|
88dab668756121be6ef81eb7a298cc1add8274a2
|
[
"MIT"
] | null | null | null |
ASC_Project/analyses/solver/Solver_Hub.py
|
nasserarbabi/ASC_Challenge
|
88dab668756121be6ef81eb7a298cc1add8274a2
|
[
"MIT"
] | null | null | null |
ASC_Project/analyses/solver/Solver_Hub.py
|
nasserarbabi/ASC_Challenge
|
88dab668756121be6ef81eb7a298cc1add8274a2
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
from .Darcy_CVFEM import Darcy_CVFEM
from ..models import (BC, Analysis, Connectivity, Mesh, Nodes, Preform, Resin,
Section, Step, Results)
from time import sleep
import numpy as np
from celery import shared_task
import os
import shelve as sh
@shared_task
def create_conf(_id):
'''
    This function builds the configuration dictionary for the Darcy_CVFEM solver
    based on the database information
'''
_analysis = Analysis.objects.get(id=_id)
FaceList = {}
for item in Connectivity.objects.filter(mesh_id=_analysis.mesh).values():
if item['FaceGroup'] not in FaceList.keys():
FaceList[item['FaceGroup']] = []
FaceList[item['FaceGroup']].append(item['ElmNum'])
KXX = {}
KXY = {}
KYY = {}
H = {}
phi = {}
section_names = _analysis.section.values('name').distinct()
for section in section_names:
H[section['name']] = 0.0
KXX[section['name']] = 0.0
KXY[section['name']] = 0.0
KYY[section['name']] = 0.0
phi[section['name']] = 0.0
preform_ids_rotate = Section.objects.filter(analysis = _analysis, name=section['name']).values('preform_id', 'rotate')
for item in preform_ids_rotate:
preform = Preform.objects.get(id = item['preform_id'])
radian_rot = np.deg2rad(item['rotate'])
T = np.array([[np.cos(radian_rot), np.sin(radian_rot)], [-np.sin(radian_rot), np.cos(radian_rot)]])
H[section['name']] = H[section['name']] + preform.thickness
phi[section['name']] = phi[section['name']] + preform.phi*preform.thickness
k = np.matmul(np.matmul(T,np.array([[preform.K11, preform.K12], [preform.K12, preform.K22]])), np.transpose(T))
KXX[section['name']] = KXX[section['name']] + preform.thickness * k[0][0]
KXY[section['name']] = KXY[section['name']] + preform.thickness * k[0][1]
KYY[section['name']] = KYY[section['name']] + preform.thickness * k[1][1]
KXX[section['name']] = KXX[section['name']]/H[section['name']]
KXY[section['name']] = KXY[section['name']]/H[section['name']]
KYY[section['name']] = KYY[section['name']]/H[section['name']]
phi[section['name']] = phi[section['name']]/H[section['name']]
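    # The loop above homogenizes each section's layup: every preform permeability
    # tensor K is rotated into the section frame (k = T.K.T^T, with T the 2-D
    # rotation matrix for the 'rotate' angle), and the in-plane components and the
    # fibre volume fraction phi are thickness-weighted averages over the stack.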
analysis = {
'analysis_name': _analysis.name,
'analysis_id':_analysis.id,
'folder_address':("media/" + str(_analysis.id))
}
hp = {
'hp_activated': True,
'K_medium': 1e-5,
'h_medium': 0.1,
'phi_medium': 0.95,
}
mesh = {
'mesh_name': _analysis.mesh.name
}
resin = {
'resin_name': _analysis.resin.name,
'viscosity': _analysis.resin.viscosity
}
section_data = {}
section_id = 1
for section in section_names:
section_data[section['name']]={
'marker': section_id,
'K11':KXX[section['name']],
'K12':KXY[section['name']],
'K22':KYY[section['name']],
'thickness':H[section['name']],
'volume_fraction':phi[section['name']],
'faces':FaceList[section['name']],
}
section_id = section_id + 1
step = _analysis.step
step_data = {
'termination_type':step.typ,
'termination_time':step.endtime,
'output_steps':step.outputstep,
'maximum_itrations':step.maxiterations,
'maximum_consequtive_steps':step.maxhaltsteps,
'minimum_change_of_saturation':step.minchangesaturation,
'time_scaling_parameter':step.timescaling,
'filling_threshold':step.fillthreshold,
}
EdgeList = {}
for item in Nodes.objects.filter(mesh_id=_analysis.mesh).values():
if item['EdgeGroup'] not in EdgeList.keys():
EdgeList[item['EdgeGroup']] = []
EdgeList[item['EdgeGroup']].append(item['NodeNum'])
EdgeList.pop("_None",None)
Inlets = {}
Outlets = {}
Walls = {}
boundary_marker = 1
for item in BC.objects.filter(analysis_id=_analysis.id):
if item.typ == 'Inlet':
Inlets[item.name] = {
'marker':boundary_marker,
'condition':item.condition,
'value':item.value,
'nodes':EdgeList[item.name],
}
boundary_marker += 1
elif item.typ == 'Outlet':
Outlets[item.name] = {
'marker':boundary_marker,
'condition':'Pressure',
'value':item.value,
'nodes':EdgeList[item.name],
}
boundary_marker += 1
else:
Walls[item.name] = {
'marker':boundary_marker,
'condition':'None',
'value':0.0,
'nodes':EdgeList[item.name],
}
boundary_marker += 1
BCs = {
'inlets':Inlets,
'outlets':Outlets,
'walls':Walls,
}
InputData = {
'analysis': analysis,
'mesh': mesh,
'resin':resin,
'sections':section_data,
'step':step_data,
'BCs':BCs,
'hp': hp,
# 'ICs':InitialConditions,
# 'loads':Loads,
# 'output':Output
}
return InputData
@shared_task
def print_conf(InputData):
_directory = InputData['analysis']['folder_address']
if not os.path.exists(_directory):
os.makedirs(_directory)
_message_file = sh.open(_directory + "/config.db", "c")
for key in InputData.keys():
_message_file[key] = InputData[key]
_message_file.close()
@shared_task (bind=True)
def solver_rtm(progress,_id):
InputData = create_conf(_id)
print_conf(InputData)
problem=Darcy_CVFEM(InputData)
problem.solve_rtm(progress)
@shared_task (bind=True)
def solver_hp_rtm(progress,_id):
InputData = create_conf(_id)
print_conf(InputData)
problem=Darcy_CVFEM(InputData)
problem.solve_hprtm(progress)
| 32.411765
| 126
| 0.574328
|
d297dd9199fba94ee124818bf8d870149b8fc93e
| 4,253
|
py
|
Python
|
textract/textract_worker/worker.py
|
aws-samples/nlp-textract-comprehend-demo
|
69d93dc03bf14e6987f48a5a908f0f88bece34c6
|
[
"MIT-0"
] | 3
|
2020-08-26T00:17:43.000Z
|
2021-06-22T09:48:47.000Z
|
textract/textract_worker/worker.py
|
aws-samples/nlp-textract-comprehend-demo
|
69d93dc03bf14e6987f48a5a908f0f88bece34c6
|
[
"MIT-0"
] | 2
|
2020-09-11T11:52:39.000Z
|
2021-07-23T23:44:14.000Z
|
textract/textract_worker/worker.py
|
aws-samples/nlp-textract-comprehend-demo
|
69d93dc03bf14e6987f48a5a908f0f88bece34c6
|
[
"MIT-0"
] | 3
|
2020-08-28T03:01:30.000Z
|
2021-06-10T18:52:59.000Z
|
import boto3
import time
import os
import json
import logging
from botocore.exceptions import ClientError
def is_job_complete(jobId):
"""
    This function polls Textract until the started text-detection job is complete.
"""
client = boto3.client('textract')
response = client.get_document_text_detection(JobId=jobId)
status = response["JobStatus"]
print("Job status: {}".format(status))
try:
while(status == "IN_PROGRESS"):
time.sleep(2)
response = client.get_document_text_detection(JobId=jobId)
status = response["JobStatus"]
print("Job status: {}".format(status))
return True
except Exception as e:
print(str(e))
return False
def get_job_results(jobId):
pages = []
time.sleep(3)
client = boto3.client('textract')
response = client.get_document_text_detection(JobId=jobId)
pages.append(response)
print("Resultset page recieved: {}".format(len(pages)))
nextToken = None
if('NextToken' in response):
nextToken = response['NextToken']
    # NextToken is returned when the results span multiple response pages
while(nextToken):
response = client.get_document_text_detection(JobId=jobId, NextToken=nextToken)
pages.append(response)
print("Resultset page recieved: {}".format(len(pages)))
nextToken = None
if('NextToken' in response):
nextToken = response['NextToken']
return pages
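# Sketch of how the helpers above fit together (job id and file name are
# placeholders; the real values come from the SQS message in the main loop below):
#     if is_job_complete(job_id):
#         pages = get_job_results(job_id)
#         write_extract_to_file(pages, "document.txt")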
def write_extract_to_file(response, documentNametxt):
# write detected text into a txt file
for result_page in response:
for item in result_page["Blocks"]:
if item["BlockType"] == "LINE":
with open(f"/tmp/{documentNametxt}", "a+") as file_object:
# Move read cursor to the start of file.
file_object.seek(0)
# If file is not empty then append '\n'
data = file_object.read(100)
if len(data) > 0 :
file_object.write("\n")
# Append text at the end of file
file_object.write(item["Text"])
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = boto3.client('s3')
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True
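# Example call (bucket and key names are placeholders):
#     upload_file("/tmp/document.txt", "my-bucket", "textract/output/document.txt")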
if __name__ == "__main__":
QUEUE_NAME = os.getenv("QUEUE_NAME", "")
S3_BUCKET_NAME = os.getenv("BUCKET_NAME","")
S3_TEXTRACT_OUTPUT_PATH = "textract/output"
REGION_NAME = os.getenv("REGION_NAME", "us-east-1")
sqs = boto3.resource('sqs', region_name=REGION_NAME)
queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
while 1:
try:
messages = queue.receive_messages(WaitTimeSeconds=5)
for message in messages:
message_body = json.loads(message.body)
print(message_body)
# Get results from SQS queue
job_id = message_body.get("job_id")
documentName = message_body.get("file_name")
                # is_job_complete polls until the Textract job finishes
validation = is_job_complete(job_id)
if not validation:
print("Error when validate the JOB")
response = get_job_results(job_id)
# Change the format of document to TXT
documentNametxt = ((documentName.split("/")[-1]).split(".")[0])+".txt"
write_extract_to_file(response, documentNametxt)
upload_file(f"/tmp/{documentNametxt}", S3_BUCKET_NAME,
f"{S3_TEXTRACT_OUTPUT_PATH}/{documentNametxt}")
message.delete()
except Exception as e:
print(e)
continue
| 31.503704
| 93
| 0.599107
|
98861b63395b73cce7b8a6e6d463da045f823807
| 2,185
|
py
|
Python
|
old-katas/bowling-kata/day-7.py
|
Alex-Diez/python-tdd-katas
|
a176f16cfd103e618e539a57cac0748fba52221c
|
[
"MIT"
] | null | null | null |
old-katas/bowling-kata/day-7.py
|
Alex-Diez/python-tdd-katas
|
a176f16cfd103e618e539a57cac0748fba52221c
|
[
"MIT"
] | null | null | null |
old-katas/bowling-kata/day-7.py
|
Alex-Diez/python-tdd-katas
|
a176f16cfd103e618e539a57cac0748fba52221c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class Game(object):
def __init__(self):
self.rolls = []
def roll(self, pins):
self.rolls.append(pins)
def score(self):
score = 0
frame_index = 0
for i in range(0, 10):
if self._is_strike(frame_index):
score += self._strike_points(frame_index)
frame_index += 1
elif self._is_spare(frame_index):
score += self._spare_points(frame_index)
frame_index += 2
else:
score += self._frame_points(frame_index)
frame_index += 2
return score
def _strike_points(self, frame_index):
return 10 + self.rolls[frame_index + 1] + self.rolls[frame_index + 2]
def _frame_points(self, frame_index):
return self.rolls[frame_index] + self.rolls[frame_index + 1]
def _spare_points(self, frame_index):
return 10 + self.rolls[frame_index + 2]
def _is_spare(self, frame_index):
return self._frame_points(frame_index) == 10
def _is_strike(self, frame_index):
return self.rolls[frame_index] == 10
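# Worked example of the scoring arithmetic (mirrors test_one_spare below):
# rolling 6 and 4 (a spare) followed by 3 scores frame one as 10 + 3 = 13,
# frame two adds 3, the remaining gutter rolls add 0, so score() returns 16.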
import unittest
class GameTest(unittest.TestCase):
def setUp(self):
self.game = Game()
def test_gutter_game(self):
self._roll_many(0, 20)
self.assertEqual(0, self.game.score())
def test_all_ones(self):
self._roll_many(1, 20)
self.assertEqual(20, self.game.score())
def test_one_spare(self):
self._roll_spare()
self.game.roll(3)
self._roll_many(0, 17)
self.assertEqual(16, self.game.score())
def test_one_strike(self):
self._roll_strike()
self.game.roll(4)
self.game.roll(3)
self._roll_many(0, 16)
self.assertEqual(24, self.game.score())
def test_perfect_game(self):
self._roll_many(10, 12)
self.assertEqual(300, self.game.score())
def _roll_strike(self):
self.game.roll(10)
def _roll_spare(self):
self.game.roll(6)
self.game.roll(4)
def _roll_many(self, pins, times):
for i in range(0, times):
self.game.roll(pins)
| 26.325301
| 77
| 0.590847
|
dbe03429705e12c3c878e01d79a951c34d7c1344
| 9,144
|
py
|
Python
|
ansible/grab001/lib/python2.7/site-packages/ansible/modules/core/network/nxos/nxos_evpn_global.py
|
archmangler/jumpcloud
|
9dda2862d9331b8794f6985c157fa50d7260fcd0
|
[
"MIT"
] | null | null | null |
ansible/grab001/lib/python2.7/site-packages/ansible/modules/core/network/nxos/nxos_evpn_global.py
|
archmangler/jumpcloud
|
9dda2862d9331b8794f6985c157fa50d7260fcd0
|
[
"MIT"
] | null | null | null |
ansible/grab001/lib/python2.7/site-packages/ansible/modules/core/network/nxos/nxos_evpn_global.py
|
archmangler/jumpcloud
|
9dda2862d9331b8794f6985c157fa50d7260fcd0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_evpn_global
version_added: "2.2"
short_description: Handles the EVPN control plane for VXLAN.
description:
- Handles the EVPN control plane for VXLAN.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
options:
nv_overlay_evpn:
description:
- EVPN control plane.
required: true
choices: ['true', 'false']
'''
EXAMPLES = '''
- nxos_evpn_global:
nv_overlay_evpn: true
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"nv_overlay_evpn": true}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"nv_overlay_evpn": false}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"nv_overlay_evpn": true}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["nv overlay evpn"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
PARAM_TO_COMMAND_KEYMAP = {
'nv_overlay_evpn': 'nv overlay evpn',
}
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
if REGEX.search(config):
value = True
return value
def get_existing(module):
existing = {}
config = str(get_config(module))
existing['nv_overlay_evpn'] = get_value('nv_overlay_evpn', config, module)
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def get_commands(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
if commands:
candidate.add(commands, parents=[])
def main():
argument_spec = dict(
nv_overlay_evpn=dict(required=True, type='bool'),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
existing = invoke('get_existing', module)
end_state = existing
proposed = dict(nv_overlay_evpn=module.params['nv_overlay_evpn'])
result = {}
candidate = CustomNetworkConfig(indent=3)
invoke('get_commands', module, existing, proposed, candidate)
if proposed != existing:
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed
module.exit_json(**result)
if __name__ == '__main__':
main()
| 28.485981
| 93
| 0.600503
|
f000aa725c130e343e283ba21cd45761224b3ce2
| 361
|
py
|
Python
|
tests/packages/test_directory_dependency.py
|
utek/core
|
1172536cac35ca4e2519b9bd516ae8f1cf23994a
|
[
"MIT"
] | null | null | null |
tests/packages/test_directory_dependency.py
|
utek/core
|
1172536cac35ca4e2519b9bd516ae8f1cf23994a
|
[
"MIT"
] | null | null | null |
tests/packages/test_directory_dependency.py
|
utek/core
|
1172536cac35ca4e2519b9bd516ae8f1cf23994a
|
[
"MIT"
] | null | null | null |
import pytest
from poetry.core.packages.directory_dependency import DirectoryDependency
from poetry.core.utils._compat import Path
DIST_PATH = Path(__file__).parent.parent / "fixtures" / "git" / "github.com" / "demo"
def test_directory_dependency_must_exist():
with pytest.raises(ValueError):
DirectoryDependency("demo", DIST_PATH / "invalid")
| 27.769231
| 85
| 0.764543
|
824661fe30c00afd8b5fcdd1f9c50a8d9f778c50
| 10,495
|
py
|
Python
|
oslo_db/options.py
|
mail2nsrajesh/oslo.db
|
d17be6e20c82819714518d8459a8ab2e88453c13
|
[
"Apache-2.0"
] | null | null | null |
oslo_db/options.py
|
mail2nsrajesh/oslo.db
|
d17be6e20c82819714518d8459a8ab2e88453c13
|
[
"Apache-2.0"
] | 1
|
2018-02-28T17:29:41.000Z
|
2018-02-28T17:29:41.000Z
|
oslo_db/options.py
|
mail2nsrajesh/oslo.db
|
d17be6e20c82819714518d8459a8ab2e88453c13
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
database_opts = [
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode.'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The back end to use for the database.'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string to use to connect to '
'the database.',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string to use to connect to the'
' slave database.'),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.BoolOpt('mysql_enable_ndb',
default=False,
help='If True, transparently enables support for handling '
'MySQL Cluster (NDB).'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle SQL connections are reaped.'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_pool_size',
default=5,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool. Setting a value of 0 indicates no limit.'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum number of database connection retries '
'during startup. Set to -1 to specify an infinite '
'retry count.'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a SQL connection.'),
cfg.IntOpt('max_overflow',
default=50,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with '
'SQLAlchemy.'),
cfg.IntOpt('connection_debug',
default=0,
min=0, max=100,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information: 0=None, '
'100=Everything.'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add Python stack traces to SQL as comment strings.'),
cfg.IntOpt('pool_timeout',
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with '
'SQLAlchemy.'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost.'),
cfg.IntOpt('db_retry_interval',
default=1,
help='Seconds between retries of a database transaction.'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='If True, increases the interval between retries '
'of a database operation up to db_max_retry_interval.'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='If db_inc_retry_interval is set, the '
'maximum seconds between retries of a '
'database operation.'),
cfg.IntOpt('db_max_retries',
default=20,
help='Maximum retries in case of connection error or deadlock '
'error before error is '
'raised. Set to -1 to specify an infinite retry '
'count.'),
]
def set_defaults(conf, connection=None, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables.
Overrides default options values.
:param conf: Config instance specified to set default options in it. Using
of instances instead of a global config object prevents conflicts between
options declaration.
:type conf: oslo.config.cfg.ConfigOpts instance.
:keyword connection: SQL connection string.
Valid SQLite URL forms are:
* sqlite:///:memory: (or, sqlite://)
* sqlite:///relative/path/to/file.db
* sqlite:////absolute/path/to/file.db
:type connection: str
:keyword max_pool_size: maximum connections pool size. The size of the pool
to be maintained, defaults to 5. This is the largest number of connections
that will be kept persistently in the pool. Note that the pool begins with
no connections; once this number of connections is requested, that number
of connections will remain.
:type max_pool_size: int
:default max_pool_size: 5
:keyword max_overflow: The maximum overflow size of the pool. When the
number of checked-out connections reaches the size set in pool_size,
additional connections will be returned up to this limit. When those
additional connections are returned to the pool, they are disconnected and
discarded. It follows then that the total number of simultaneous
connections the pool will allow is pool_size + max_overflow, and the total
number of "sleeping" connections the pool will allow is pool_size.
max_overflow can be set to -1 to indicate no overflow limit; no limit will
be placed on the total number of concurrent connections. Defaults to 10,
    which will be used if the value of the parameter is `None`.
:type max_overflow: int
:default max_overflow: None
:keyword pool_timeout: The number of seconds to wait before giving up on
returning a connection. Defaults to 30, will be used if value of the
parameter is `None`.
:type pool_timeout: int
:default pool_timeout: None
"""
conf.register_opts(database_opts, group='database')
if connection is not None:
conf.set_default('connection', connection, group='database')
if max_pool_size is not None:
conf.set_default('max_pool_size', max_pool_size, group='database')
if max_overflow is not None:
conf.set_default('max_overflow', max_overflow, group='database')
if pool_timeout is not None:
conf.set_default('pool_timeout', pool_timeout, group='database')
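# Example call (illustrative values; CONF is the consuming service's ConfigOpts):
#
#     set_defaults(CONF, connection='sqlite:///:memory:',
#                  max_pool_size=10, max_overflow=20, pool_timeout=60)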
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', database_opts)]
| 48.364055
| 79
| 0.566841
|
47858b122baf95c8e45af58b5d99fa39bbcf2b99
| 7,639
|
py
|
Python
|
tools/yuidoc/bin/Cheetah/Filters.py
|
theatlantic/backplanejs
|
d7b844457abba930df6a800e55eb9c70c8b76cd9
|
[
"Apache-2.0"
] | 115
|
2015-01-02T12:23:59.000Z
|
2021-12-22T08:18:04.000Z
|
tools/yuidoc/bin/Cheetah/Filters.py
|
theatlantic/backplanejs
|
d7b844457abba930df6a800e55eb9c70c8b76cd9
|
[
"Apache-2.0"
] | 1
|
2017-01-12T09:10:46.000Z
|
2017-01-12T09:10:46.000Z
|
tools/yuidoc/bin/Cheetah/Filters.py
|
theatlantic/backplanejs
|
d7b844457abba930df6a800e55eb9c70c8b76cd9
|
[
"Apache-2.0"
] | 69
|
2015-03-12T07:40:21.000Z
|
2020-04-04T15:15:55.000Z
|
'''
Filters for the #filter directive as well as #transform
#filter results in output filters Cheetah's $placeholders .
#transform results in a filter on the entirety of the output
'''
import sys
# Additional entities WebSafe knows how to transform. No need to include
# '<', '>' or '&' since those will have been done already.
webSafeEntities = {' ': ' ', '"': '"'}
class Filter(object):
"""A baseclass for the Cheetah Filters."""
def __init__(self, template=None):
"""Setup a reference to the template that is using the filter instance.
This reference isn't used by any of the standard filters, but is
available to Filter subclasses, should they need it.
Subclasses should call this method.
"""
self.template = template
def filter(self, val, encoding=None, str=str, **kw):
'''
Pass Unicode strings through unmolested, unless an encoding is specified.
'''
if val is None:
return u''
if isinstance(val, unicode):
# ignore the encoding and return the unicode object
return val
else:
try:
return unicode(val)
except UnicodeDecodeError:
# we could put more fallbacks here, but we'll just pass the str
# on and let DummyTransaction worry about it
return str(val)
RawOrEncodedUnicode = Filter
EncodeUnicode = Filter
class Markdown(EncodeUnicode):
'''
Markdown will change regular strings to Markdown
(http://daringfireball.net/projects/markdown/)
Such that:
My Header
=========
    Becomes:
<h1>My Header</h1>
and so on.
Markdown is meant to be used with the #transform
    tag, as its usefulness with #filter is marginal at
best
'''
def filter(self, value, **kwargs):
# This is a bit of a hack to allow outright embedding of the markdown module
try:
import markdown
except ImportError:
print('>>> Exception raised importing the "markdown" module')
print('>>> Are you sure you have the ElementTree module installed?')
print(' http://effbot.org/downloads/#elementtree')
raise
encoded = super(Markdown, self).filter(value, **kwargs)
return markdown.markdown(encoded)
class CodeHighlighter(EncodeUnicode):
'''
The CodeHighlighter filter depends on the "pygments" module which you can
download and install from: http://pygments.org
    CodeHighlighter assumes the string it receives is source code and uses
    pygments.lexers.guess_lexer() to try to guess which lexer to use when
    highlighting it.
CodeHighlighter will return the HTML and CSS to render the code block, syntax
highlighted, in a browser
NOTE: I had an issue installing pygments on Linux/amd64/Python 2.6 dealing with
importing of pygments.lexers, I was able to correct the failure by adding:
raise ImportError
to line 39 of pygments/plugin.py (since importing pkg_resources was causing issues)
'''
def filter(self, source, **kwargs):
encoded = super(CodeHighlighter, self).filter(source, **kwargs)
try:
from pygments import highlight
from pygments import lexers
from pygments import formatters
        except ImportError as ex:
print('<%s> - Failed to import pygments! (%s)' % (self.__class__.__name__, ex))
print('-- You may need to install it from: http://pygments.org')
return encoded
lexer = None
try:
lexer = lexers.guess_lexer(source)
except lexers.ClassNotFound:
lexer = lexers.PythonLexer()
formatter = formatters.HtmlFormatter(cssclass='code_highlighter')
encoded = highlight(encoded, lexer, formatter)
css = formatter.get_style_defs('.code_highlighter')
return '''<style type="text/css"><!--
%(css)s
--></style>%(source)s''' % {'css' : css, 'source' : encoded}
class MaxLen(Filter):
def filter(self, val, **kw):
"""Replace None with '' and cut off at maxlen."""
output = super(MaxLen, self).filter(val, **kw)
if 'maxlen' in kw and len(output) > kw['maxlen']:
return output[:kw['maxlen']]
return output
class WebSafe(Filter):
"""Escape HTML entities in $placeholders.
"""
def filter(self, val, **kw):
s = super(WebSafe, self).filter(val, **kw)
# These substitutions are copied from cgi.escape().
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
# Process the additional transformations if any.
if 'also' in kw:
also = kw['also']
entities = webSafeEntities # Global variable.
for k in also:
if k in entities:
v = entities[k]
else:
v = "&#%s;" % ord(k)
s = s.replace(k, v)
return s
class Strip(Filter):
"""Strip leading/trailing whitespace but preserve newlines.
This filter goes through the value line by line, removing leading and
trailing whitespace on each line. It does not strip newlines, so every
input line corresponds to one output line, with its trailing newline intact.
We do not use val.split('\n') because that would squeeze out consecutive
blank lines. Instead, we search for each newline individually. This
makes us unable to use the fast C .split method, but it makes the filter
much more widely useful.
This filter is intended to be usable both with the #filter directive and
with the proposed #sed directive (which has not been ratified yet.)
"""
def filter(self, val, **kw):
s = super(Strip, self).filter(val, **kw)
result = []
start = 0 # The current line will be s[start:end].
while True: # Loop through each line.
end = s.find('\n', start) # Find next newline.
if end == -1: # If no more newlines.
break
chunk = s[start:end].strip()
result.append(chunk)
result.append('\n')
start = end + 1
# Write the unfinished portion after the last newline, if any.
chunk = s[start:].strip()
result.append(chunk)
return "".join(result)
class StripSqueeze(Filter):
"""Canonicalizes every chunk of whitespace to a single space.
Strips leading/trailing whitespace. Removes all newlines, so multi-line
    input is joined into one long line with NO trailing newline.
"""
def filter(self, val, **kw):
s = super(StripSqueeze, self).filter(val, **kw)
s = s.split()
return " ".join(s)
##################################################
## MAIN ROUTINE -- testing
def test():
s1 = "abc <=> &"
s2 = " asdf \n\t 1 2 3\n"
print("WebSafe INPUT:", repr(s1))
print(" WebSafe:", repr(WebSafe().filter(s1)))
print()
print(" Strip INPUT:", repr(s2))
print(" Strip:", repr(Strip().filter(s2)))
print("StripSqueeze:", repr(StripSqueeze().filter(s2)))
print("Unicode:", repr(EncodeUnicode().filter(u'aoeu12345\u1234')))
if __name__ == "__main__":
test()
# vim: shiftwidth=4 tabstop=4 expandtab
| 35.86385
| 91
| 0.592355
|
87716dcbccf1abe6d5296b2e992cb38af80e4779
| 11,487
|
py
|
Python
|
homeassistant/helpers/helper_config_entry_flow.py
|
Rjevski/core
|
eca5fb5d54f50e7a103338ba74a94ff7ee0882db
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/helpers/helper_config_entry_flow.py
|
Rjevski/core
|
eca5fb5d54f50e7a103338ba74a94ff7ee0882db
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/helpers/helper_config_entry_flow.py
|
Rjevski/core
|
eca5fb5d54f50e7a103338ba74a94ff7ee0882db
|
[
"Apache-2.0"
] | null | null | null |
"""Helpers for data entry flows for helper config entries."""
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Callable, Mapping
import copy
from dataclasses import dataclass
import types
from typing import Any, cast
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback, split_entity_id
from homeassistant.data_entry_flow import FlowResult, UnknownHandler
from . import entity_registry as er
class HelperFlowError(Exception):
"""Validation failed."""
@dataclass
class HelperFlowFormStep:
"""Define a helper config or options flow step."""
# Optional schema for requesting and validating user input. If schema validation
# fails, the step will be retried. If the schema is None, no user input is requested.
schema: vol.Schema | None
# Optional function to validate user input.
# The validate_user_input function is called if the schema validates successfully.
# The validate_user_input function is passed the user input from the current step.
    # The validate_user_input function should raise HelperFlowError if user input is invalid.
validate_user_input: Callable[[dict[str, Any]], dict[str, Any]] = lambda x: x
# Optional function to identify next step.
# The next_step function is called if the schema validates successfully or if no
# schema is defined. The next_step function is passed the union of config entry
# options and user input from previous steps.
# If next_step returns None, the flow is ended with RESULT_TYPE_CREATE_ENTRY.
next_step: Callable[[dict[str, Any]], str | None] = lambda _: None
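# A minimal sketch of how form steps are assembled into a flow (the step name and
# schema below are made up for illustration):
#
#     CONFIG_FLOW = {
#         "user": HelperFlowFormStep(vol.Schema({vol.Required("name"): str})),
#     }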
@dataclass
class HelperFlowMenuStep:
"""Define a helper config or options flow menu step."""
# Menu options
options: list[str] | dict[str, str]
class HelperCommonFlowHandler:
"""Handle a config or options flow for helper."""
def __init__(
self,
handler: HelperConfigFlowHandler | HelperOptionsFlowHandler,
flow: dict[str, HelperFlowFormStep | HelperFlowMenuStep],
config_entry: config_entries.ConfigEntry | None,
) -> None:
"""Initialize a common handler."""
self._flow = flow
self._handler = handler
self._options = dict(config_entry.options) if config_entry is not None else {}
async def async_step(
self, step_id: str, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a step."""
if isinstance(self._flow[step_id], HelperFlowFormStep):
return await self._async_form_step(step_id, user_input)
return await self._async_menu_step(step_id, user_input)
async def _async_form_step(
self, step_id: str, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a form step."""
form_step: HelperFlowFormStep = cast(HelperFlowFormStep, self._flow[step_id])
if user_input is not None and form_step.schema is not None:
# Do extra validation of user input
try:
user_input = form_step.validate_user_input(user_input)
except HelperFlowError as exc:
return self._show_next_step(step_id, exc, user_input)
if user_input is not None:
# User input was validated successfully, update options
self._options.update(user_input)
next_step_id: str = step_id
if form_step.next_step and (user_input is not None or form_step.schema is None):
# Get next step
next_step_id_or_end_flow = form_step.next_step(self._options)
if next_step_id_or_end_flow is None:
# Flow done, create entry or update config entry options
return self._handler.async_create_entry(data=self._options)
next_step_id = next_step_id_or_end_flow
return self._show_next_step(next_step_id)
def _show_next_step(
self,
next_step_id: str,
error: HelperFlowError | None = None,
user_input: dict[str, Any] | None = None,
) -> FlowResult:
"""Show form for next step."""
form_step: HelperFlowFormStep = cast(
HelperFlowFormStep, self._flow[next_step_id]
)
options = dict(self._options)
if user_input:
options.update(user_input)
if (data_schema := form_step.schema) and data_schema.schema:
# Make a copy of the schema with suggested values set to saved options
schema = {}
for key, val in data_schema.schema.items():
new_key = key
if key in options and isinstance(key, vol.Marker):
# Copy the marker to not modify the flow schema
new_key = copy.copy(key)
new_key.description = {"suggested_value": options[key]}
schema[new_key] = val
data_schema = vol.Schema(schema)
errors = {"base": str(error)} if error else None
# Show form for next step
return self._handler.async_show_form(
step_id=next_step_id, data_schema=data_schema, errors=errors
)
async def _async_menu_step(
self, step_id: str, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a menu step."""
form_step: HelperFlowMenuStep = cast(HelperFlowMenuStep, self._flow[step_id])
return self._handler.async_show_menu(
step_id=step_id,
menu_options=form_step.options,
)
class HelperConfigFlowHandler(config_entries.ConfigFlow):
"""Handle a config flow for helper integrations."""
config_flow: dict[str, HelperFlowFormStep | HelperFlowMenuStep]
options_flow: dict[str, HelperFlowFormStep | HelperFlowMenuStep] | None = None
VERSION = 1
# pylint: disable-next=arguments-differ
def __init_subclass__(cls, **kwargs: Any) -> None:
"""Initialize a subclass."""
super().__init_subclass__(**kwargs)
@callback
def _async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Get the options flow for this handler."""
if cls.options_flow is None:
raise UnknownHandler
return HelperOptionsFlowHandler(
config_entry, cls.options_flow, cls.async_options_flow_finished
)
# Create an async_get_options_flow method
cls.async_get_options_flow = _async_get_options_flow # type: ignore[assignment]
# Create flow step methods for each step defined in the flow schema
for step in cls.config_flow:
setattr(cls, f"async_step_{step}", cls._async_step(step))
def __init__(self) -> None:
"""Initialize config flow."""
self._common_handler = HelperCommonFlowHandler(self, self.config_flow, None)
@classmethod
@callback
def async_supports_options_flow(
cls, config_entry: config_entries.ConfigEntry
) -> bool:
"""Return options flow support for this handler."""
return cls.options_flow is not None
@staticmethod
def _async_step(step_id: str) -> Callable:
"""Generate a step handler."""
async def _async_step(
self: HelperConfigFlowHandler, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a config flow step."""
# pylint: disable-next=protected-access
result = await self._common_handler.async_step(step_id, user_input)
return result
return _async_step
# pylint: disable-next=no-self-use
@abstractmethod
@callback
def async_config_entry_title(self, options: Mapping[str, Any]) -> str:
"""Return config entry title.
The options parameter contains config entry options, which is the union of user
input from the config flow steps.
"""
@callback
def async_config_flow_finished(self, options: Mapping[str, Any]) -> None:
"""Take necessary actions after the config flow is finished, if needed.
The options parameter contains config entry options, which is the union of user
input from the config flow steps.
"""
@callback
@staticmethod
def async_options_flow_finished(
hass: HomeAssistant, options: Mapping[str, Any]
) -> None:
"""Take necessary actions after the options flow is finished, if needed.
The options parameter contains config entry options, which is the union of stored
options and user input from the options flow steps.
"""
@callback
def async_create_entry( # pylint: disable=arguments-differ
self,
data: Mapping[str, Any],
**kwargs: Any,
) -> FlowResult:
"""Finish config flow and create a config entry."""
self.async_config_flow_finished(data)
return super().async_create_entry(
data={}, options=data, title=self.async_config_entry_title(data), **kwargs
)
class HelperOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle an options flow for helper integrations."""
def __init__(
self,
config_entry: config_entries.ConfigEntry,
options_flow: dict[str, vol.Schema],
async_options_flow_finished: Callable[[HomeAssistant, Mapping[str, Any]], None],
) -> None:
"""Initialize options flow."""
self._common_handler = HelperCommonFlowHandler(self, options_flow, config_entry)
self._config_entry = config_entry
self._async_options_flow_finished = async_options_flow_finished
for step in options_flow:
setattr(
self,
f"async_step_{step}",
types.MethodType(self._async_step(step), self),
)
@staticmethod
def _async_step(step_id: str) -> Callable:
"""Generate a step handler."""
async def _async_step(
self: HelperConfigFlowHandler, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle an options flow step."""
# pylint: disable-next=protected-access
result = await self._common_handler.async_step(step_id, user_input)
return result
return _async_step
@callback
def async_create_entry( # pylint: disable=arguments-differ
self,
data: Mapping[str, Any],
**kwargs: Any,
) -> FlowResult:
"""Finish config flow and create a config entry."""
self._async_options_flow_finished(self.hass, data)
return super().async_create_entry(title="", data=data, **kwargs)
@callback
def wrapped_entity_config_entry_title(
hass: HomeAssistant, entity_id_or_uuid: str
) -> str:
"""Generate title for a config entry wrapping a single entity.
If the entity is registered, use the registry entry's name.
If the entity is in the state machine, use the name from the state.
Otherwise, fall back to the object ID.
"""
registry = er.async_get(hass)
entity_id = er.async_validate_entity_id(registry, entity_id_or_uuid)
object_id = split_entity_id(entity_id)[1]
entry = registry.async_get(entity_id)
if entry:
return entry.name or entry.original_name or object_id
state = hass.states.get(entity_id)
if state:
return state.name or object_id
return object_id
| 36.466667
| 89
| 0.660834
|
1ac44e3cce3db82dd840b03d5251b4a7c85abd78
| 564
|
py
|
Python
|
login/APIv1/userLogin.py
|
aotella/calcoff_backend
|
7f25ff494ea2e1e4119fe7450d805c986fa77f0c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
login/APIv1/userLogin.py
|
aotella/calcoff_backend
|
7f25ff494ea2e1e4119fe7450d805c986fa77f0c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
login/APIv1/userLogin.py
|
aotella/calcoff_backend
|
7f25ff494ea2e1e4119fe7450d805c986fa77f0c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from ..models import UserModel
import bcrypt
def userLogin(request):
try:
userObject = UserModel.objects.get(UserName=request['userName'])
if bcrypt.checkpw(str(request['password']),str(userObject.Password)):
returnData={}
returnData['UserName']=userObject.UserName
returnData['EmailID']=userObject.EmailID
returnData['Age'] = userObject.Age
returnData['Country'] = userObject.Country
return returnData
else:
return 400
except:
return 404
| 31.333333
| 77
| 0.618794
|
60481a4cd46253b27a1a2f3fd2bf90d2cc2d923a
| 3,078
|
py
|
Python
|
cybox/utils/idgen.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | null | null | null |
cybox/utils/idgen.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | null | null | null |
cybox/utils/idgen.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | 1
|
2019-04-16T18:37:32.000Z
|
2019-04-16T18:37:32.000Z
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""Methods for generating IDs for Objects and Observables"""
import uuid
from cybox.utils.nsparser import Namespace
EXAMPLE_NAMESPACE = Namespace("http://example.com", "example")
__all__ = ['InvalidMethodError', 'IDGenerator', 'set_id_namespace',
'set_id_method', 'create_id']
class InvalidMethodError(ValueError):
def __init__(self, method):
ValueError.__init__(self, "invalid method: %s" % method)
class IDGenerator(object):
"""Utility class for generating CybOX IDs for objects"""
METHOD_UUID = 1
METHOD_INT = 2
METHODS = (METHOD_UUID, METHOD_INT)
def __init__(self, namespace=EXAMPLE_NAMESPACE, method=METHOD_UUID):
self.namespace = namespace
self.method = method
self.reset()
def reset(self):
self.next_int = 1
@property
def namespace(self):
return self._namespace
@namespace.setter
def namespace(self, value):
if not isinstance(value, Namespace):
raise ValueError("Must be a Namespace object")
self._namespace = value
self.reset()
@property
def method(self):
return self._method
@method.setter
def method(self, value):
if value not in IDGenerator.METHODS:
raise InvalidMethodError("invalid method: %s" % value)
self._method = value
self.reset()
def create_id(self, prefix="guid"):
"""Create an ID.
        Note that if `prefix` is not provided, it will be `guid`, even if the
`method` is `METHOD_INT`.
"""
if self.method == IDGenerator.METHOD_UUID:
id_ = str(uuid.uuid4())
elif self.method == IDGenerator.METHOD_INT:
id_ = self.next_int
self.next_int += 1
else:
            raise InvalidMethodError(self.method)
return "%s:%s-%s" % (self.namespace.prefix, prefix, id_)
# Singleton instance within this module. It is lazily instantiated, so simply
# importing the utils module will not create the object.
__generator = None
def _get_generator():
"""Return the `cybox.utils` module's generator object.
Only under rare circumstances should this function be called by external
code. More likely, external code should initialize its own IDGenerator or
use the `set_id_namespace`, `set_id_method`, or `create_id` functions of
the `cybox.utils` module.
"""
global __generator
if not __generator:
__generator = IDGenerator()
return __generator
def set_id_namespace(namespace):
""" Set the namespace for the module-level ID Generator"""
_get_generator().namespace = namespace
def set_id_method(method):
""" Set the method for the module-level ID Generator"""
_get_generator().method = method
def create_id(prefix=None):
""" Create an ID using the module-level ID Generator"""
if not prefix:
return _get_generator().create_id()
else:
return _get_generator().create_id(prefix)
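# Example of the module-level helpers (namespace and prefix values are illustrative):
#     set_id_namespace(Namespace("http://example.com", "example"))
#     set_id_method(IDGenerator.METHOD_UUID)
#     create_id("observable")  # -> "example:observable-<random uuid4>"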
| 27.981818
| 77
| 0.662443
|
b53c43dc70d91ac1fe0904143a2ee33b5bf574f5
| 2,079
|
py
|
Python
|
lab04/scanner.py
|
konrad2508/compilators-lab
|
ebafe4c8b72d4600408c0ceb54ad0bed0138e736
|
[
"MIT"
] | 1
|
2020-10-28T13:18:09.000Z
|
2020-10-28T13:18:09.000Z
|
lab05/scanner.py
|
konrad2508/compilators-lab
|
ebafe4c8b72d4600408c0ceb54ad0bed0138e736
|
[
"MIT"
] | null | null | null |
lab05/scanner.py
|
konrad2508/compilators-lab
|
ebafe4c8b72d4600408c0ceb54ad0bed0138e736
|
[
"MIT"
] | null | null | null |
import ply.lex as lex
reserved = {
'if': 'IF',
'else': 'ELSE',
'for': 'FOR',
'while': 'WHILE',
'break': 'BREAK',
'continue': 'CONTINUE',
'return': 'RETURN',
'eye': 'EYE',
'zeros': 'ZEROS',
'ones': 'ONES',
'print': 'PRINT'
}
tokens = (
'WHITESPACE', 'COMMENT', 'DOTADD', 'DOTSUB', 'DOTMUL', 'DOTDIV', 'ADDASSIGN', 'SUBASSIGN', 'MULASSIGN',
'DIVASSIGN', 'LTE', 'GTE', 'NEQ', 'EQ', 'ID', 'FLOAT', 'INT', 'STRING', 'TRANSPOSE'
) + tuple(reserved.values())
literals = ['+', '-', '*', '/', '=', '<', '>', '(', ')', '[', ']', '{', '}', ':', ';', ',']
t_ignore = '\t'
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
t.lexer.charno = 1
def t_WHITESPACE(t):
r'\s+'
lexer.charno += len(str(t.value))
def t_COMMENT(t):
r'\#.*'
pass
t_DOTADD = r'\.\+'
t_DOTSUB = r'\.-'
t_DOTMUL = r'\.\*'
t_DOTDIV = r'\./'
t_ADDASSIGN = r'\+='
t_SUBASSIGN = r'-='
t_MULASSIGN = r'\*='
t_DIVASSIGN = r'/='
t_LTE = r'<='
t_GTE = r'>='
t_NEQ = r'!='
t_EQ = r'=='
t_TRANSPOSE = r"'"
def t_ID(t):
# capture strings starting with letter or underscore
r'[a-zA-Z_]\w*'
    # check whether the matched identifier is actually a reserved keyword
t.type = reserved.get(t.value, 'ID')
return t
def t_FLOAT(t):
# capture floats from python, i.e. numbers in format 6.1, 6., .6 or 60.52E2
r'\d+\.\d*([eE]\d+)?|\d*\.\d+([eE]\d+)?'
t.value = float(t.value)
return t
def t_INT(t):
r'\d+'
t.value = int(t.value)
return t
def t_STRING(t):
# strings must be enclosed within quotation marks
# correct regex to catch other quotation marks should be (["'`])(.*?)\1, but for some reason re throws error
r'"(.*?)"'
# strip quotation marks
t.value = t.value[1:-1]
return t
def t_error(t):
print("Error starting at character '" + str(t.value[0]) + "' at line: " + str(t.lexer.lineno))
t.lexer.skip(1)
def find_tok_line(t):
return t.lexer.lineno
def find_tok_column(t):
return t.lexer.charno
lexer = lex.lex()
lexer.charno = 1
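# Added usage sketch (not part of the original scanner): feed a small snippet to
# the lexer built above and print the tokens it yields. The input text is only
# an illustration; token names follow the definitions in this file.
if __name__ == '__main__':
    lexer.input("A = zeros(3, 3);\nA += 1.5  # comment")
    for tok in lexer:
        print(tok.type, tok.value, tok.lineno)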
| 19.990385
| 116
| 0.537278
|
edac057cbf99b03df2f3df6c5e0ab240cf5925ed
| 24,877
|
py
|
Python
|
puzzle-project/puzzle/views.py
|
bhavul/GIDS-Endurance-Hacker-Puzzle
|
78b057b9158e7e818de833f1fd91c7f5d598ac8a
|
[
"MIT"
] | null | null | null |
puzzle-project/puzzle/views.py
|
bhavul/GIDS-Endurance-Hacker-Puzzle
|
78b057b9158e7e818de833f1fd91c7f5d598ac8a
|
[
"MIT"
] | null | null | null |
puzzle-project/puzzle/views.py
|
bhavul/GIDS-Endurance-Hacker-Puzzle
|
78b057b9158e7e818de833f1fd91c7f5d598ac8a
|
[
"MIT"
] | null | null | null |
from pyramid.httpexceptions import HTTPFound
from pyramid.response import Response, FileResponse
from pyramid.url import route_url
from pyramid.view import view_config
from pyramid.events import ApplicationCreated
from pyramid.events import NewRequest
from pyramid.events import subscriber
import sqlite3
import random
import string
import hashlib
import logging
import time
# todo - check out if you can sniff anything on http
# todo - stress testing
# todo - any random page takes him to his progress :D
# todo - secure apache server
log = logging.getLogger(__name__)
####### - FILE PATHS
tableschema_path = '/Users/bhavul.g/Downloads/Own-development-related-stuff/GIDS-puzzle-final/puzzle-project/puzzle/schema.sql'
tasksdb_path = '/Users/bhavul.g/Downloads/Own-development-related-stuff/GIDS-puzzle-final/puzzle-project/puzzle/tasks.db'
################## ------- important commands -----------
# inside /var/www/GIDS-puzzle-final
# virtualenv -p python venv27
# . venv27/bin/activate
# inside puzzle-project (where you have setup.py)
# ../env/bin/pserve development.ini --reload
################## ---- Subscribers --------
@subscriber(ApplicationCreated)
def application_created_subscriber(event):
with open(tableschema_path) as f:
stmt = f.read()
settings = event.app.registry.settings
db = sqlite3.connect(tasksdb_path)
db.executescript(stmt)
db.commit()
@subscriber(NewRequest)
def new_request_subscriber(event):
request = event.request
settings = request.registry.settings
request.db = sqlite3.connect(tasksdb_path)
request.add_finished_callback(close_db_connection)
def close_db_connection(request):
request.db.close()
########## ------------- Util methods ------------------
def getSessionHashString(sessionHash):
if type(sessionHash) is list:
return sessionHash[0]
elif type(sessionHash) is tuple:
return sessionHash[0]
else:
return sessionHash
def nameIsValid(name):
breakName = name.split()
for part in breakName:
if not part.isalpha():
log.error("The name "+name+" is not valid")
return False
return True
def generateVerifyCode():
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
def generateSessionHash(request):
sessionHash = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(15))
rs = request.db.execute('select sessionhash from bhailog where sessionhash = (?);',[sessionHash])
if len(rs.fetchall()) == 0:
log.info("Generated a new sessionHash - "+sessionHash)
return sessionHash
else:
return generateSessionHash(request)
def successful_signup(request,dict):
log.info("entered successful_signup")
verifyCode = generateVerifyCode()
sessionHash = generateSessionHash(request)
try:
#here
request.db.execute(
'insert into bhailog (name, email, phone, sessionhash, verifycode, progress) values (?, ?, ?, ?, ?, ?);',
[dict['name'].upper(), dict['email'].lower(),dict['contact'],sessionHash,verifyCode,0])
request.db.execute('insert into events (sessionhash,description) values (?,?);',[sessionHash,'Signed up'])
request.db.commit()
setSessionHash(request,sessionHash)
log.info("[successful_signup] signup done. sessionHash set into request")
except Exception as e:
log.warning("Exception occured while trying to signup. Probably conflicting email/contact - "+str(e))
rs = request.db.execute('select sessionhash from bhailog where name = (?) and email = (?) and phone = (?);',[dict['name'].upper(),dict['email'].lower(),dict['contact']])
resultArray = rs.fetchall()
if len(resultArray) == 0:
return signup_failed()
else:
log.info("[successful_signup] Existed in user tables. Logging in back.")
sessionHash = resultArray[0]
setSessionHash(request,sessionHash)
log.info("[successful_signup] sessionHash set into request")
return moveToCorrectPage(request)
def signup_failed():
log.error("[signup_failed] signup has failed.")
return {
'errors':'your signup has failed. Some input was incorrectly filled.',
'message': 'Please input your correct details to signup for the puzzle.',
'optionalmsg':''
}
def checkForEmptyValue(value):
if not value:
return True
def incrementProgress(request):
# all problem is here. getProgressUsingSession could return a HTTPFound, but i was trying to increment it. Fixed now.
progress = getProgressUsingSession(request)
progress = int(progress)
sessionHash = getSessionHash(request)
sessionHash = getSessionHashString(sessionHash)
try:
progress += 1
request.db.execute(
'update bhailog set progress = (?) where sessionhash = (?);',[progress,sessionHash])
request.db.execute('insert into events (sessionhash,description) values (?,?);',[sessionHash,'Progress incremented to '+str(progress)])
request.db.execute('insert into trials (sessionhash,progress,count) values (?,?,?);',[sessionHash,progress,0])
request.db.commit()
except Exception as e:
log.error("[incrementProgress] Had to increment progress but db call failed. - "+str(e))
return
log.info("[incrementProgress] progress incremented for sessionHash : "+sessionHash+" :)")
def getProgressUsingSession(request):
log.debug("entered getProgressUsingSession")
sessionHash = getSessionHash(request)
sessionHash = getSessionHashString(sessionHash)
log.info("[getProgressUsingSession] have to get progress using session. sessionHash = "+str(sessionHash))
if sessionHash == "null":
return HTTPFound(route_url('homeInError', request))
progress = ""
try:
rs = request.db.execute('select progress from bhailog where sessionhash = (?);',[sessionHash])
except Exception as e:
log.error("[getProgressUsingSession] select query to fetch progress from db failed. - "+str(e))
for row in rs.fetchall():
progress = row[0]
log.info("[getProgressUsingSession] progress fetched, equals "+str(progress))
return progress
def moveToCorrectPage(request):
log.info("Entering moveToCorrectPage")
progress = getProgressUsingSession(request)
sessionHash = getSessionHash(request)
sessionHash = getSessionHashString(sessionHash)
log.info("[moveToCorrectPage] progress = "+str(progress)+" for sessionHash:"+sessionHash)
#here
if(progress == 0):
log.debug("[moveToCorrectPage] redirecting to game page")
return HTTPFound(route_url('show_game', request))
elif(progress == 1):
log.debug("[moveToCorrectPage] redirecting to first page")
return HTTPFound(route_url('show_first', request))
elif(progress == 2):
log.debug("[moveToCorrectPage] redirecting to second page")
return HTTPFound(route_url('show_second', request))
elif(progress == 3):
log.debug("[moveToCorrectPage] redirecting to thid page")
return HTTPFound(route_url('show_third', request))
elif(progress == 4):
log.debug("[moveToCorrectPage] redirecting to final page")
return HTTPFound(route_url('show_final', request))
elif(progress == 5):
log.debug("[moveToCorrectPage] redirecting to completed page")
return HTTPFound(route_url('show_completed',request))
else:
log.debug("[moveToCorrectPage] redirecting to home page with optional message")
return HTTPFound(route_url('homeInError', request))
########## --------- Encryption methods -------------------
key = 'abcdefghijklmnopqrstuvwxyz'
def encrypt(n, plaintext):
"""Encrypt the string and return the ciphertext"""
result = ''
for l in plaintext.lower():
try:
i = (key.index(l) + n) % 26
result += key[i]
except ValueError:
result += l
return result.lower()
def decrypt(n, ciphertext):
"""Decrypt the string and return the plaintext"""
result = ''
for l in ciphertext:
try:
i = (key.index(l) - n) % 26
result += key[i]
except ValueError:
result += l
return result
def getRandomString():
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
def getEncryptedRandomString(randomString):
return encrypt(9,randomString)
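# Added sanity check for the shift-cipher helpers above (illustrative; not part
# of the original handlers): with a shift of 9, 'hello' maps to 'qnuux' and
# decrypt() undoes encrypt() for alphabetic input.
assert encrypt(9, 'hello') == 'qnuux'
assert decrypt(9, encrypt(9, 'hello')) == 'hello'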
def getCurrentTime():
return int(time.time())
#import hashlib
# hash_str = hashlib.sha256('hello world')
# hex_dig = hash_str.hexdigest()
# hex_dig
####################### ------------ Session variables -----------
def setSessionHash(request,sessionHash):
session = request.session
session['sessionHash'] = sessionHash
def getSessionHash(request):
session = request.session
print session
if 'sessionHash' in session:
return session['sessionHash']
else:
log.error("[getSessionHash] Couldn't find sessionHash in session. Will return null.")
return "null"
########## ------------- Handling 0th Page ------------------
@view_config(route_name='home',request_method='GET',renderer='htmls/main.pt')
def home(request):
return {
'errors': '',
'optionalmsg':'',
'message' : 'Please enter your correct details to signup/login for the puzzle.<br> We will contact you via these in case you win.'
}
@view_config(route_name='homeInError',request_method='GET',renderer='htmls/main.pt')
def homeInError(request):
return {
'errors': '',
'optionalmsg':'So sorry, session was lost or something went wrong.',
'message' : 'Please enter your details again to signup/login for the puzzle'
}
@view_config(route_name='home',request_method='POST',renderer='htmls/main.pt')
def handleSignup(request):
log.debug("entered handleSignup")
dict = {}
for key,value in request.POST.items():
key1 = key.encode('ascii','ignore')
value1 = value.encode('ascii','ignore')
dict[str(key1)] = str(value1)
if(checkForEmptyValue(value)):
log.error("[handleSignup] "+key+" has empty value. Can't signup like this.")
return signup_failed()
log.info("[handleSignup] values submitted for signup/login are "+str(dict))
if(nameIsValid(dict['name'])):
# todo - add a verification code for email
return successful_signup(request,dict)
else:
return signup_failed()
############ ------------- DEFAULT for all pages -------------
def incrementCountForTrials(request):
progress = getProgressUsingSession(request)
sessionhash = getSessionHash(request)
sessionhash = getSessionHashString(sessionhash)
try:
request.db.execute('update trials set count = count + ? where progress = (?) and sessionhash = (?);',[1,progress,sessionhash])
request.db.commit()
except Exception as e:
log.error("[incrementCountForTrials] could not increment count in trial. - "+str(e))
pass
def exceededTrials(request):
progress = getProgressUsingSession(request)
sessionhash = getSessionHash(request)
sessionhash = getSessionHashString(sessionhash)
try:
rs = request.db.execute('select count from trials where progress = (?) and sessionhash = (?);',[progress,sessionhash])
except Exception as e:
log.error("[incrementCountForTrials] could not increment count in trial. - "+str(e))
for row in rs.fetchall():
count = int(row[0])
if count >= 100:
return True
else:
return False
def getStuffForTeesra(request):
log.debug("entered getStuffForTeesra")
timeStart = time.time()
randomString = getRandomString()
sessionHash = getSessionHash(request)
sessionHash = getSessionHashString(sessionHash)
if(sessionHash == 'null'):
log.error("[getStuffForTeesra] Session got null somehow. Redirecting to first page.")
return moveToCorrectPage(request)
cipherRandomString = getEncryptedRandomString(randomString)
timeVal = getCurrentTime()
try:
request.db.execute('insert or replace into khazana (sessionhash,time,randomstring) values (?,?,?);',[sessionHash,str(timeVal),randomString])
request.db.execute('insert into events (sessionhash, description) values (?,?);',[sessionHash,'Requested another random string'])
request.db.commit()
except Exception as e:
log.error("[getStuffForTeesra] Had to insert into treasure but couldn't. - "+str(e))
log.debug("[getStuffForTeesra] added new time and randomstring values for session:"+sessionHash)
timeFin = time.time()
print str(timeFin-timeStart)
log.info('[getStuffForTeesra] It took %0.3f ms' % ((timeFin-timeStart)*1000.0))
return {
'errors':'',
'randomString':cipherRandomString
}
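# Added explanatory note: getStuffForTeesra() stores the plain random string and
# its generation timestamp in the khazana table and hands the page only the
# shift-9 encrypted version; handleLevelThree() further down accepts an answer
# only if it matches the stored plain string and arrives within one second, so
# the player is expected to automate the decryption.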
def getCorrectJsonForProgress(request,actualProgress):
#here
if(actualProgress == 0):
val = random.randint(1,100)
return {'val':val,'info':''}
elif(actualProgress == 1):
return {'errors':''}
elif(actualProgress == 2):
return {'errors':''}
elif(actualProgress == 3):
return getStuffForTeesra(request)
elif(actualProgress == 4):
return {
'status_of_answer_submission':'',
'color':'beige'
}
elif(actualProgress == 5):
return {'email':'bhavul.g@endurance.com'}
def ensureProgressIsCorrect(request,correctProgress):
actualProgress = getProgressUsingSession(request)
log.info("[ensureProgressIsCorrect] actualProgress = "+str(actualProgress))
# todo - check that if guy is done with everything he can't jump back
if actualProgress == correctProgress:
log.info("[ensureProgressIsCorrect] actual progress is correct progress.")
return getCorrectJsonForProgress(request,actualProgress)
else:
log.error("[ensureProgressIsCorrect] actual progress and correct progress differ. Will move to correct page.")
return moveToCorrectPage(request)
########## ------------- Handling 1st Page ------------------
@view_config(route_name='show_first',
request_method='GET',
renderer='htmls/first.pt')
def levelone(request):
return ensureProgressIsCorrect(request,1)
@view_config(route_name='show_first',
request_method='POST',
renderer='htmls/first.pt')
def handleLevelOneResponse(request):
progress = getProgressUsingSession(request)
if int(progress) != 1:
log.warning("[handleLevelOneResponse] someone's goofing around, making POST call where he should not.")
return moveToCorrectPage(request)
dict = {}
for key,value in request.POST.items():
key1 = key.encode('ascii','ignore')
value1 = value.encode('ascii','ignore')
dict[str(key1)] = str(value1)
if not (dict['name'].upper() == 'JOHN'):
log.debug("[handleLevelOneResponse] name entered isn't John. It is "+dict['name']+". Hence, redirecting to wrong page.")
return HTTPFound(route_url('show_wrong_second', request, _query={'name':dict['name']}))
else:
incrementProgress(request)
log.info("[handleLevelOneResponse] name entered is John. Let's go forward.")
return moveToCorrectPage(request)
########## ------------- Handling 2nd Page ------------------
@view_config(route_name='show_wrong_second',
request_method='GET',
renderer='htmls/second.pt')
def leveltwowrong(request):
name = request.params['name']
if name.upper() == 'JOHN':
return {'message':'Good try. But this is not the correct way to login as John. Go back, try again.'}
else:
return {'message':'But you have reached a dead end. :( '}
@view_config(route_name='show_second',
request_method='GET',
renderer='htmls/two.pt')
def leveltwo(request):
return ensureProgressIsCorrect(request,2)
@view_config(route_name='show_second',
request_method='POST',
renderer='htmls/two.pt')
def handleLevelTwo(request):
progress = getProgressUsingSession(request)
if int(progress) != 2:
log.warning("[handleLevelTwo] someone's goofing around, making POST call where he should not.")
return moveToCorrectPage(request)
if exceededTrials(request):
return {'errors':'You have exceeded the permitted number of submissions. You can not play anymore. Sorry.'}
incrementCountForTrials(request)
dict = {}
for key,value in request.POST.items():
key1 = key.encode('ascii','ignore')
value1 = value.encode('ascii','ignore')
dict[str(key1)] = str(value1)
sessionHash = getSessionHash(request)
sessionHash = getSessionHashString(sessionHash)
log.info("[handleLevelTwo] sessionHash - "+sessionHash+", the username and password submitted are - "+str(dict))
if not 'username' in dict:
return {'errors':'Username/Password is not correct. Please re-enter.'}
if not 'password' in dict:
return {'errors':'Username/Password is not correct. Please re-enter.'}
if checkForEmptyValue(dict['username']) or checkForEmptyValue(dict['password']):
return {'errors':'Username/Password is not correct. Please re-enter.'}
if dict['username'] == 'johndoe' and dict['password'] == 'OfcoursenotHiddEn1':
incrementProgress(request)
return moveToCorrectPage(request)
else:
if dict['username'] == 'johndoe' and dict['password'] != 'OfcoursenotHiddEn':
return {'errors':'Password is still not correct. Please re-enter.'}
elif dict['username'] == 'johndoe' and dict['password'] == 'OfcoursenotHiddEn':
return {'errors':'Password is still not correct. But you are so damn close! Please re-enter.'}
else:
return {'errors':'Username/Password is not correct. Please re-enter.'}
########## ------------- Handling 3rd Page ------------------
@view_config(route_name='show_third',
request_method='GET',
renderer='htmls/teesra.pt')
def levelthree(request):
session = request.session
log.debug("[levelthree] session in levelthree method is "+str(session))
return ensureProgressIsCorrect(request,3)
@view_config(route_name='show_third',
request_method='POST',
renderer='htmls/teesra.pt')
def handleLevelThree(request):
progress = getProgressUsingSession(request)
if int(progress) != 3:
log.warning("[handleLevelThree] someone's goofing around, making POST call where he should not.")
return moveToCorrectPage(request)
dict = {}
for key,value in request.POST.items():
key1 = key.encode('ascii','ignore')
value1 = value.encode('ascii','ignore')
dict[str(key1)] = str(value1)
if not 'answer' in dict:
log.warning("[handleLevelThree] answer was not given, but page submitted. Reloading page.")
return getStuffForTeesra(request)
if (checkForEmptyValue(dict['answer'])):
log.warning("[handleLevelThree] answer is empty. Reloading page.")
return getStuffForTeesra(request)
timeStart = time.time()
timeVal = getCurrentTime()
sessionHash = getSessionHash(request)
sessionHash = getSessionHashString(sessionHash)
if sessionHash == 'null':
log.warning("[handleLevelThree] since session was null, can't accept the input. Reloading login page.")
return HTTPFound(route_url('homeInError', request))
try:
rs = request.db.execute('select time,randomstring from khazana where sessionhash = (?);',[sessionHash])
except Exception as e:
log.error("[handleLevelThree] had to get time and randomstring from db, but failed. - "+str(e))
timeFromDb = 0
randomstringFromDb = " "
for row in rs.fetchall():
timeFromDb = row[0]
randomstringFromDb = row[1]
log.info("[handleLevelThree] fetched time,randomstring from db. row = "+str(row))
        break
timeFin = time.time()
log.info('[handleLevelThree] handling response of teesra function took %0.3f ms' % ((timeFin-timeStart)*1000.0))
if dict['answer'] == randomstringFromDb and (timeVal-int(timeFromDb) <= 1):
log.info("[handleLevelThree] That was correct for sessionHash"+sessionHash+" and submitted in "+str((timeVal-int(timeFromDb)))+" time.")
incrementProgress(request)
return HTTPFound(route_url('show_final', request))
else:
log.info("[handleLevelThree] Couldn't get through for sessionHash"+sessionHash+" >> answer:"+dict['answer']+",randomstringfromdb:"+str(randomstringFromDb)+" | time now:"+str(timeVal)+", timefromdb:"+str(timeFromDb))
return getStuffForTeesra(request)
########## ------------- Handling 4th Page ------------------
@view_config(route_name='show_final',
request_method='GET',
renderer='htmls/final.pt')
def levelFinal(request):
return ensureProgressIsCorrect(request,4)
def storefinalsubmission(request,dict):
if not 'code' in dict:
dict['code'] = " "
sessionhash = getSessionHash(request)
sessionhash = getSessionHashString(sessionhash)
try:
request.db.execute('insert or replace into finalsubmission (sessionhash,answer,linktocode) values (?,?,?);',[sessionhash,dict['answer'],dict['code']])
request.db.commit()
except Exception as e:
log.error("[storefinalsubmission] could not save final answer of "+sessionhash+". Reason - "+str(e))
pass
@view_config(route_name='show_final',
request_method='POST',
renderer='htmls/final.pt')
def handleLevelFinal(request):
progress = getProgressUsingSession(request)
if int(progress) != 4:
log.warning("[handleLevelFinal] someone's goofing around, making POST call where he should not.")
return moveToCorrectPage(request)
if exceededTrials(request):
return {'status_of_answer_submission':'You have done more submissions than permitted. You can not submit anymore, sorry.',
'color':'beige'}
incrementCountForTrials(request)
dict = {}
for key,value in request.POST.items():
key1 = key.encode('ascii','ignore')
value1 = value.encode('ascii','ignore')
dict[str(key1)] = str(value1)
log.info("[handleLevelFinal] answer details submitted - "+str(dict))
if not 'answer' in dict:
return {'status_of_answer_submission':'Your submission was not proper and was not counted. You can re-submit if you wish.',
'color':'beige'}
storefinalsubmission(request,dict)
if dict['answer'] == '2926319':
incrementProgress(request)
log.info("[handleLevelFinal] This guy has finally done it - "+getSessionHashString(getSessionHash(request)))
return HTTPFound(route_url('show_completed', request))
else:
return {'status_of_answer_submission':'Your answer is not correct. You can re-submit if you wish. We store your latest submission.',
'color':'beige'}
########## ------------- Handling 5th Page ------------------
@view_config(route_name='show_completed',
request_method='GET',
renderer='htmls/completed.pt')
def levelCompleted(request):
return ensureProgressIsCorrect(request,5)
@view_config(route_name='show_completed',
request_method='POST',
renderer='htmls/completed.pt')
def handleLevelCompleted(request):
return ensureProgressIsCorrect(request,5)
########## ------------- Handling 0th Page ------------------
@view_config(route_name='show_game',
request_method='GET',
renderer='htmls/game.pt')
def levelZero(request):
return ensureProgressIsCorrect(request,0)
@view_config(route_name='show_game',
request_method='POST',
renderer='htmls/game.pt')
def handleLevelZero(request):
dict = {}
for key,value in request.POST.items():
key1 = key.encode('ascii','ignore')
value1 = value.encode('ascii','ignore')
dict[str(key1)] = str(value1)
if dict['hiddenval'] == dict['value']:
incrementProgress(request)
log.info("[handleLevelZero] Correct value chosen. Let's go forward.")
return HTTPFound(route_url('show_hoodie', request))
else:
val = random.randint(1,100)
return {'val':val,'info':'Your answer was incorrect. How about another try? This could get you some awesome freebie.'}
return HTTPFound(route_url('show_game', request))
@view_config(route_name='show_hoodie',
request_method='GET',
renderer='htmls/hoodie.pt')
def levelHoodie(request):
return {}
| 24,877
| 24,877
| 0.660691
|
b66e6d9e4ddb82c17b2ba93194051581d151a0ca
| 10,062
|
py
|
Python
|
sample/helper.py
|
gazsim/RPICourseTrends
|
99aede5b7f9dcae8a5d0fd85da52b484146f9367
|
[
"MIT"
] | 4
|
2018-09-11T20:17:49.000Z
|
2018-12-13T01:01:07.000Z
|
sample/helper.py
|
gazsim/RPICourseTrends
|
99aede5b7f9dcae8a5d0fd85da52b484146f9367
|
[
"MIT"
] | 13
|
2018-10-30T19:43:39.000Z
|
2019-02-22T21:21:09.000Z
|
sample/helper.py
|
gazsim/RPICourseTrends
|
99aede5b7f9dcae8a5d0fd85da52b484146f9367
|
[
"MIT"
] | 6
|
2018-09-11T20:19:18.000Z
|
2022-03-24T02:05:01.000Z
|
import urllib
from urllib import request
from html.parser import HTMLParser
from bs4 import BeautifulSoup
from datetime import datetime
import numpy as np
from sample import Database
import mysql
class Course(object):
def __init__(self, data):
self.CRN = data[0]
self.SubjCrse = data[1]
self.Subject = self.SubjCrse[0:4]
self.Crse = self.SubjCrse[4:]
self.Title = data[2]
self.Type = data[3]
self.Cred = data[4]
self.GrTp = data[5]
self.Days = data[6]
self.StartTime = data[7]
self.EndTime = data[8]
self.Instructor = data[9]
self.Location = data[10]
self.Cap = int(data[11])
self.Act = int(data[12])
self.Rem = int(data[13])
# Convert to json object
# jobObject: dictionary
def toJson(self):
jobObject = dict()
jobObject["Course"] = self.Crse
jobObject["Title"] = self.Title
jobObject["Section Actual"] = self.Act
jobObject["Section Remaining"] = self.Rem
jobObject["Instructor"] = self.Instructor
return(jobObject)
# for get the key of course which is course number and professor
# return: string of course number and professor
def getKey(self):
return self.SubjCrse + "/" + self.Instructor
def getSubject(self):
return self.Subject
def getActual(self):
return self.Act
def getProf(self):
return self.Instructor
def getCourse(self):
return self.Crse
def getSubjCrse(self):
return self.SubjCrse
# return the information of course include key, course_name, professor_name
# deparment, crouse code, max capacity, semester time.
def getInfo(self, year):
result = [
self.SubjCrse+year+self.Instructor,
self.Title,
self.Subject,
self.Crse,
self.Instructor,
self.Cap,
year,
"",
""
]
return result
def __str__(self):
return "actual:{}".format(self.Act)
def __repr__(self):
# return "actual:{}".format(self.Act)
return "{}".format(self.Act)
def __add__(self, other):
self.Cap += other.Cap
self.Rem += other.Rem
self.Act += other.Act
data = [self.CRN, self.SubjCrse, self.Title, self.Type, self.Cred, self.GrTp, self.Days,
self.StartTime, self.EndTime, self.Instructor, self.Location, self.Cap, self.Act, self.Rem,
]
return Course(data)
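    # Added note: "adding" two Course objects merges two sections of the same
    # course/professor pair by summing Cap, Act and Rem; get_professor() below
    # relies on this when the same key appears more than once.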
# for debug
def getDescription(self):
print(self.jobDescription)
# class MyHTMLParser(HTMLParser):
# def handle_starttag(self, tag, attrs):
# print("Encountered a start tag:", tag)
#
# def handle_endtag(self, tag):
# print("Encountered an end tag :", tag)
#
# def handle_data(self, data):
# print("Encountered some data :", data)
def processHtml(rawElement):
data = []
print(rawElement)
for index in range(len(rawElement)):
text = rawElement[index].get_text().strip()
# data.append(text)
if index == 0 and text != " " and text != "":
temp = text.split(" ")
data.append(temp[0])
data.append(temp[1].split("-")[0] + temp[1].split("-")[1])
oldDepartment = temp[1].split("-")[0]
tempDepartment = oldDepartment
else:
data.append(text)
# handle the Note row in website
if index == 3 and "NOTE:" in data[1]:
return None
if data[0] == "" and data[8] == "staff":
return None
return Course(data)
def createCourseInfoTable():
myDB = "RPICourseTrends"
db = Database.CourseDb("Ruijie", "gengruijie123", "142.93.59.116", myDB)
table_name = "courseInfo"
element = [
["id", "varchar(35)"],
["courseName", "varchar(35)"],
["department", "varchar(5)"],
["courseCode", "varchar(10)"],
["professor", "varchar(35)"],
["max", "int"],
["time", "varchar(5)"],
["comment1", "varchar(30)"]
]
key = "id"
db.create_tables( table_name, element, key)
def storeCourseInfoTable(courses, db,table,year):
storeValue = []
for key, value in sorted(courses.items()):
storeValue.append(key[:14])
storeValue = value.getInfo(year)
db.insert_data(storeValue, table)
def get_professor(db, url, createIdentity=False):
localFile, headers = urllib.request.urlretrieve(url)
html = open(localFile)
soup = BeautifulSoup(html, 'lxml')
# print(soup.get_text)
# f = open("test1.txt","w")
# for element in soup.find_all("td"):
# # text = element.get_text().strip()
# print(element)
# f.write(element.get_text())
# f.close()
data = []
courses = dict()
professor = dict()
department = dict()
courseInOneDepartment = dict()
count = 0
keyCount = 0
# elements = soup.find_all("td")
# elements = np.array(elements).reshape((int(len(elements)/14),14))
# for index in range(len(elements)):
# course = processHtml(elements[index])
# if course == None:
# continue
# if course.getProf() == "staff":
# index += 1
# course2 = processHtml(elements[index])
# # print(element)
temp_course = []
incompleteData = []
for element in soup.find_all("td"):
text = element.get_text().strip()
# print(element)
if count == 0 and text != " " and text != "":
temp = text.split(" ")
data.append(temp[0])
data.append(temp[1].split("-")[0] + temp[1].split("-")[1])
oldDepartment = temp[1].split("-")[0]
tempDepartment = oldDepartment
else:
data.append(text)
count += 1
        # get rid of the note situation when count == 3; the note is not a course
if count == 3 and "NOTE:" in data[1]:
count = 0
data.clear()
continue
if count == 14:
# this part is to get rid of master and PHD situation
if "DISSERTATION" in data[2] or \
("MASTER" in data[2] and "THESIS" in data[2]) or \
("MASTER" in data[2] and "PROJECT" in data[2]):
count = 0
data.clear()
continue
# this is the situation that professor is staff, and no student register this course
if data[9] == "Staff" and data[11] == data[12] == data[13] == 0:
count = 0
data.clear()
continue
if data[9] == "Pacheco":
pass
# print()
# this is the situation that this course is a lab session and we do not have incomplete data
# just skip
if data[0] == "" and (data[8] == "Staff" or incompleteData == []):
count = 0
data.clear()
continue
            # this is the situation where this line is a new course but a lab session, so we do not
            # record the course information yet. We treat it as an incomplete course and will find
            # the professor in the next line
elif data[0] != "" and data[9] == "Staff":
count = 0
incompleteData = data[:]
data.clear()
continue
# this code block is trying to complete incomplete course
if incompleteData != [] and data[8] != "Staff" and data[0] == "":
if incompleteData[9] == data[8]:
incompleteData = []
count = 0
data.clear()
continue
incompleteData[9] = data[8]
data = incompleteData[:]
incompleteData = []
course = Course(data)
subject = course.getSubject()
courseAndProf = course.getCourse() + course.getProf()
if subject in department:
if courseAndProf in department[subject]:
totalNumber = department[subject][courseAndProf] + course.getActual()
department[subject][courseAndProf] = totalNumber
else:
department[subject][courseAndProf] = course.getActual()
else:
department[subject] = dict()
department[subject][courseAndProf] = course.getActual()
professor[course.getKey()] = keyCount
keyCount += 1
# key is course number and professor
if course.getKey() in courses:
courses[course.getKey()] = courses[course.getKey()] + course
else:
courses[course.getKey()] = course
data.clear()
count = 0
professor = []
if createIdentity == True:
db.drop_table()
createCourseInfoTable()
storeCourseInfoTable(courses, db)
for key, value in sorted(courses.items()):
professor.append(key)
# print(key)
# courses.clear()
data.clear()
count = 0
return department, courses
def check_table(table, year):
if len(table) != 7 or table == "courseInfo" or table == "semesterInfo":
return False
flag = [1,1,1,1,1,0,0]
for index in range(len(table)):
if table[index].isalpha() != flag[index]:
return False
if table[4:] != year:
return False
return True
def drop_tables(db, year):
DB_NAME = db.get_name()
tables = db.show_tables(DB_NAME)
for table in tables:
if not check_table(table, year):
continue
db.drop_table(table)
if __name__ == '__main__':
myDB = "RPICourseTrends2"
db = Database.CourseDb("Ruijie", "XXXXXXXX", "142.93.59.116", myDB)
# table_name = "courseInfo"
# db.setTable(table_name)
# # db.drop_table(table_name)
# get_professor(db, True)
| 30.398792
| 107
| 0.547207
|
54d9d07cc9be9e4c22b1e77e899e64019a731db8
| 5,147
|
py
|
Python
|
options/base_options.py
|
shashikant-ghangare/ThermalGAN
|
be8fae6bbd5ee22326eecbbb2b84c013aa11df05
|
[
"MIT"
] | 75
|
2018-11-26T23:04:37.000Z
|
2022-03-28T07:28:58.000Z
|
options/base_options.py
|
matinraayai/ThermalGAN
|
be8fae6bbd5ee22326eecbbb2b84c013aa11df05
|
[
"MIT"
] | 13
|
2018-12-09T09:55:24.000Z
|
2022-01-27T17:26:00.000Z
|
options/base_options.py
|
matinraayai/ThermalGAN
|
be8fae6bbd5ee22326eecbbb2b84c013aa11df05
|
[
"MIT"
] | 23
|
2019-08-15T01:01:44.000Z
|
2022-03-21T13:37:30.000Z
|
import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSize', type=int, default=542, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
self.parser.add_argument('--which_model_netG', type=str, default='unet_512', help='selects model to use for netG')
self.parser.add_argument('--n_layers_D', type=int, default=4, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
self.parser.add_argument('--model', type=str, default='cycle_gan',
help='chooses which model to use. cycle_gan, pix2pix, test')
self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"),
help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
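# Minimal usage sketch (an assumption for illustration; the real train/test
# option classes live in separate files). Subclasses are expected to set
# isTrain, which parse() copies onto the returned options object:
class _ExampleTrainOptions(BaseOptions):
    def initialize(self):
        BaseOptions.initialize(self)
        self.isTrain = True  # hypothetical subclass flag used by parse()
# opt = _ExampleTrainOptions().parse()  # would read sys.argv and write opt.txt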
| 64.3375
| 197
| 0.645619
|
81ce9f742b0599adc1f16f71a1cc9deb318c33c2
| 1,706
|
py
|
Python
|
sleepypuppy/admin/user/views.py
|
soffensive/sleepy-puppy
|
ab8ff46d624d30458740676b6930184e97a742a6
|
[
"Apache-2.0"
] | 952
|
2015-08-31T16:50:22.000Z
|
2018-08-22T08:48:27.000Z
|
sleepypuppy/admin/user/views.py
|
soffensive/sleepy-puppy
|
ab8ff46d624d30458740676b6930184e97a742a6
|
[
"Apache-2.0"
] | 18
|
2015-09-03T19:23:23.000Z
|
2018-06-24T17:36:40.000Z
|
sleepypuppy/admin/user/views.py
|
rjw1/sleepy-puppy
|
2ef1eddff60045c8dd3799d9a57974fa3a7c5eb7
|
[
"Apache-2.0"
] | 135
|
2015-09-01T09:22:48.000Z
|
2018-08-26T02:10:16.000Z
|
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext import login
from flask_wtf import Form
from wtforms import validators
from models import User
class UserView(ModelView):
"""
ModelView override of Flask Admin for Users.
"""
# CSRF protection
form_base_class = Form
# Ensure user is authenticated
def is_accessible(self):
return login.current_user.is_authenticated()
# Column tweaks
column_list = ('email', 'assessments')
column_labels = dict(email='Email Address', assessments='Assessments')
# Form tweaks and validations
form_args = dict(
email=dict(
description='Enter email address to receive notifications when captures are received',
validators=[validators.required(), validators.email()]
),
assessments=dict(
description='Subscribe to assessments to receive notifications',
validators=[validators.required()]
)
)
def __init__(self, session, **kwargs):
super(UserView, self).__init__(User, session, **kwargs)
| 34.12
| 98
| 0.689332
|
eac046d2b72b8acdfdfdfe815d8004d7a8d3c9aa
| 3,496
|
py
|
Python
|
predict.py
|
birds-on-mars/birdsonearth
|
62921423b787ad8b81b8e60e8de42a3f6e113d88
|
[
"Apache-2.0"
] | 13
|
2019-04-11T10:02:11.000Z
|
2021-12-01T22:27:18.000Z
|
predict.py
|
birds-on-mars/birdsonearth
|
62921423b787ad8b81b8e60e8de42a3f6e113d88
|
[
"Apache-2.0"
] | 2
|
2019-12-17T13:31:09.000Z
|
2020-05-14T09:48:10.000Z
|
predict.py
|
birds-on-mars/birdsonearth
|
62921423b787ad8b81b8e60e8de42a3f6e113d88
|
[
"Apache-2.0"
] | 2
|
2020-07-17T21:03:18.000Z
|
2021-07-14T02:09:31.000Z
|
import sys, getopt
import imp
import params as p
import VGGish_model as m
import torch
import os
import pickle
import numpy as np
from utils import vggish_input
from utils import preprocessing as pre
from utils import Dataset as d
from utils import trainer as t
imp.reload(p)
imp.reload(d)
imp.reload(m)
imp.reload(t)
imp.reload(pre)
def prepare(params):
'''
reads in a terminal command of the form:
$ python predict.py <file path 1> <file path 2> ...
and returns a list of files for inference
TODO: prediction for directory, taking model name as option
'''
# reading in file names from terminal command
print('working out options')
try:
opts, args = getopt.getopt(sys.argv[1:], 'n:d:', ['name=', 'device='])
except getopt.GetoptError as err:
print(err)
sys.exit()
for o, a in opts:
if o in ('-n', '--name'):
params.name = a
if o in ('-d', '--device'):
params.device = a
files = None
if args:
files = args
if files is None:
raise IOError('provide a file to predict like so:\n \
$python predict.py <file path>')
return params, files
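# Example invocation (illustrative file and model names; -n/--name picks the
# saved model from params.model_zoo, -d/--device the torch device):
#   python predict.py -n birds_v1 -d cpu recording1.wav recording2.wav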
def load_model_with(params):
print('loading model')
# load class labels
with open(os.path.join(params.model_zoo, params.name+'.pkl'), 'rb') as f:
labels = pickle.load(f)
# init network and load weights
params.n_classes = len(labels)
device = torch.device(params.device)
net = m.VGGish(params)
new_top = torch.nn.Linear(net.out_dims*512, net.n_classes)
net.classifier = new_top
net.load_state_dict(torch.load(os.path.join(params.model_zoo, params.name+'.pt'),
map_location=device))
net.to(device)
net.eval()
print('model for labels {} is ready'.format(labels))
return net, labels
def predict(net, labels, files, params):
print('starting inference')
device = torch.device(params.device)
predictions = []
probs = []
for i, file in enumerate(files):
filename = os.path.splitext(os.path.basename(file))[0]
processed = filename + '_proc.wav'
pre.preprocess(file, processed)
data = vggish_input.wavfile_to_examples(processed)
data = torch.from_numpy(data).unsqueeze(1).float()
data = data.to(device)
net.to(device)
out = net(data)
# # for each spectrogram/row index of max probability
# pred = np.argmax(out.detach().cpu().numpy(), axis=1)
# # find most frequent index over all spectrograms
# consensus = np.bincount(pred).argmax()
# print('file {} sounds like a {} to me'.format(i, labels[consensus]))
# mean probabilities for each col/class over all spectrograms
mean_probs = np.mean(out.detach().cpu().numpy(), axis=0)
# find index of max mean_probs
idx = np.argmax(mean_probs)
print('file {} sounds like a {} to me'.format(i, labels[idx]))
print('my guesses are: ')
for j, label in enumerate(labels):
print('{0}: {1:.04f}'.format(label, mean_probs[j]))
# predictions.append(labels[consensus])
predictions.append(labels[idx])
probs.append(mean_probs)
os.remove(processed)
return predictions, probs
if __name__ == '__main__':
params = p.Params()
params, files = prepare(params)
net, labels = load_model_with(params)
_ = predict(net, labels, files, params)
| 30.4
| 85
| 0.626716
|
c2a612e4d5c180ba1f613cdd492713405039836c
| 1,425
|
py
|
Python
|
crime.py
|
tulerfeng/datamining-homework-1
|
6d9989c7f60d5fd05dae12ea754a13ee3f45f944
|
[
"MIT"
] | null | null | null |
crime.py
|
tulerfeng/datamining-homework-1
|
6d9989c7f60d5fd05dae12ea754a13ee3f45f944
|
[
"MIT"
] | null | null | null |
crime.py
|
tulerfeng/datamining-homework-1
|
6d9989c7f60d5fd05dae12ea754a13ee3f45f944
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
def loader():
filepath = "crime/records-for-2011.csv"
df = pd.read_csv(filepath, header=0)
return df
def cnt(str1,data1):
    print(data1[str1].value_counts()[:10])
def find(str2,data2):
nums = data2[str2]
nullnum = nums.isnull().sum()
nums = nums.dropna(axis = 0)
Minimum = min(nums)
Maximum = max(nums)
Q1 = np.percentile(nums, 25)
Median = np.median(nums)
Q3 = np.percentile(nums, 75)
print("Minimum:%d; Q1:%d; Median:%d; Q3:%d; Maximum:%d;"%(Minimum , Q1 , Median , Q3 , Maximum))
def f1(data1):
Data1 = data1.dropna(axis = 0)
hist = Data1.hist(bins=100)
Data1.plot.box()
plt.show()
def f2(data2):
    Data2 = data2.fillna(data2.mode()[0])  # fill missing values with the most frequent value
hist2 = Data2.hist(bins=100)
Data2.plot.box()
plt.show()
df = loader()
cnt("Agency",df)
cnt("Create Time",df)
cnt("Location",df)
cnt("Area Id",df)
cnt("Beat",df)
cnt("Priority",df)
cnt("Closed Time",df)
find("Area Id",df)
find("Area Id",df)
"""
dfff_Area_Id.value_counts().head(10).plot.bar()
plt.title('dfff_Area_Id')
plt.show()
dfff_Priority.value_counts().head(10).plot.bar()
plt.title('dfff_Priority')
plt.show()
dfff_Incident_Type_Id.value_counts().head(10).plot.bar()
plt.title('dfff_Incident_Type_Id')
plt.show()
"""
| 23.360656
| 102
| 0.621754
|
ddbdad8dbe00a299758723151aa67d6c423b39fb
| 311
|
py
|
Python
|
setup.py
|
chenyaoBOYqu/albino
|
8c43893b8d8dc3f92a01c2a6f8f66f2bd123b333
|
[
"CC0-1.0"
] | 8
|
2020-06-30T20:16:14.000Z
|
2021-01-26T00:51:27.000Z
|
setup.py
|
chenyaoBOYqu/albino
|
8c43893b8d8dc3f92a01c2a6f8f66f2bd123b333
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
chenyaoBOYqu/albino
|
8c43893b8d8dc3f92a01c2a6f8f66f2bd123b333
|
[
"CC0-1.0"
] | 3
|
2020-09-02T23:03:04.000Z
|
2021-03-21T23:47:46.000Z
|
#!/usr/bin/env python3
import setuptools
setuptools.setup(
name="gpt2_bot",
version="0",
author="Al Beano",
author_email="albino@autistici.org",
description="GPT-2 SUPER NEXT GENERATION AI irc shitposting bot",
url="https://github.com/albino/shithead-X",
scripts=["gpt2_bot.py"],
)
| 23.923077
| 69
| 0.681672
|
1e1b3c91409c975568e698a0c5458078390eea24
| 4,200
|
py
|
Python
|
code/train.py
|
marco-digio/Twitter4SSE
|
724b3f5d638b17a0e46353a1ea1a409da17ab8bd
|
[
"Apache-2.0"
] | 6
|
2021-10-09T17:24:53.000Z
|
2022-01-02T17:23:41.000Z
|
code/train.py
|
marco-digio/Twitter4SSE
|
724b3f5d638b17a0e46353a1ea1a409da17ab8bd
|
[
"Apache-2.0"
] | null | null | null |
code/train.py
|
marco-digio/Twitter4SSE
|
724b3f5d638b17a0e46353a1ea1a409da17ab8bd
|
[
"Apache-2.0"
] | null | null | null |
from sentence_transformers import SentenceTransformer, InputExample
from sentence_transformers import evaluation, losses
from torch.utils.data import DataLoader
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import os
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from torch import nn
import pandas as pd
import numpy as np
import gzip
import csv
import sys
import math
import re
from utils import derange
init_model_name = sys.argv[1] # Initialization model: my-vinai/bertweet-base, my-roberta-base, stsb-roberta-base, bert-base-nli-stsb-mean-tokens
n_train = int(sys.argv[2]) # number of training points
dataset_name = sys.argv[3] # Name of the training dataset: quote, reply, coquote, coreply
loss_name = sys.argv[4] # Name of Loss: MultipleNegativesRankingLoss or TripletLoss
batch_size = int(sys.argv[5]) # Batch size
idx = sys.argv[6] # Index of run (tested on 5 runs)
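# Example invocation (illustrative placeholder values):
#   python code/train.py my-vinai/bertweet-base 100000 quote MultipleNegativesRankingLoss 64 1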
# if the model name starst with "my-", remove this prefix and use the pretrained model with mean pooling
# otherwise it is a Sentence Model
if init_model_name[:2] == 'my':
word_embedding_model = models.Transformer(init_model_name[3:], max_seq_length=128)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
else:
model = SentenceTransformer(init_model_name)
# Datasets allowed
df = pd.read_csv('Data/dataset_'+dataset_name+'_FINAL.csv', sep='\t', index_col='Unnamed: 0')
df = df.sample(frac=1, replace=False, random_state=1)
print('Tot size of dataset: ', df.shape[0])
df_train = df.iloc[:n_train].copy()
print('Training size: ', df_train.shape[0])
if loss_name == 'MultipleNegativesRankingLoss':
train_loss = losses.MultipleNegativesRankingLoss(model=model)
train_samples = [InputExample(texts=[x[2], x[3]]) for x in df_train.values]
elif loss_name == 'TripletLoss':
train_loss = losses.TripletLoss(model=model)
    df_train['text3'] = derange(df_train['text1'].values.copy()) # create negatives by shuffling text1
train_samples = [InputExample(texts=[x[2], x[3], x[4]]) for x in df_train.values]
else:
print('wrong loss name')
exit()
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
# EVALUATE ON STSbenchmark
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
# TRAINING
num_epochs = 1
model_save_path = init_model_name.replace('/', '-')+\
'_'+str(n_train)+'_'+dataset_name+'_'+loss_name+'_'+str(batch_size)+'_'+str(idx)
# Warmup steps 10% of total steps
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=400,
warmup_steps=warmup_steps,
output_path=model_save_path,
save_best_model=False)
model.save(model_save_path)
# Load model and test it on STSb test
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
test_evaluator(model, output_path=model_save_path)
| 40
| 144
| 0.721667
|
82458f4378ed530c5179351f5905bda6b3362f12
| 214,323
|
py
|
Python
|
kvirt/cli.py
|
pacevedom/kcli
|
6e2035fb92f4adbdc44b9e1fe19c1231f46deadf
|
[
"Apache-2.0"
] | null | null | null |
kvirt/cli.py
|
pacevedom/kcli
|
6e2035fb92f4adbdc44b9e1fe19c1231f46deadf
|
[
"Apache-2.0"
] | null | null | null |
kvirt/cli.py
|
pacevedom/kcli
|
6e2035fb92f4adbdc44b9e1fe19c1231f46deadf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# coding=utf-8
from distutils.spawn import find_executable
from getpass import getuser
from kvirt.config import Kconfig
from kvirt.examples import plandatacreate, vmdatacreate, hostcreate, _list, plancreate, planinfo, productinfo
from kvirt.examples import repocreate, isocreate, kubegenericcreate, kubek3screate, kubeopenshiftcreate, start
from kvirt.examples import dnscreate, diskcreate, diskdelete, vmcreate, vmconsole, vmexport, niccreate, nicdelete
from kvirt.examples import disconnectercreate, appopenshiftcreate, plantemplatecreate
from kvirt.baseconfig import Kbaseconfig
from kvirt.containerconfig import Kcontainerconfig
from kvirt import version
from kvirt.defaults import IMAGES, VERSION, LOCAL_OPENSHIFT_APPS
from prettytable import PrettyTable
import argcomplete
import argparse
from argparse import RawDescriptionHelpFormatter as rawhelp
from glob import glob
from kvirt import common
from kvirt.common import error, pprint, success, warning, ssh, _ssh_credentials
from kvirt import nameutils
import os
import random
import requests
from subprocess import call
import sys
from urllib.parse import urlparse
import yaml
def cache_vms(baseconfig, region, zone, namespace):
cache_file = "%s/.kcli/%s_vms.yml" % (os.environ['HOME'], baseconfig.client)
if os.path.exists(cache_file):
with open(cache_file, 'r') as vms:
_list = yaml.safe_load(vms)
pprint("Using cache information...")
else:
config = Kconfig(client=baseconfig.client, debug=baseconfig.debug, region=region, zone=zone,
namespace=namespace)
_list = config.k.list()
with open(cache_file, 'w') as c:
pprint("Caching results for %s..." % baseconfig.client)
            try:
                yaml.safe_dump(_list, c, default_flow_style=False, encoding='utf-8', allow_unicode=True,
                               sort_keys=False)
            except:
                # older PyYAML releases do not accept the sort_keys argument
                yaml.safe_dump(_list, c, default_flow_style=False, encoding='utf-8', allow_unicode=True)
return _list
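# Added note: the per-client VM cache lives at ~/.kcli/<client>_vms.yml;
# delete_cache() further down removes it so the next listing queries the
# provider again.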
def valid_fqdn(name):
if name is not None and '/' in name:
msg = "Vm name can't include /"
raise argparse.ArgumentTypeError(msg)
return name
def valid_url(url):
if url is not None:
parsed_url = urlparse(url)
if parsed_url.scheme == '' or parsed_url.netloc == '':
msg = "Malformed url"
raise argparse.ArgumentTypeError(msg)
return url
def valid_members(members):
try:
return members[1:-1].split(',')
except:
msg = "Incorrect members list"
raise argparse.ArgumentTypeError(msg)
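# Added note: valid_members() expects the bracketed form passed on the command
# line, e.g. the string "[vm1,vm2,vm3]" is parsed into ['vm1', 'vm2', 'vm3'].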
def valid_cluster(name):
if name is not None:
if '/' in name:
msg = "Cluster name can't include /"
raise argparse.ArgumentTypeError(msg)
return name
def alias(text):
return "Alias for %s" % text
def get_subparser_print_help(parser, subcommand):
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
if choice == subcommand:
subparser.print_help()
return
def get_subparser(parser, subcommand):
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
if choice == subcommand:
return subparser
def get_version(args):
full_version = "version: %s" % VERSION
versiondir = os.path.dirname(version.__file__)
git_file = '%s/git' % versiondir
git_version = 'N/A'
git_date = ''
if os.path.exists(git_file) and os.stat(git_file).st_size > 0:
git_version, git_date = open(git_file).read().rstrip().split(' ')
git_date = '(%s)' % git_date
full_version += " commit: %s %s" % (git_version, git_date)
update = 'N/A'
if git_version != 'N/A':
try:
upstream_version = requests.get("https://api.github.com/repos/karmab/kcli/commits/master").json()['sha'][:7]
update = True if upstream_version != git_version else False
except:
pass
full_version += " Available Updates: %s" % update
print(full_version)
def delete_cache(args):
yes_top = args.yes_top
yes = args.yes
if not yes and not yes_top:
common.confirm("Are you sure?")
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
cache_file = "%s/.kcli/%s_vms.yml" % (os.environ['HOME'], baseconfig.client)
if os.path.exists(cache_file):
pprint("Deleting cache on %s" % baseconfig.client)
os.remove(cache_file)
else:
warning("No cache file found for %s" % baseconfig.client)
def start_vm(args):
"""Start vms"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
k = config.k
codes = []
for name in names:
pprint("Starting vm %s..." % name)
result = k.start(name)
code = common.handle_response(result, name, element='', action='started')
codes.append(code)
sys.exit(1 if 1 in codes else 0)
def start_container(args):
"""Start containers"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint("Starting container %s..." % name)
cont.start_container(name)
def stop_vm(args):
"""Stop vms"""
soft = args.soft
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
if config.extraclients:
ks = config.extraclients
ks.update({config.client: config.k})
else:
ks = {config.client: config.k}
codes = []
for cli in ks:
k = ks[cli]
for name in names:
pprint("Stopping vm %s in %s..." % (name, cli))
result = k.stop(name, soft=soft)
code = common.handle_response(result, name, element='', action='stopped')
codes.append(code)
sys.exit(1 if 1 in codes else 0)
def stop_container(args):
"""Stop containers"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
if config.extraclients:
ks = config.extraclients
ks.update({config.client: config.k})
else:
ks = {config.client: config.k}
for cli in ks:
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint("Stopping container %s in %s..." % (name, cli))
cont.stop_container(name)
def restart_vm(args):
"""Restart vms"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
k = config.k
codes = []
for name in names:
pprint("Restarting vm %s..." % name)
result = k.restart(name)
code = common.handle_response(result, name, element='', action='restarted')
codes.append(code)
sys.exit(1 if 1 in codes else 0)
def restart_container(args):
"""Restart containers"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint("Restarting container %s..." % name)
cont.stop_container(name)
cont.start_container(name)
def console_vm(args):
"""Vnc/Spice/Serial Vm console"""
serial = args.serial
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
name = common.get_lastvm(config.client) if not args.name else args.name
k = config.k
tunnel = config.tunnel
if serial:
k.serialconsole(name)
else:
k.console(name=name, tunnel=tunnel)
def console_container(args):
"""Container console"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
name = common.get_lastvm(config.client) if not args.name else args.name
cont = Kcontainerconfig(config, client=args.containerclient).cont
cont.console_container(name)
return
def delete_vm(args):
"""Delete vm"""
snapshots = args.snapshots
count = args.count
yes_top = args.yes_top
yes = args.yes
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
names = args.names
if not names:
error("Can't delete vms on multiple hosts without specifying their names")
sys.exit(1)
else:
allclients = {config.client: config.k}
names = [common.get_lastvm(config.client)] if not args.names else args.names
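    # With count > 1, expand the single base name into name-0 ... name-(count-1)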
if count > 1:
if len(args.names) == 1:
names = ["%s-%d" % (args.names[0], number) for number in range(count)]
else:
error("Using count when deleting vms requires specifying an unique name")
sys.exit(1)
dnsclients = allclients.copy()
for cli in sorted(allclients):
k = allclients[cli]
if not yes and not yes_top:
common.confirm("Are you sure?")
codes = []
for name in names:
pprint("Deleting vm %s on %s" % (name, cli))
dnsclient, domain = k.dnsinfo(name)
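            # Optionally unregister the rhel subscription over ssh before deleting the vm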
if config.rhnunregister:
image = k.info(name).get('image')
                if image is not None and 'rhel' in image:
pprint("Removing rhel subscription for %s" % name)
ip, vmport = _ssh_credentials(k, name)[1:]
cmd = "subscription-manager unregister"
sshcmd = ssh(name, ip=ip, user='root', tunnel=config.tunnel,
tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
tunneluser=config.tunneluser, insecure=True, cmd=cmd, vmport=vmport)
os.system(sshcmd)
else:
warning("vm %s doesnt appear as a rhel box. Skipping unregistration" % name)
result = k.delete(name, snapshots=snapshots)
if result['result'] == 'success':
success("%s deleted" % name)
codes.append(0)
common.set_lastvm(name, cli, delete=True)
else:
reason = result['reason']
codes.append(1)
error("Could not delete %s because %s" % (name, reason))
common.set_lastvm(name, cli, delete=True)
if dnsclient is not None and domain is not None:
pprint("Deleting Dns entry for %s in %s" % (name, domain))
if dnsclient in dnsclients:
z = dnsclients[dnsclient]
else:
z = Kconfig(client=dnsclient).k
dnsclients[dnsclient] = z
z.delete_dns(name, domain)
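            # If the vm belongs to a kube cluster (-master-/-worker- naming), also remove the matching node using the stored kubeconfig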
cluster = name.split('-')[0] if '-master-' in name or '-worker-' in name else None
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if cluster is not None and os.path.exists(clusterdir):
os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
installparam = yaml.safe_load(install)
kubetype = installparam.get('kubetype', 'kubectl')
binary = 'oc' if kubetype == 'openshift' else 'kubectl'
domain = installparam.get('domain')
if domain is not None:
try:
pprint("Deleting node %s.%s from your cluster" % (name, domain))
call('%s delete node %s.%s' % (binary, name, domain), shell=True)
except:
continue
sys.exit(1 if 1 in codes else 0)
def delete_container(args):
"""Delete container"""
yes = args.yes
yes_top = args.yes_top
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
names = args.names
else:
allclients = {config.client: config.k}
names = args.names
for cli in sorted(allclients):
if not yes and not yes_top:
common.confirm("Are you sure?")
codes = [0]
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint("Deleting container %s on %s" % (name, cli))
cont.delete_container(name)
sys.exit(1 if 1 in codes else 0)
def download_image(args):
"""Download Image"""
pool = args.pool
image = args.image
cmd = args.cmd
url = args.url
size = args.size
arch = args.arch
openstack = args.openstack
update_profile = not args.skip_profile
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
result = config.handle_host(pool=pool, image=image, download=True, cmd=cmd, url=url, update_profile=update_profile,
size=size, arch=arch, kvm_openstack=openstack)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def download_iso(args):
"""Download ISO"""
pool = args.pool
url = args.url
iso = args.iso if args.iso is not None else os.path.basename(url)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
result = config.handle_host(pool=pool, image=iso, download=True, url=url, update_profile=False)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def delete_image(args):
images = args.images
pool = args.pool
yes = args.yes
yes_top = args.yes_top
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
else:
allclients = {config.client: config.k}
for cli in sorted(allclients):
k = allclients[cli]
if not yes and not yes_top:
common.confirm("Are you sure?")
codes = []
for image in images:
clientprofile = "%s_%s" % (cli, image)
imgprofiles = [p for p in config.profiles if 'image' in config.profiles[p] and
config.profiles[p]['image'] == os.path.basename(image) and
p.startswith('%s_' % cli)]
pprint("Deleting image %s on %s" % (image, cli))
if clientprofile in config.profiles and 'image' in config.profiles[clientprofile]:
profileimage = config.profiles[clientprofile]['image']
config.delete_profile(clientprofile, quiet=True)
result = k.delete_image(profileimage, pool=pool)
elif imgprofiles:
imgprofile = imgprofiles[0]
config.delete_profile(imgprofile, quiet=True)
result = k.delete_image(image, pool=pool)
else:
result = k.delete_image(image, pool=pool)
if result['result'] == 'success':
success("%s deleted" % image)
codes.append(0)
else:
reason = result['reason']
error("Could not delete image %s because %s" % (image, reason))
codes.append(1)
sys.exit(1 if 1 in codes else 0)
def create_profile(args):
"""Create profile"""
profile = args.profile
overrides = common.get_overrides(param=args.param)
baseconfig = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
result = baseconfig.create_profile(profile, overrides=overrides)
code = common.handle_response(result, profile, element='Profile', action='created', client=baseconfig.client)
return code
def delete_profile(args):
"""Delete profile"""
profile = args.profile
baseconfig = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
pprint("Deleting on %s" % baseconfig.client)
result = baseconfig.delete_profile(profile)
code = common.handle_response(result, profile, element='Profile', action='deleted', client=baseconfig.client)
return code
# sys.exit(0) if result['result'] == 'success' else sys.exit(1)
def update_profile(args):
"""Update profile"""
profile = args.profile
overrides = common.get_overrides(param=args.param)
baseconfig = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
result = baseconfig.update_profile(profile, overrides=overrides)
code = common.handle_response(result, profile, element='Profile', action='updated', client=baseconfig.client)
return code
def info_vm(args):
"""Get info on vm"""
output = args.output
fields = args.fields.split(',') if args.fields is not None else []
values = args.values
config = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
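    # With caching enabled, vm information is read from the local cache instead of querying the provider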
if config.cache:
names = [common.get_lastvm(config.client)] if not args.names else args.names
_list = cache_vms(config, args.region, args.zone, args.namespace)
vms = {vm['name']: vm for vm in _list}
else:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
for name in names:
if config.cache and name in vms:
data = vms[name]
else:
data = config.k.info(name, debug=args.debug)
if data:
print(common.print_info(data, output=output, fields=fields, values=values, pretty=True))
def enable_host(args):
"""Enable host"""
host = args.name
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
result = baseconfig.enable_host(host)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def disable_host(args):
"""Disable host"""
host = args.name
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
result = baseconfig.disable_host(host)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def delete_host(args):
"""Delete host"""
common.delete_host(args.name)
def sync_host(args):
"""Handle host"""
hosts = args.names
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
result = config.handle_host(sync=hosts)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def list_vm(args):
"""List vms"""
filters = args.filters
if args.client is not None and args.client == 'all':
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
args.client = ','.join(baseconfig.clients)
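    # With multiple clients (comma-separated or 'all'), include a Host column and iterate over each client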
if args.client is not None and ',' in args.client:
vms = PrettyTable(["Name", "Host", "Status", "Ips", "Source", "Plan", "Profile"])
for client in args.client.split(','):
config = Kbaseconfig(client=client, debug=args.debug, quiet=True)
if config.cache:
_list = cache_vms(config, args.region, args.zone, args.namespace)
else:
config = Kconfig(client=client, debug=args.debug, region=args.region,
zone=args.zone, namespace=args.namespace)
_list = config.k.list()
for vm in _list:
name = vm.get('name')
status = vm.get('status')
ip = vm.get('ip', '')
source = vm.get('image', '')
plan = vm.get('plan', '')
profile = vm.get('profile', '')
vminfo = [name, client, status, ip, source, plan, profile]
if filters:
if status == filters:
vms.add_row(vminfo)
else:
vms.add_row(vminfo)
print(vms)
else:
vms = PrettyTable(["Name", "Status", "Ips", "Source", "Plan", "Profile"])
config = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if config.cache:
_list = cache_vms(config, args.region, args.zone, args.namespace)
else:
config = Kconfig(client=args.client, debug=args.debug, region=args.region,
zone=args.zone, namespace=args.namespace)
_list = config.k.list()
for vm in _list:
name = vm.get('name')
status = vm.get('status')
ip = vm.get('ip', '')
source = vm.get('image', '')
plan = vm.get('plan', '')
profile = vm.get('profile', '')
vminfo = [name, status, ip, source, plan, profile]
if filters:
if status == filters:
vms.add_row(vminfo)
else:
vms.add_row(vminfo)
print(vms)
return
def list_container(args):
"""List containers"""
filters = args.filters
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
cont = Kcontainerconfig(config, client=args.containerclient).cont
pprint("Listing containers...")
containers = PrettyTable(["Name", "Status", "Image", "Plan", "Command", "Ports", "Deploy"])
for container in cont.list_containers():
if filters:
status = container[1]
if status == filters:
containers.add_row(container)
else:
containers.add_row(container)
print(containers)
return
def profilelist_container(args):
"""List container profiles"""
short = args.short
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
profiles = baseconfig.list_containerprofiles()
if short:
profilestable = PrettyTable(["Profile"])
for profile in sorted(profiles):
profilename = profile[0]
profilestable.add_row([profilename])
else:
profilestable = PrettyTable(["Profile", "Image", "Nets", "Ports", "Volumes", "Cmd"])
for profile in sorted(profiles):
profilestable.add_row(profile)
profilestable.align["Profile"] = "l"
print(profilestable)
return
def list_containerimage(args):
"""List container images"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.type != 'kvm':
error("Operation not supported on this kind of client.Leaving...")
sys.exit(1)
cont = Kcontainerconfig(config, client=args.containerclient).cont
common.pprint("Listing images...")
images = PrettyTable(["Name"])
for image in cont.list_images():
images.add_row([image])
print(images)
return
def list_host(args):
"""List hosts"""
clientstable = PrettyTable(["Client", "Type", "Enabled", "Current"])
clientstable.align["Client"] = "l"
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
for client in sorted(baseconfig.clients):
enabled = baseconfig.ini[client].get('enabled', True)
_type = baseconfig.ini[client].get('type', 'kvm')
if client == baseconfig.client:
clientstable.add_row([client, _type, enabled, 'X'])
else:
clientstable.add_row([client, _type, enabled, ''])
print(clientstable)
return
def list_lb(args):
"""List lbs"""
short = args.short
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
loadbalancers = config.list_loadbalancers()
if short:
loadbalancerstable = PrettyTable(["Loadbalancer"])
for lb in sorted(loadbalancers):
loadbalancerstable.add_row([lb])
else:
loadbalancerstable = PrettyTable(["LoadBalancer", "IPAddress", "IPProtocol", "Ports", "Target"])
for lb in sorted(loadbalancers):
loadbalancerstable.add_row(lb)
loadbalancerstable.align["Loadbalancer"] = "l"
print(loadbalancerstable)
return
def info_profile(args):
"""List profiles"""
profile = args.profile
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
profiles = baseconfig.list_profiles()
for entry in profiles:
if entry[0] == profile:
profile, flavor, pool, disks, image, nets, cloudinit, nested, reservedns, reservehost = entry
print("profile: %s" % profile)
print("flavor: %s" % flavor)
print("pool: %s" % pool)
print("disks: %s" % disks)
print("image: %s" % image)
print("nets: %s" % nets)
print("cloudinit: %s" % cloudinit)
print("nested: %s" % nested)
print("reservedns: %s" % reservedns)
print("reservehost: %s" % reservehost)
sys.exit(0)
break
error("Profile %s doesn't exist" % profile)
sys.exit(1)
def list_profile(args):
"""List profiles"""
short = args.short
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
profiles = baseconfig.list_profiles()
if short:
profilestable = PrettyTable(["Profile"])
for profile in sorted(profiles):
profilename = profile[0]
profilestable.add_row([profilename])
else:
profilestable = PrettyTable(["Profile", "Flavor",
"Pool", "Disks", "Image",
"Nets", "Cloudinit", "Nested",
"Reservedns", "Reservehost"])
for profile in sorted(profiles):
profilestable.add_row(profile)
profilestable.align["Profile"] = "l"
print(profilestable)
return
def list_dns(args):
"""List flavors"""
short = args.short
domain = args.domain
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
entries = k.list_dns(domain)
if short:
dnstable = PrettyTable(["Entry"])
for entry in sorted(entries):
entryname = entry[0]
dnstable.add_row([entryname])
else:
dnstable = PrettyTable(["Entry", "Type", "TTL", "Data"])
for entry in sorted(entries):
dnstable.add_row(entry)
dnstable.align["Flavor"] = "l"
print(dnstable)
return
def list_flavor(args):
"""List flavors"""
short = args.short
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
flavors = k.flavors()
if short:
flavorstable = PrettyTable(["Flavor"])
for flavor in sorted(flavors):
flavorname = flavor[0]
flavorstable.add_row([flavorname])
else:
flavorstable = PrettyTable(["Flavor", "Numcpus", "Memory"])
for flavor in sorted(flavors):
flavorstable.add_row(flavor)
flavorstable.align["Flavor"] = "l"
print(flavorstable)
return
def list_image(args):
"""List images"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.client != 'all':
k = config.k
imagestable = PrettyTable(["Images"])
imagestable.align["Images"] = "l"
for image in k.volumes():
imagestable.add_row([image])
print(imagestable)
return
def list_iso(args):
"""List isos"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.client != 'all':
k = config.k
isostable = PrettyTable(["Iso"])
isostable.align["Iso"] = "l"
for iso in k.volumes(iso=True):
isostable.add_row([iso])
print(isostable)
return
def list_network(args):
"""List networks"""
short = args.short
subnets = args.subnets
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.client != 'all':
k = config.k
if not subnets:
networks = k.list_networks()
pprint("Listing Networks...")
if short:
networkstable = PrettyTable(["Network"])
for network in sorted(networks):
networkstable.add_row([network])
else:
networkstable = PrettyTable(["Network", "Type", "Cidr", "Dhcp", "Domain", "Mode"])
for network in sorted(networks):
networktype = networks[network]['type']
cidr = networks[network]['cidr']
dhcp = networks[network]['dhcp']
mode = networks[network]['mode']
if 'domain' in networks[network]:
domain = networks[network]['domain']
else:
domain = 'N/A'
networkstable.add_row([network, networktype, cidr, dhcp, domain, mode])
networkstable.align["Network"] = "l"
print(networkstable)
return
else:
subnets = k.list_subnets()
pprint("Listing Subnets...")
if short:
subnetstable = PrettyTable(["Subnets"])
for subnet in sorted(subnets):
subnetstable.add_row([subnet])
else:
subnetstable = PrettyTable(["Subnet", "Az", "Cidr", "Network"])
for subnet in sorted(subnets):
cidr = subnets[subnet]['cidr']
az = subnets[subnet]['az']
if 'network' in subnets[subnet]:
network = subnets[subnet]['network']
else:
network = 'N/A'
subnetstable.add_row([subnet, az, cidr, network])
subnetstable.align["Network"] = "l"
print(subnetstable)
return
def list_plan(args):
"""List plans"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
plans = PrettyTable(["Plan", "Host", "Vms"])
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
for cli in sorted(allclients):
currentconfig = Kconfig(client=cli, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
for plan in currentconfig.list_plans():
planname = plan[0]
planvms = plan[1]
plans.add_row([planname, cli, planvms])
else:
plans = PrettyTable(["Plan", "Vms"])
for plan in config.list_plans():
planname = plan[0]
planvms = plan[1]
plans.add_row([planname, planvms])
print(plans)
return
def choose_parameter_file(paramfile):
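    # Inside the kcli container, user files live under /workdir; default to kcli_parameters.yml when present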
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
return paramfile
def create_app_generic(args):
apps = args.apps
outputdir = args.outputdir
if outputdir is not None:
if os.path.exists("/i_am_a_container") and not outputdir.startswith('/'):
outputdir = "/workdir/%s" % outputdir
if os.path.exists(outputdir) and os.path.isfile(outputdir):
error("Invalid outputdir %s" % outputdir)
sys.exit(1)
elif not os.path.exists(outputdir):
os.mkdir(outputdir)
paramfile = choose_parameter_file(args.paramfile)
if find_executable('kubectl') is None:
error("You need kubectl to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
available_apps = baseconfig.list_apps_generic(quiet=True)
for app in apps:
if app not in available_apps:
error("app %s not available. Skipping..." % app)
continue
pprint("Adding app %s" % app)
overrides['%s_version' % app] = overrides['%s_version' % app] if '%s_version' % app in overrides else 'latest'
baseconfig.create_app_generic(app, overrides, outputdir=outputdir)
def create_app_openshift(args):
apps = args.apps
outputdir = args.outputdir
if outputdir is not None:
if os.path.exists("/i_am_a_container") and not outputdir.startswith('/'):
outputdir = "/workdir/%s" % outputdir
if os.path.exists(outputdir) and os.path.isfile(outputdir):
error("Invalid outputdir %s" % outputdir)
sys.exit(1)
elif not os.path.exists(outputdir):
os.mkdir(outputdir)
paramfile = choose_parameter_file(args.paramfile)
if find_executable('oc') is None:
error("You need oc to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
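    # Apps listed in LOCAL_OPENSHIFT_APPS are installed from bundled assets; any other app is resolved through the OLM catalog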
for app in apps:
if app in LOCAL_OPENSHIFT_APPS:
name = app
else:
name, source, channel, csv, description, namespace, crd = common.olm_app(app)
if name is None:
error("Couldn't find any app matching %s. Skipping..." % app)
continue
app_data = {'name': name, 'source': source, 'channel': channel, 'csv': csv, 'namespace': namespace,
'crd': crd}
overrides.update(app_data)
pprint("Adding app %s" % name)
baseconfig.create_app_openshift(name, overrides, outputdir=outputdir)
def delete_app_generic(args):
apps = args.apps
paramfile = args.paramfile
if find_executable('kubectl') is None:
error("You need kubectl to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
available_apps = baseconfig.list_apps_generic(quiet=True)
for app in apps:
if app not in available_apps:
error("app %s not available. Skipping..." % app)
continue
pprint("Deleting app %s" % app)
overrides['%s_version' % app] = overrides['%s_version' % app] if '%s_version' % app in overrides else 'latest'
baseconfig.delete_app_generic(app, overrides)
def delete_app_openshift(args):
apps = args.apps
paramfile = choose_parameter_file(args.paramfile)
if find_executable('oc') is None:
error("You need oc to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
for app in apps:
if app in LOCAL_OPENSHIFT_APPS:
name = app
else:
name, source, channel, csv, description, namespace, crd = common.olm_app(app)
if name is None:
error("Couldn't find any app matching %s. Skipping..." % app)
continue
app_data = {'name': name, 'source': source, 'channel': channel, 'csv': csv, 'namespace': namespace,
'crd': crd}
overrides.update(app_data)
pprint("Deleting app %s" % name)
baseconfig.delete_app_openshift(app, overrides)
def list_apps_generic(args):
"""List generic kube apps"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
apps = PrettyTable(["Name"])
for app in baseconfig.list_apps_generic(quiet=True):
apps.add_row([app])
print(apps)
def list_apps_openshift(args):
"""List openshift kube apps"""
if find_executable('oc') is None:
error("You need oc to list apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
apps = PrettyTable(["Name"])
for app in baseconfig.list_apps_openshift(quiet=True):
apps.add_row([app])
print(apps)
def list_kube(args):
"""List kube"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
kubestable = PrettyTable(["Cluster", "Type", "Plan", "Host", "Vms"])
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
for cli in sorted(allclients):
currentconfig = Kconfig(client=cli, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
kubes = currentconfig.list_kubes()
for kubename in kubes:
kube = kubes[kubename]
kubetype = kube['type']
kubeplan = kube['plan']
kubevms = kube['vms']
kubestable.add_row([kubename, kubetype, kubeplan, cli, kubevms])
else:
kubestable = PrettyTable(["Cluster", "Type", "Plan", "Vms"])
kubes = config.list_kubes()
for kubename in kubes:
kube = kubes[kubename]
kubetype = kube['type']
kubevms = kube['vms']
kubeplan = kube['plan']
kubestable.add_row([kubename, kubetype, kubeplan, kubevms])
print(kubestable)
return
def list_pool(args):
"""List pools"""
short = args.short
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pools = k.list_pools()
if short:
poolstable = PrettyTable(["Pool"])
for pool in sorted(pools):
poolstable.add_row([pool])
else:
poolstable = PrettyTable(["Pool", "Path"])
for pool in sorted(pools):
poolpath = k.get_pool_path(pool)
poolstable.add_row([pool, poolpath])
poolstable.align["Pool"] = "l"
print(poolstable)
return
def list_product(args):
"""List products"""
group = args.group
repo = args.repo
search = args.search
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if search is not None:
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
products = PrettyTable(["Repo", "Product", "Group", "Description", "Numvms", "Memory"])
products.align["Repo"] = "l"
productsinfo = baseconfig.list_products(repo=repo)
for prod in sorted(productsinfo, key=lambda x: (x['repo'], x['group'], x['name'])):
name = prod['name']
repo = prod['repo']
prodgroup = prod['group']
description = prod.get('description', 'N/A')
if search.lower() not in name.lower() and search.lower() not in description.lower():
continue
if group is not None and prodgroup != group:
continue
numvms = prod.get('numvms', 'N/A')
memory = prod.get('memory', 'N/A')
            products.add_row([repo, name, prodgroup, description, numvms, memory])
else:
products = PrettyTable(["Repo", "Product", "Group", "Description", "Numvms", "Memory"])
products.align["Repo"] = "l"
productsinfo = baseconfig.list_products(group=group, repo=repo)
for product in sorted(productsinfo, key=lambda x: (x['repo'], x['group'], x['name'])):
name = product['name']
repo = product['repo']
description = product.get('description', 'N/A')
numvms = product.get('numvms', 'N/A')
memory = product.get('memory', 'N/A')
group = product.get('group', 'N/A')
products.add_row([repo, name, group, description, numvms, memory])
print(products)
return
def list_repo(args):
"""List repos"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
repos = PrettyTable(["Repo", "Url"])
repos.align["Repo"] = "l"
reposinfo = baseconfig.list_repos()
for repo in sorted(reposinfo):
url = reposinfo[repo]
repos.add_row([repo, url])
print(repos)
return
def list_vmdisk(args):
"""List vm disks"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Listing disks...")
diskstable = PrettyTable(["Name", "Pool", "Path"])
diskstable.align["Name"] = "l"
disks = k.list_disks()
for disk in sorted(disks):
path = disks[disk]['path']
pool = disks[disk]['pool']
diskstable.add_row([disk, pool, path])
print(diskstable)
return
def create_openshift_iso(args):
cluster = args.cluster
ignitionfile = args.ignitionfile
overrides = common.get_overrides(param=args.param)
client = 'fake' if common.need_fake() else args.client
config = Kconfig(client=client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.create_openshift_iso(cluster, overrides=overrides, ignitionfile=ignitionfile)
def create_openshift_disconnecter(args):
plan = args.plan
if plan is None:
plan = nameutils.get_random_name()
pprint("Using %s as name of the plan" % plan)
overrides = common.get_overrides(param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.create_openshift_disconnecter(plan, overrides=overrides)
def create_vm(args):
"""Create vms"""
name = args.name
onlyassets = True if 'assets' in vars(args) else False
image = args.image
profile = args.profile
count = args.count
profilefile = args.profilefile
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
wait = args.wait
console = args.console
serial = args.serial
if 'wait' in overrides and isinstance(overrides['wait'], bool) and overrides['wait']:
wait = True
if wait and 'keys' not in overrides and not os.path.exists(os.path.expanduser("~/.ssh/id_rsa.pub"))\
and not os.path.exists(os.path.expanduser("~/.ssh/id_dsa.pub"))\
and not os.path.exists(os.path.expanduser("~/.kcli/id_rsa.pub"))\
and not os.path.exists(os.path.expanduser("~/.kcli/id_dsa.pub")):
error("No usable public key found, which is mandatory when using wait")
sys.exit(1)
customprofile = {}
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
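    # Reject overrides whose type doesn't match the type of the corresponding config attribute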
for key in overrides:
if key in vars(config) and vars(config)[key] is not None and type(overrides[key]) != type(vars(config)[key]):
key_type = str(type(vars(config)[key]))
error("The provided parameter %s has a wrong type, it should be %s" % (key, key_type))
sys.exit(1)
if 'name' in overrides:
name = overrides['name']
if name is None:
name = nameutils.get_random_name()
if config.type in ['gcp', 'kubevirt']:
name = name.replace('_', '-')
if config.type != 'aws' and not onlyassets:
pprint("Using %s as name of the vm" % name)
if image is not None:
if image in config.profiles and not onlyassets:
pprint("Using %s as profile" % image)
profile = image
elif profile is not None:
if profile.endswith('.yml'):
profilefile = profile
profile = None
if not os.path.exists(profilefile):
error("Missing profile file %s" % profilefile)
sys.exit(1)
else:
with open(profilefile, 'r') as entries:
entries = yaml.safe_load(entries)
entrieskeys = list(entries.keys())
if len(entrieskeys) == 1:
profile = entrieskeys[0]
customprofile = entries[profile]
pprint("Using data from %s as profile" % profilefile)
else:
error("Cant' parse %s as profile file" % profilefile)
sys.exit(1)
elif overrides or onlyassets:
profile = 'kvirt'
config.profiles[profile] = {}
else:
error("You need to either provide a profile, an image or some parameters")
sys.exit(1)
if count == 1:
result = config.create_vm(name, profile, overrides=overrides, customprofile=customprofile, wait=wait,
onlyassets=onlyassets)
if not onlyassets:
if console:
config.k.console(name=name, tunnel=config.tunnel)
elif serial:
config.k.serialconsole(name)
else:
code = common.handle_response(result, name, element='', action='created', client=config.client)
return code
elif 'reason' in result:
error(result['reason'])
else:
print(result['data'])
else:
codes = []
if 'plan' not in overrides:
overrides['plan'] = name
for number in range(count):
currentname = "%s-%d" % (name, number)
result = config.create_vm(currentname, profile, overrides=overrides, customprofile=customprofile, wait=wait,
onlyassets=onlyassets)
if not onlyassets:
codes.append(common.handle_response(result, currentname, element='', action='created',
client=config.client))
return max(codes)
def clone_vm(args):
"""Clone existing vm"""
name = args.name
base = args.base
full = args.full
start = args.start
pprint("Cloning vm %s from vm %s..." % (name, base))
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
k.clone(base, name, full=full, start=start)
def update_vm(args):
"""Update ip, memory or numcpus"""
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
ip1 = overrides.get('ip1')
flavor = overrides.get('flavor')
numcpus = overrides.get('numcpus')
memory = overrides.get('memory')
plan = overrides.get('plan')
autostart = overrides.get('autostart')
noautostart = overrides.get('noautostart')
dns = overrides.get('dns')
host = overrides.get('host')
domain = overrides.get('domain')
cloudinit = overrides.get('cloudinit')
image = overrides.get('image')
nets = overrides.get('nets')
disks = overrides.get('disks')
information = overrides.get('information')
iso = overrides.get('iso')
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
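    # Overrides that aren't recognized keywords are treated as extra metadata to set on the vm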
extra_metadata = {k: overrides[k] for k in overrides if k not in config.list_keywords()}
k = config.k
names = [common.get_lastvm(config.client)] if not args.names else args.names
for name in names:
if dns:
pprint("Creating Dns entry for %s..." % name)
networks = k.vm_ports(name)
if networks and domain is None:
domain = networks[0]
if not nets:
return
else:
k.reserve_dns(name=name, nets=networks, domain=domain, ip=ip1)
if ip1 is not None:
pprint("Updating ip of vm %s to %s..." % (name, ip1))
k.update_metadata(name, 'ip', ip1)
if cloudinit:
pprint("Removing cloudinit information of vm %s" % name)
k.remove_cloudinit(name)
if plan is not None:
pprint("Updating plan of vm %s to %s..." % (name, plan))
k.update_metadata(name, 'plan', plan)
if image is not None:
pprint("Updating image of vm %s to %s..." % (name, image))
k.update_metadata(name, 'image', image)
if memory is not None:
pprint("Updating memory of vm %s to %s..." % (name, memory))
k.update_memory(name, memory)
if numcpus is not None:
pprint("Updating numcpus of vm %s to %s..." % (name, numcpus))
k.update_cpus(name, numcpus)
if autostart:
pprint("Setting autostart for vm %s..." % name)
k.update_start(name, start=True)
if noautostart:
pprint("Removing autostart for vm %s..." % name)
k.update_start(name, start=False)
if information:
pprint("Setting information for vm %s..." % name)
k.update_information(name, information)
if iso is not None:
pprint("Switching iso for vm %s to %s..." % (name, iso))
if iso == 'None':
iso = None
k.update_iso(name, iso)
if flavor is not None:
pprint("Updating flavor of vm %s to %s..." % (name, flavor))
k.update_flavor(name, flavor)
if host:
pprint("Creating Host entry for vm %s..." % name)
networks = k.vm_ports(name)
if networks:
if domain is None:
domain = networks[0]
k.reserve_host(name, networks, domain)
currentvm = k.info(name)
currentnets = currentvm.get('nets', [])
currentdisks = currentvm.get('disks', [])
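        # Reconcile disks and nics with the requested lists: add missing entries and remove surplus ones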
if disks:
pprint("Updating disks of vm %s" % name)
if len(currentdisks) < len(disks):
pprint("Adding Disks to %s" % name)
for disk in disks[len(currentdisks):]:
if isinstance(disk, int):
size = disk
pool = config.pool
elif isinstance(disk, str) and disk.isdigit():
size = int(disk)
pool = config.pool
elif isinstance(disk, dict):
size = disk.get('size', config.disksize)
pool = disk.get('pool', config.pool)
else:
continue
k.add_disk(name=name, size=size, pool=pool)
if len(currentdisks) > len(disks):
pprint("Removing Disks of %s" % name)
for disk in currentdisks[len(currentdisks) - len(disks):]:
diskname = os.path.basename(disk['path'])
diskpool = os.path.dirname(disk['path'])
k.delete_disk(name=name, diskname=diskname, pool=diskpool)
if nets:
pprint("Updating nets of vm %s" % name)
if len(currentnets) < len(nets):
pprint("Adding Nics to %s" % name)
for net in nets[len(currentnets):]:
if isinstance(net, str):
network = net
elif isinstance(net, dict) and 'name' in net:
network = net['name']
else:
error("Skpping wrong nic spec for %s" % name)
continue
k.add_nic(name, network)
if len(currentnets) > len(nets):
pprint("Removing Nics of %s" % name)
for net in range(len(currentnets), len(nets), -1):
interface = "eth%s" % (net - 1)
k.delete_nic(name, interface)
if extra_metadata:
for key in extra_metadata:
k.update_metadata(name, key, extra_metadata[key])
def create_vmdisk(args):
"""Add disk to vm"""
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
name = args.name
novm = args.novm
size = args.size
image = args.image
interface = args.interface
if interface not in ['virtio', 'ide', 'scsi']:
error("Incorrect disk interface. Choose between virtio, scsi or ide...")
sys.exit(1)
pool = args.pool
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if size is None:
error("Missing size. Leaving...")
sys.exit(1)
if pool is None:
error("Missing pool. Leaving...")
sys.exit(1)
if novm:
pprint("Creating disk %s..." % name)
else:
pprint("Adding disk to %s..." % name)
k.add_disk(name=name, size=size, pool=pool, image=image, interface=interface, novm=novm, overrides=overrides)
def delete_vmdisk(args):
"""Delete disk of vm"""
yes_top = args.yes_top
yes = args.yes
if not yes and not yes_top:
common.confirm("Are you sure?")
name = args.vm
diskname = args.diskname
novm = args.novm
pool = args.pool
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Deleting disk %s" % diskname)
k.delete_disk(name=name, diskname=diskname, pool=pool, novm=novm)
return
def create_dns(args):
"""Create dns entries"""
names = args.names
net = args.net
domain = args.domain
ip = args.ip
alias = args.alias
if alias is None:
alias = []
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
name = names[0]
if len(names) > 1:
alias.extend(names[1:])
if alias:
pprint("Creating alias entries for %s" % ' '.join(alias))
k.reserve_dns(name=name, nets=[net], domain=domain, ip=ip, alias=alias, primary=True)
def delete_dns(args):
"""Delete dns entries"""
names = args.names
net = args.net
allentries = args.all
domain = args.domain if args.domain is not None else net
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for name in names:
pprint("Deleting Dns entry for %s..." % name)
k.delete_dns(name, domain, allentries=allentries)
def export_vm(args):
"""Export a vm"""
image = args.image
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
k = config.k
codes = []
for name in names:
result = k.export(name=name, image=image)
if result['result'] == 'success':
success("Vm %s exported" % name)
codes.append(0)
else:
reason = result['reason']
error("Could not export vm %s because %s" % (name, reason))
codes.append(1)
sys.exit(1 if 1 in codes else 0)
def create_lb(args):
"""Create loadbalancer"""
checkpath = args.checkpath
checkport = args.checkport
ports = args.ports
domain = args.domain
internal = args.internal
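    # vms and ports may come as plain comma-separated strings or wrapped in brackets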
if args.vms is None:
vms = []
else:
good_vms = args.vms[1:-1] if args.vms.startswith('[') and args.vms.endswith(']') else args.vms
vms = [v.strip() for v in good_vms.split(',')]
good_ports = args.ports[1:-1] if args.ports.startswith('[') and args.ports.endswith(']') else args.ports
ports = [p.strip() for p in good_ports.split(',')]
name = nameutils.get_random_name().replace('_', '-') if args.name is None else args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.create_loadbalancer(name, ports=ports, checkpath=checkpath, vms=vms, domain=domain, checkport=checkport,
internal=internal)
return 0
def delete_lb(args):
"""Delete loadbalancer"""
yes = args.yes
yes_top = args.yes_top
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.delete_loadbalancer(args.name)
return 0
def create_generic_kube(args):
"""Create Generic kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_generic(cluster, overrides=overrides)
def create_k3s_kube(args):
"""Create K3s kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster if args.cluster is not None else 'testk'
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_k3s(cluster, overrides=overrides)
def create_openshift_kube(args):
"""Create Openshift kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
client = 'fake' if common.need_fake() else args.client
config = Kconfig(client=client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if args.subcommand_create_kube == 'okd':
overrides['upstream'] = True
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_openshift(cluster, overrides=overrides)
def delete_kube(args):
"""Delete kube"""
yes = args.yes
yes_top = args.yes_top
cluster = args.cluster if args.cluster is not None else 'testk'
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
config.delete_kube(cluster, overrides=overrides)
def scale_generic_kube(args):
"""Scale generic kube"""
workers = args.workers
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
cluster = overrides.get('cluster', args.cluster)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error("Cluster directory %s not found..." % clusterdir)
sys.exit(1)
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if workers > 0:
overrides['workers'] = workers
config.scale_kube_generic(cluster, overrides=overrides)
def scale_k3s_kube(args):
"""Scale k3s kube"""
workers = args.workers
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
cluster = overrides.get('cluster', args.cluster)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error("Cluster directory %s not found..." % clusterdir)
sys.exit(1)
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if workers > 0:
overrides['workers'] = workers
config.scale_kube_k3s(cluster, overrides=overrides)
def scale_openshift_kube(args):
"""Scale openshift kube"""
workers = args.workers
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
cluster = overrides.get('cluster', args.cluster)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error("Cluster directory %s not found..." % clusterdir)
sys.exit(1)
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if workers > 0:
overrides['workers'] = workers
config.scale_kube_openshift(cluster, overrides=overrides)
def update_generic_kube(args):
args.type = 'generic'
update_kube(args)
def update_openshift_kube(args):
args.type = 'openshift'
update_kube(args)
def update_k3s_kube(args):
args.type = 'k3s'
update_kube(args)
def update_kube(args):
"""Update kube"""
cluster = args.cluster
_type = args.type
data = {'kube': cluster, 'kubetype': _type}
plan = None
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if not overrides:
warning("No parameters provided, using stored one")
if 'ipi' in overrides and overrides['ipi']:
error("Update cluster workflow not available when using ipi")
sys.exit(1)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error("Cluster directory %s not found..." % clusterdir)
sys.exit(1)
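    # Merge the parameters stored at cluster creation time with the ones provided now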
if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
installparam = yaml.safe_load(install)
data.update(installparam)
plan = installparam.get('plan', plan)
data.update(overrides)
data['basedir'] = '/workdir' if os.path.exists("/i_am_a_container") else '.'
if plan is None:
plan = cluster
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.update_kube(plan, _type, overrides=data)
def create_vmnic(args):
"""Add nic to vm"""
name = args.name
network = args.network
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if network is None:
error("Missing network. Leaving...")
sys.exit(1)
pprint("Adding nic to vm %s..." % name)
k.add_nic(name=name, network=network)
def delete_vmnic(args):
"""Delete nic of vm"""
name = args.name
interface = args.interface
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Deleting nic from vm %s..." % name)
k.delete_nic(name, interface)
return
def create_pool(args):
"""Create/Delete pool"""
pool = args.pool
pooltype = args.pooltype
path = args.path
thinpool = args.thinpool
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if path is None:
error("Missing path. Leaving...")
sys.exit(1)
pprint("Creating pool %s..." % pool)
k.create_pool(name=pool, poolpath=path, pooltype=pooltype, thinpool=thinpool)
def delete_pool(args):
"""Delete pool"""
pool = args.pool
full = args.full
yes = args.yes
yes_top = args.yes_top
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Deleting pool %s..." % pool)
result = k.delete_pool(name=pool, full=full)
common.handle_response(result, pool, element='Pool', action='deleted')
def create_plan(args):
"""Create plan"""
plan = args.plan
ansible = args.ansible
url = args.url
path = args.path
container = args.container
inputfile = args.inputfile
force = args.force
pre = not args.skippre
post = not args.skippost
paramfile = args.paramfile
if inputfile is None:
inputfile = 'kcli_plan.yml'
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
_type = config.ini[config.client].get('type', 'kvm')
overrides.update({'type': _type})
if force:
if plan is None:
error("Force requires specifying a plan name")
return
else:
config.delete_plan(plan, unregister=config.rhnunregister)
if plan is None:
plan = nameutils.get_random_name()
pprint("Using %s as name of the plan" % plan)
config.plan(plan, ansible=ansible, url=url, path=path, container=container, inputfile=inputfile,
overrides=overrides, pre=pre, post=post)
return 0
def create_playbook(args):
"""Create plan"""
inputfile = args.inputfile
store = args.store
paramfile = args.paramfile
if inputfile is None:
inputfile = 'kcli_plan.yml'
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
_type = baseconfig.ini[baseconfig.client].get('type', 'kvm')
overrides.update({'type': _type})
baseconfig.create_playbook(inputfile, overrides=overrides, store=store)
return 0
def update_plan(args):
"""Update plan"""
autostart = args.autostart
noautostart = args.noautostart
plan = args.plan
url = args.url
path = args.path
container = args.container
inputfile = args.inputfile
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile if inputfile is not None else "/workdir/kcli_plan.yml"
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if autostart:
config.autostart_plan(plan)
return 0
elif noautostart:
config.noautostart_plan(plan)
return 0
config.plan(plan, url=url, path=path, container=container, inputfile=inputfile, overrides=overrides, update=True)
return 0
def delete_plan(args):
"""Delete plan"""
plan = args.plan
yes = args.yes
yes_top = args.yes_top
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.delete_plan(plan, unregister=config.rhnunregister)
return 0
def expose_plan(args):
plan = args.plan
if plan is None:
plan = nameutils.get_random_name()
pprint("Using %s as name of the plan" % plan)
port = args.port
inputfile = args.inputfile
installermode = args.installermode
if inputfile is None:
inputfile = 'kcli_plan.yml'
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile
overrides = common.get_overrides(param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
extraconfigs = {}
for extraclient in config.extraclients:
extraconfigs[extraclient] = Kconfig(client=extraclient, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
config.expose_plan(plan, inputfile=inputfile, overrides=overrides, port=port, extraconfigs=extraconfigs,
installermode=installermode)
return 0
def start_plan(args):
"""Start plan"""
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.start_plan(plan)
return 0
def stop_plan(args):
"""Stop plan"""
plan = args.plan
soft = args.soft
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.stop_plan(plan, soft=soft)
return 0
def autostart_plan(args):
"""Autostart plan"""
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.autostart_plan(plan)
return 0
def noautostart_plan(args):
"""Noautostart plan"""
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.noautostart_plan(plan)
return 0
def restart_plan(args):
"""Restart plan"""
soft = args.soft
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.stop_plan(plan, soft=soft)
config.start_plan(plan)
return 0
def info_generic_app(args):
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_app_generic(args.app)
def info_openshift_disconnecter(args):
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_openshift_disconnecter()
def info_openshift_app(args):
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_app_openshift(args.app)
def info_plan(args):
"""Info plan """
doc = args.doc
quiet = args.quiet
url = args.url
path = args.path
inputfile = args.inputfile
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile if inputfile is not None else "/workdir/kcli_plan.yml"
if url is None:
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
baseconfig.info_plan(inputfile, quiet=quiet, doc=doc)
else:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
config.plan('info', url=url, path=path, inputfile=inputfile, info=True, quiet=quiet, doc=doc)
return 0
def info_generic_kube(args):
"""Info Generic kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
baseconfig.info_kube_generic(quiet=True, offline=True)
def info_k3s_kube(args):
"""Info K3s kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
baseconfig.info_kube_k3s(quiet=True, offline=True)
def info_openshift_kube(args):
"""Info Openshift kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
baseconfig.info_kube_openshift(quiet=True, offline=True)
def download_plan(args):
"""Download plan"""
plan = args.plan
url = args.url
if plan is None:
plan = nameutils.get_random_name()
pprint("Using %s as name of the plan" % plan)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.plan(plan, url=url, download=True)
return 0
def download_coreos_installer(args):
"""Download Coreos Installer"""
paramfile = args.paramfile
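    # Resolve the parameter file: the /workdir copy when containerized, otherwise kcli_parameters.yml in the current directory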
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_coreos_installer(version=overrides.get('version', 'latest'), arch=overrides.get('arch'))
def download_kubectl(args):
"""Download Kubectl"""
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_kubectl(version=overrides.get('version', 'latest'))
def download_helm(args):
"""Download Helm"""
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_helm(version=overrides.get('version', 'latest'))
def download_oc(args):
"""Download Oc"""
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_oc(version=overrides.get('version', 'latest'))
def download_openshift_installer(args):
"""Download Openshift Installer"""
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
return baseconfig.download_openshift_installer(overrides)
def download_okd_installer(args):
"""Download Okd Installer"""
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
overrides['upstream'] = True
return baseconfig.download_openshift_installer(overrides)
def create_pipeline_github(args):
"""Create Github Pipeline"""
inputfile = args.inputfile
kube = args.kube
script = args.script
paramfile = args.paramfile
if inputfile is None:
inputfile = 'kcli_plan.yml'
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
overrides = common.get_overrides(param=args.param)
renderfile = baseconfig.create_github_pipeline(inputfile, paramfile=paramfile, overrides=overrides, kube=kube,
script=script)
print(renderfile)
return 0
def create_pipeline_jenkins(args):
"""Create Jenkins Pipeline"""
inputfile = args.inputfile
kube = args.kube
paramfile = args.paramfile
if inputfile is None:
inputfile = 'kcli_plan.yml'
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if not kube and not os.path.exists(inputfile):
error("File %s not found" % inputfile)
return 0
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
renderfile = baseconfig.create_jenkins_pipeline(inputfile, overrides=overrides, kube=kube)
print(renderfile)
return 0
def render_file(args):
"""Render file"""
plan = None
inputfile = args.inputfile
paramfiles = args.paramfile if args.paramfile is not None else []
ignore = args.ignore
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile if inputfile is not None else "/workdir/kcli_plan.yml"
if paramfiles:
paramfiles = ["/workdir/%s" % paramfile for paramfile in paramfiles]
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfiles = ["/workdir/kcli_parameters.yml"]
elif not paramfiles and os.path.exists("kcli_parameters.yml"):
paramfiles = ["kcli_parameters.yml"]
overrides = {}
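    # Merge parameters with increasing precedence: *_default.y*ml files, then explicit paramfiles, then -P values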
    allparamfiles = glob("*_default.y*ml")
allparamfiles.extend(paramfiles)
for paramfile in allparamfiles:
overrides.update(common.get_overrides(paramfile=paramfile))
overrides.update(common.get_overrides(param=args.param))
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
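    # Expose kcli configuration to the template as config_* variables (global defaults first, then the active client)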
default_data = {'config_%s' % k: baseconfig.default[k] for k in baseconfig.default}
client_data = {'config_%s' % k: baseconfig.ini[baseconfig.client][k] for k in baseconfig.ini[baseconfig.client]}
client_data['config_type'] = client_data.get('config_type', 'kvm')
client_data['config_host'] = client_data.get('config_host', '127.0.0.1')
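    # Local kvm connections default to the current user, remote hosts to root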
default_user = getuser() if client_data['config_type'] == 'kvm'\
and client_data['config_host'] in ['localhost', '127.0.0.1'] else 'root'
client_data['config_user'] = client_data.get('config_user', default_user)
config_data = default_data.copy()
config_data.update(client_data)
overrides.update(config_data)
if not os.path.exists(inputfile):
error("File %s not found" % inputfile)
return 0
renderfile = baseconfig.process_inputfile(plan, inputfile, overrides=overrides, ignore=ignore)
print(renderfile)
return 0
def create_vmdata(args):
"""Create cloudinit/ignition data for vm"""
args.assets = True
args.profile = None
args.profilefile = None
args.wait = False
args.console = None
args.serial = None
args.count = 1
create_vm(args)
return 0
def create_plandata(args):
"""Create cloudinit/ignition data"""
plan = None
inputfile = args.inputfile
outputdir = args.outputdir
paramfile = args.paramfile
if os.path.exists("/i_am_a_container"):
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
config_data = {'config_%s' % k: config.ini[config.client][k] for k in config.ini[config.client]}
config_data['config_type'] = config_data.get('config_type', 'kvm')
overrides.update(config_data)
if not os.path.exists(inputfile):
error("File %s not found" % inputfile)
return 0
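    # onlyassets=True renders the plan and returns its cloudinit/ignition data instead of creating vms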
results = config.plan(plan, inputfile=inputfile, overrides=overrides, onlyassets=True)
if results.get('assets'):
for num, asset in enumerate(results['assets']):
if outputdir is None:
print(asset)
else:
if not os.path.exists(outputdir):
os.mkdir(outputdir)
# if 'ignition' in asset:
# with open("%s/%s.ign" % (outputdir, "%0.2d" % num), 'w') as f:
# f.write(asset)
assetdata = yaml.safe_load(asset)
hostname = assetdata.get('hostname')
if hostname is None:
continue
pprint("Rendering %s" % hostname)
hostnamedir = "%s/%s" % (outputdir, hostname)
if not os.path.exists(hostnamedir):
os.mkdir(hostnamedir)
runcmd = assetdata.get('runcmd', [])
write_files = assetdata.get('write_files', [])
with open("%s/runcmd" % hostnamedir, 'w') as f:
f.write('\n'.join(runcmd))
for _file in write_files:
content = _file['content']
path = _file['path'].replace('/root/', '')
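                    # Never copy credentials (ssh keys, pull secrets) into the rendered asset directory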
if path.endswith('id_rsa') or path.endswith('id_dsa') or path.endswith('id_rsa.pub')\
or path.endswith('id_dsa.pub') or 'openshift_pull.json' in path:
warning("Skipping %s" % path)
continue
if '/' in path and not os.path.exists("%s/%s" % (hostnamedir, os.path.dirname(path))):
os.makedirs("%s/%s" % (hostnamedir, os.path.dirname(path)))
with open("%s/%s/%s" % (hostnamedir, os.path.dirname(path), os.path.basename(path)), 'w') as f:
f.write(content)
else:
with open("%s/%s" % (hostnamedir, path), 'w') as f:
f.write(content)
if outputdir is not None:
renderplan = config.process_inputfile(plan, inputfile, overrides=overrides)
with open("%s/kcli_plan.yml" % outputdir, 'w') as f:
f.write(renderplan)
return 0
def create_plantemplate(args):
"""Create plan template"""
skipfiles = args.skipfiles
skipscripts = args.skipscripts
directory = args.directory
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
baseconfig.create_plan_template(directory, overrides=overrides, skipfiles=skipfiles, skipscripts=skipscripts)
def create_snapshot_plan(args):
"""Snapshot plan"""
plan = args.plan
snapshot = args.snapshot
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.snapshot_plan(plan, snapshotname=snapshot)
return 0
def delete_snapshot_plan(args):
"""Snapshot plan"""
plan = args.plan
snapshot = args.snapshot
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for vm in sorted(k.list(), key=lambda x: x['name']):
name = vm['name']
if vm['plan'] == plan:
pprint("Deleting snapshot %s of vm %s..." % (snapshot, name))
k.snapshot(snapshot, name, delete=True)
return 0
def revert_snapshot_plan(args):
"""Revert snapshot of plan"""
plan = args.plan
snapshot = args.snapshot
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.revert_plan(plan, snapshotname=snapshot)
return 0
def create_repo(args):
"""Create repo"""
repo = args.repo
url = args.url
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if repo is None:
error("Missing repo. Leaving...")
sys.exit(1)
if url is None:
error("Missing url. Leaving...")
sys.exit(1)
pprint("Adding repo %s..." % repo)
baseconfig.create_repo(repo, url)
return 0
def delete_repo(args):
"""Delete repo"""
repo = args.repo
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if repo is None:
error("Missing repo. Leaving...")
sys.exit(1)
pprint("Deleting repo %s..." % repo)
baseconfig.delete_repo(repo)
return
def update_repo(args):
"""Update repo"""
repo = args.repo
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if repo is None:
pprint("Updating all repos...")
repos = baseconfig.list_repos()
for repo in repos:
pprint("Updating repo %s..." % repo)
baseconfig.update_repo(repo)
else:
pprint("Updating repo %s..." % repo)
baseconfig.update_repo(repo)
return
def info_product(args):
"""Info product"""
repo = args.repo
product = args.product
group = args.group
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
pprint("Providing information on product %s..." % product)
baseconfig.info_product(product, repo, group)
def create_product(args):
"""Create product"""
repo = args.repo
product = args.product
latest = args.latest
group = args.group
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
plan = overrides['plan'] if 'plan' in overrides else None
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
pprint("Creating product %s..." % product)
config.create_product(product, repo=repo, group=group, plan=plan, latest=latest, overrides=overrides)
return 0
def ssh_vm(args):
"""Ssh into vm"""
local = args.L
remote = args.R
D = args.D
X = args.X
Y = args.Y
identityfile = args.identityfile
user = args.user
vmport = args.port
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
name = [common.get_lastvm(baseconfig.client)] if not args.name else args.name
tunnel = baseconfig.tunnel
tunnelhost = baseconfig.tunnelhost
tunneluser = baseconfig.tunneluser
tunnelport = baseconfig.tunnelport
if tunnel and tunnelhost is None:
error("Tunnel requested but no tunnelhost defined")
sys.exit(1)
insecure = baseconfig.insecure
if len(name) > 1:
cmd = ' '.join(name[1:])
else:
cmd = None
name = name[0]
    if '@' in name and len(name.split('@')) == 2:
        user, name = name.split('@')
if os.path.exists("/i_am_a_container") and not os.path.exists("/root/.kcli/config.yml")\
and not os.path.exists("/root/.ssh/config"):
insecure = True
sshcommand = None
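    # With caching enabled, resolve ip/user/port from the local vm cache before falling back to a full provider connection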
if baseconfig.cache:
_list = cache_vms(baseconfig, args.region, args.zone, args.namespace)
vms = [vm for vm in _list if vm['name'] == name]
if vms:
vm = vms[0]
ip = vm.get('ip')
if ip is None:
error("No ip found in cache for %s..." % name)
else:
if user is None:
user = baseconfig.vmuser if baseconfig.vmuser is not None else vm.get('user')
if vmport is None:
vmport = baseconfig.vmport if baseconfig.vmport is not None else vm.get('vmport')
sshcommand = common.ssh(name, ip=ip, user=user, local=local, remote=remote, tunnel=tunnel,
tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
insecure=insecure, cmd=cmd, X=X, Y=Y, D=D, debug=args.debug, vmport=vmport,
identityfile=identityfile)
if sshcommand is None:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
k = config.k
u, ip, vmport = common._ssh_credentials(k, name)
if ip is None:
return
if user is None:
user = config.vmuser if config.vmuser is not None else u
if vmport is None and config.vmport is not None:
vmport = config.vmport
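        # On kvm/packet a bare numeric "ip" is actually a forwarded port: use it as the ssh port and connect to the hypervisor host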
if config.type in ['kvm', 'packet'] and '.' not in ip and ':' not in ip:
vmport = ip
ip = config.host
sshcommand = common.ssh(name, ip=ip, user=user, local=local, remote=remote, tunnel=tunnel,
tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
insecure=insecure, cmd=cmd, X=X, Y=Y, D=D, debug=args.debug, vmport=vmport,
identityfile=identityfile)
if sshcommand is not None:
if find_executable('ssh') is not None:
os.system(sshcommand)
else:
print(sshcommand)
else:
error("Couldnt ssh to %s" % name)
def scp_vm(args):
"""Scp into vm"""
identityfile = args.identityfile
recursive = args.recursive
source = args.source[0]
source = source if not os.path.exists("/i_am_a_container") else "/workdir/%s" % source
destination = args.destination[0]
user = args.user
vmport = args.port
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
tunnel = baseconfig.tunnel
tunnelhost = baseconfig.tunnelhost
tunneluser = baseconfig.tunneluser
tunnelport = baseconfig.tunnelport
if tunnel and tunnelhost is None:
error("Tunnel requested but no tunnelhost defined")
sys.exit(1)
insecure = baseconfig.insecure
if len(source.split(':')) == 2:
name, source = source.split(':')
download = True
elif len(destination.split(':')) == 2:
name, destination = destination.split(':')
download = False
else:
error("Couldn't run scp")
return
if '@' in name and len(name.split('@')) == 2:
user, name = name.split('@')
if download:
pprint("Retrieving file %s from %s" % (source, name))
else:
pprint("Copying file %s to %s" % (source, name))
scpcommand = None
if baseconfig.cache:
_list = cache_vms(baseconfig, args.region, args.zone, args.namespace)
vms = [vm for vm in _list if vm['name'] == name]
if vms:
vm = vms[0]
ip = vm.get('ip')
if ip is None:
error("No ip found in cache for %s..." % name)
else:
if user is None:
user = baseconfig.vmuser if baseconfig.vmuser is not None else vm.get('user')
if vmport is None:
vmport = baseconfig.vmport if baseconfig.vmport is not None else vm.get('vmport')
scpcommand = common.scp(name, ip=ip, user=user, source=source, destination=destination,
recursive=recursive, tunnel=tunnel, tunnelhost=tunnelhost,
tunnelport=tunnelport, tunneluser=tunneluser, debug=args.debug,
download=download, vmport=vmport, insecure=insecure, identityfile=identityfile)
if scpcommand is None:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
k = config.k
u, ip, vmport = common._ssh_credentials(k, name)
if ip is None:
return
if user is None:
user = config.vmuser if config.vmuser is not None else u
if vmport is None and config.vmport is not None:
vmport = config.vmport
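        # Same kvm/packet handling as ssh: a bare numeric "ip" is the forwarded port and the copy goes through the local endpoint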
if config.type in ['kvm', 'packet'] and '.' not in ip and ':' not in ip:
vmport = ip
ip = '127.0.0.1'
scpcommand = common.scp(name, ip=ip, user=user, source=source, destination=destination, recursive=recursive,
tunnel=tunnel, tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
debug=config.debug, download=download, vmport=vmport, insecure=insecure,
identityfile=identityfile)
if scpcommand is not None:
if find_executable('scp') is not None:
os.system(scpcommand)
else:
print(scpcommand)
else:
error("Couldn't run scp")
def create_network(args):
"""Create Network"""
name = args.name
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
isolated = args.isolated
cidr = args.cidr
nodhcp = args.nodhcp
domain = args.domain
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if name is None:
error("Missing Network")
sys.exit(1)
    nat = not isolated
dhcp = not nodhcp
if args.dual is not None:
overrides['dual_cidr'] = args.dual
result = k.create_network(name=name, cidr=cidr, dhcp=dhcp, nat=nat, domain=domain, overrides=overrides)
common.handle_response(result, name, element='Network')
def delete_network(args):
"""Delete Network"""
yes = args.yes
yes_top = args.yes_top
names = args.names
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if not yes and not yes_top:
common.confirm("Are you sure?")
for name in names:
result = k.delete_network(name=name)
common.handle_response(result, name, element='Network', action='deleted')
def create_host_group(args):
"""Generate Host group"""
data = {}
data['_type'] = 'group'
data['name'] = args.name
data['algorithm'] = args.algorithm
data['members'] = args.members
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_kvm(args):
"""Generate Kvm Host"""
data = {}
data['_type'] = 'kvm'
data['name'] = args.name
data['host'] = args.host
data['port'] = args.port
data['user'] = args.user
data['protocol'] = args.protocol
data['url'] = args.url
data['pool'] = args.pool
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_ovirt(args):
"""Create Ovirt Host"""
data = {}
data['name'] = args.name
data['_type'] = 'ovirt'
data['host'] = args.host
data['datacenter'] = args.datacenter
data['ca_file'] = args.ca
data['cluster'] = args.cluster
data['org'] = args.org
data['user'] = args.user
data['password'] = args.password
if args.pool is not None:
data['pool'] = args.pool
data['client'] = args.client
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_gcp(args):
"""Create Gcp Host"""
data = {}
data['name'] = args.name
data['credentials'] = args.credentials
data['project'] = args.project
data['zone'] = args.zone
data['_type'] = 'gcp'
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_aws(args):
"""Create Aws Host"""
data = {}
data['name'] = args.name
data['_type'] = 'aws'
data['access_key_id'] = args.access_key_id
data['access_key_secret'] = args.access_key_secret
data['region'] = args.region
data['keypair'] = args.keypair
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_ibm(args):
""""Create IBM Cloud host"""
data = {}
data['name'] = args.name
data['_type'] = 'ibm'
data['iam_api_key'] = args.iam_api_key
data['region'] = args.region
data['vpc'] = args.vpc
data['zone'] = args.zone
data['access_key_id'] = args.access_key_id
data['secret_access_key'] = args.secret_access_key
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_openstack(args):
"""Create Openstack Host"""
data = {}
data['name'] = args.name
data['_type'] = 'openstack'
data['user'] = args.user
data['password'] = args.password
data['project'] = args.project
data['domain'] = args.domain
data['auth_url'] = args.auth_url
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_kubevirt(args):
"""Create Kubevirt Host"""
data = {}
data['name'] = args.name
data['_type'] = 'kubevirt'
if args.pool is not None:
data['pool'] = args.pool
if args.token is not None:
data['token'] = args.token
    if args.ca is not None:
        data['ca_file'] = args.ca
data['multus'] = args.multus
data['cdi'] = args.cdi
if args.host is not None:
data['host'] = args.host
if args.port is not None:
data['port'] = args.port
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_vsphere(args):
"""Create Vsphere Host"""
data = {}
data['name'] = args.name
data['_type'] = 'vsphere'
data['host'] = args.host
data['user'] = args.user
data['password'] = args.password
data['datacenter'] = args.datacenter
data['cluster'] = args.cluster
if args.pool is not None:
data['pool'] = args.pool
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_container(args):
"""Create container"""
name = args.name
image = args.image
profile = args.profile
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
cont = Kcontainerconfig(config, client=args.containerclient).cont
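    # Profiles of type 'container' can predefine image, cmd, ports, volumes and environment for the container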
containerprofiles = {k: v for k, v in config.profiles.items() if 'type' in v and v['type'] == 'container'}
if name is None:
name = nameutils.get_random_name()
if config.type == 'kubevirt':
name = name.replace('_', '-')
if image is not None:
profile = image
if image in containerprofiles:
pprint("Using %s as a profile" % image)
else:
containerprofiles[image] = {'image': image}
pprint("Deploying container %s from profile %s..." % (name, profile))
profile = containerprofiles[profile]
image = next((e for e in [profile.get('image'), profile.get('image')] if e is not None), None)
if image is None:
error("Missing image in profile %s. Leaving..." % profile)
sys.exit(1)
cmd = profile.get('cmd')
ports = profile.get('ports')
environment = profile.get('environment')
volumes = next((e for e in [profile.get('volumes'), profile.get('disks')] if e is not None), None)
profile.update(overrides)
params = {'name': name, 'image': image, 'ports': ports, 'volumes': volumes, 'environment': environment,
'overrides': overrides}
if cmd is not None:
params['cmds'] = [cmd]
cont.create_container(**params)
success("container %s created" % name)
return
def snapshotcreate_vm(args):
"""Create snapshot"""
snapshot = args.snapshot
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Creating snapshot of %s named %s..." % (name, snapshot))
result = k.snapshot(snapshot, name)
code = common.handle_response(result, name, element='', action='snapshotted')
return code
def snapshotdelete_vm(args):
"""Delete snapshot"""
snapshot = args.snapshot
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Deleting snapshot %s of vm %s..." % (snapshot, name))
result = k.snapshot(snapshot, name, delete=True)
code = common.handle_response(result, name, element='', action='snapshot deleted')
return code
def snapshotrevert_vm(args):
"""Revert snapshot of vm"""
snapshot = args.snapshot
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Reverting snapshot %s of vm %s..." % (snapshot, name))
result = k.snapshot(snapshot, name, revert=True)
code = common.handle_response(result, name, element='', action='snapshot reverted')
return code
def snapshotlist_vm(args):
"""List snapshots of vm"""
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Listing snapshots of %s..." % name)
snapshots = k.snapshot('', name, listing=True)
if isinstance(snapshots, dict):
error("Vm %s not found" % name)
return
else:
for snapshot in snapshots:
print(snapshot)
return
def create_bucket(args):
"""Create bucket"""
buckets = args.buckets
public = args.public
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for bucket in buckets:
pprint("Creating bucket %s..." % bucket)
k.create_bucket(bucket, public=public)
def delete_bucket(args):
"""Delete bucket"""
yes_top = args.yes_top
yes = args.yes
if not yes and not yes_top:
common.confirm("Are you sure?")
buckets = args.buckets
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for bucket in buckets:
pprint("Deleting bucket %s..." % bucket)
k.delete_bucket(bucket)
def list_bucket(args):
"""List buckets"""
pprint("Listing buckets...")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
buckets = k.list_buckets()
bucketstable = PrettyTable(["Bucket"])
for bucket in sorted(buckets):
bucketstable.add_row([bucket])
bucketstable.align["Bucket"] = "l"
print(bucketstable)
def list_bucketfiles(args):
"""List bucket files"""
bucket = args.bucket
pprint("Listing bucket files of bucket %s..." % bucket)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
bucketfiles = k.list_bucketfiles(bucket)
bucketfilestable = PrettyTable(["BucketFiles"])
for bucketfile in sorted(bucketfiles):
bucketfilestable.add_row([bucketfile])
bucketfilestable.align["BucketFiles"] = "l"
print(bucketfilestable)
def create_bucketfile(args):
bucket = args.bucket
temp_url = args.temp
public = args.public
path = args.path
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Uploading file %s to bucket %s..." % (path, bucket))
result = k.upload_to_bucket(bucket, path, temp_url=temp_url, public=public)
if result is not None:
pprint("bucketfile available at the following url:\n\n%s" % result)
def delete_bucketfile(args):
bucket = args.bucket
path = args.path
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Deleting file %s to bucket %s..." % (path, bucket))
k.delete_from_bucket(bucket, path)
def download_bucketfile(args):
bucket = args.bucket
path = args.path
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Downloading file %s from bucket %s..." % (path, bucket))
k.download_from_bucket(bucket, path)
def report_host(args):
"""Report info about host"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
k.report()
def switch_host(args):
"""Handle host"""
host = args.name
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
result = baseconfig.switch_host(host)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def list_keyword(args):
"""List keywords"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
keywordstable = PrettyTable(["Keyword", "Default Value"])
keywordstable.align["Client"] = "l"
keywords = baseconfig.list_keywords()
for keyword in sorted(keywords):
value = keywords[keyword]
keywordstable.add_row([keyword, value])
print(keywordstable)
return
def cli():
"""
"""
PARAMETERS_HELP = 'specify parameter or keyword for rendering (multiple can be specified)'
    parser = argparse.ArgumentParser(description='Libvirt/Ovirt/Vsphere/Gcp/Aws/Openstack/Kubevirt/Ibm Cloud Wrapper')
parser.add_argument('-C', '--client')
parser.add_argument('--containerclient', help='Containerclient to use')
parser.add_argument('--dnsclient', help='Dnsclient to use')
parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('-n', '--namespace', help='Namespace to use. Specific to kubevirt', default='default')
    parser.add_argument('-r', '--region', help='Region to use. Specific to aws/gcp/ibm')
    parser.add_argument('-z', '--zone', help='Zone to use. Specific to gcp/ibm')
subparsers = parser.add_subparsers(metavar='', title='Available Commands')
containerconsole_desc = 'Attach To Container'
containerconsole_parser = subparsers.add_parser('attach', description=containerconsole_desc,
help=containerconsole_desc)
containerconsole_parser.add_argument('name', metavar='CONTAINERNAME', nargs='?')
containerconsole_parser.set_defaults(func=console_container)
create_desc = 'Create Object'
create_parser = subparsers.add_parser('create', description=create_desc, help=create_desc, aliases=['add'])
create_subparsers = create_parser.add_subparsers(metavar='', dest='subcommand_create')
vmclone_desc = 'Clone Vm'
vmclone_epilog = None
vmclone_parser = subparsers.add_parser('clone', description=vmclone_desc, help=vmclone_desc, epilog=vmclone_epilog,
formatter_class=rawhelp)
vmclone_parser.add_argument('-b', '--base', help='Base VM', metavar='BASE')
vmclone_parser.add_argument('-f', '--full', action='store_true', help='Full Clone')
vmclone_parser.add_argument('-s', '--start', action='store_true', help='Start cloned VM')
vmclone_parser.add_argument('name', metavar='VMNAME')
vmclone_parser.set_defaults(func=clone_vm)
vmconsole_desc = 'Vm Console (vnc/spice/serial)'
vmconsole_epilog = "examples:\n%s" % vmconsole
vmconsole_parser = argparse.ArgumentParser(add_help=False)
vmconsole_parser.add_argument('-s', '--serial', action='store_true')
vmconsole_parser.add_argument('name', metavar='VMNAME', nargs='?')
vmconsole_parser.set_defaults(func=console_vm)
subparsers.add_parser('console', parents=[vmconsole_parser], description=vmconsole_desc, help=vmconsole_desc,
epilog=vmconsole_epilog, formatter_class=rawhelp)
delete_desc = 'Delete Object'
delete_parser = subparsers.add_parser('delete', description=delete_desc, help=delete_desc, aliases=['remove'])
    delete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation", dest="yes_top")
delete_subparsers = delete_parser.add_subparsers(metavar='', dest='subcommand_delete')
disable_desc = 'Disable Host'
disable_parser = subparsers.add_parser('disable', description=disable_desc, help=disable_desc)
disable_subparsers = disable_parser.add_subparsers(metavar='', dest='subcommand_disable')
download_desc = 'Download Assets like Image, plans or binaries'
download_parser = subparsers.add_parser('download', description=download_desc, help=download_desc)
download_subparsers = download_parser.add_subparsers(metavar='', dest='subcommand_download')
enable_desc = 'Enable Host'
enable_parser = subparsers.add_parser('enable', description=enable_desc, help=enable_desc)
enable_subparsers = enable_parser.add_subparsers(metavar='', dest='subcommand_enable')
vmexport_desc = 'Export Vm'
vmexport_epilog = "examples:\n%s" % vmexport
vmexport_parser = subparsers.add_parser('export', description=vmexport_desc, help=vmexport_desc,
epilog=vmexport_epilog,
formatter_class=rawhelp)
vmexport_parser.add_argument('-i', '--image', help='Name for the generated image. Uses the vm name otherwise',
metavar='IMAGE')
vmexport_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmexport_parser.set_defaults(func=export_vm)
expose_desc = 'Expose Object'
expose_parser = subparsers.add_parser('expose', description=expose_desc, help=expose_desc)
expose_subparsers = expose_parser.add_subparsers(metavar='', dest='subcommand_expose')
hostlist_desc = 'List Hosts'
info_desc = 'Info Host/Kube/Plan/Vm'
info_parser = subparsers.add_parser('info', description=info_desc, help=info_desc, aliases=['show'])
info_subparsers = info_parser.add_subparsers(metavar='', dest='subcommand_info')
list_desc = 'List Object'
list_epilog = "examples:\n%s" % _list
list_parser = subparsers.add_parser('list', description=list_desc, help=list_desc, aliases=['get'],
epilog=list_epilog,
formatter_class=rawhelp)
list_subparsers = list_parser.add_subparsers(metavar='', dest='subcommand_list')
render_desc = 'Render file'
render_parser = subparsers.add_parser('render', description=render_desc, help=render_desc)
render_parser.add_argument('-f', '--inputfile', help='Input Plan/File', default='kcli_plan.yml')
render_parser.add_argument('-i', '--ignore', action='store_true', help='Ignore missing variables')
render_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
render_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE', action='append')
render_parser.set_defaults(func=render_file)
restart_desc = 'Restart Vm/Plan/Container'
restart_parser = subparsers.add_parser('restart', description=restart_desc, help=restart_desc)
restart_subparsers = restart_parser.add_subparsers(metavar='', dest='subcommand_restart')
revert_desc = 'Revert Vm/Plan Snapshot'
revert_parser = subparsers.add_parser('revert', description=revert_desc, help=revert_desc)
revert_subparsers = revert_parser.add_subparsers(metavar='', dest='subcommand_revert')
scale_desc = 'Scale Kube'
scale_parser = subparsers.add_parser('scale', description=scale_desc, help=scale_desc)
scale_subparsers = scale_parser.add_subparsers(metavar='', dest='subcommand_scale')
vmscp_desc = 'Scp Into Vm'
vmscp_epilog = None
vmscp_parser = argparse.ArgumentParser(add_help=False)
vmscp_parser.add_argument('-i', '--identityfile', help='Identity file')
vmscp_parser.add_argument('-r', '--recursive', help='Recursive', action='store_true')
vmscp_parser.add_argument('-u', '-l', '--user', help='User for ssh')
vmscp_parser.add_argument('-p', '-P', '--port', help='Port for ssh')
vmscp_parser.add_argument('source', nargs=1)
vmscp_parser.add_argument('destination', nargs=1)
vmscp_parser.set_defaults(func=scp_vm)
subparsers.add_parser('scp', parents=[vmscp_parser], description=vmscp_desc, help=vmscp_desc, epilog=vmscp_epilog,
formatter_class=rawhelp)
vmssh_desc = 'Ssh Into Vm'
vmssh_epilog = None
vmssh_parser = argparse.ArgumentParser(add_help=False)
vmssh_parser.add_argument('-D', help='Dynamic Forwarding', metavar='LOCAL')
vmssh_parser.add_argument('-L', help='Local Forwarding', metavar='LOCAL')
vmssh_parser.add_argument('-R', help='Remote Forwarding', metavar='REMOTE')
vmssh_parser.add_argument('-X', action='store_true', help='Enable X11 Forwarding')
vmssh_parser.add_argument('-Y', action='store_true', help='Enable X11 Forwarding(Insecure)')
vmssh_parser.add_argument('-i', '--identityfile', help='Identity file')
    vmssh_parser.add_argument('-p', '--port', help='Port for ssh')
vmssh_parser.add_argument('-u', '-l', '--user', help='User for ssh')
vmssh_parser.add_argument('name', metavar='VMNAME', nargs='*')
vmssh_parser.set_defaults(func=ssh_vm)
subparsers.add_parser('ssh', parents=[vmssh_parser], description=vmssh_desc, help=vmssh_desc, epilog=vmssh_epilog,
formatter_class=rawhelp)
start_desc = 'Start Vm/Plan/Container'
start_epilog = "examples:\n%s" % start
start_parser = subparsers.add_parser('start', description=start_desc, help=start_desc, epilog=start_epilog,
formatter_class=rawhelp)
start_subparsers = start_parser.add_subparsers(metavar='', dest='subcommand_start')
stop_desc = 'Stop Vm/Plan/Container'
stop_parser = subparsers.add_parser('stop', description=stop_desc, help=stop_desc)
stop_subparsers = stop_parser.add_subparsers(metavar='', dest='subcommand_stop')
switch_desc = 'Switch Host'
switch_parser = subparsers.add_parser('switch', description=switch_desc, help=switch_desc)
switch_subparsers = switch_parser.add_subparsers(metavar='', dest='subcommand_switch')
sync_desc = 'Sync Host'
sync_parser = subparsers.add_parser('sync', description=sync_desc, help=sync_desc)
sync_subparsers = sync_parser.add_subparsers(metavar='', dest='subcommand_sync')
update_desc = 'Update Vm/Plan/Repo'
update_parser = subparsers.add_parser('update', description=update_desc, help=update_desc)
update_subparsers = update_parser.add_subparsers(metavar='', dest='subcommand_update')
version_desc = 'Version'
version_epilog = None
version_parser = argparse.ArgumentParser(add_help=False)
version_parser.set_defaults(func=get_version)
subparsers.add_parser('version', parents=[version_parser], description=version_desc, help=version_desc,
epilog=version_epilog, formatter_class=rawhelp)
# sub subcommands
createapp_desc = 'Create Kube Apps'
createapp_parser = create_subparsers.add_parser('app', description=createapp_desc,
help=createapp_desc, aliases=['apps'])
createapp_subparsers = createapp_parser.add_subparsers(metavar='', dest='subcommand_create_app')
appgenericcreate_desc = 'Create Kube App Generic'
appgenericcreate_epilog = None
appgenericcreate_parser = createapp_subparsers.add_parser('generic', description=appgenericcreate_desc,
help=appgenericcreate_desc,
epilog=appgenericcreate_epilog, formatter_class=rawhelp)
appgenericcreate_parser.add_argument('--outputdir', '-o', help='Output directory', metavar='OUTPUTDIR')
    appgenericcreate_parser.add_argument('-P', '--param', action='append',
                                         help=PARAMETERS_HELP, metavar='PARAM')
appgenericcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
appgenericcreate_parser.add_argument('apps', metavar='APPS', nargs='*')
appgenericcreate_parser.set_defaults(func=create_app_generic)
appopenshiftcreate_desc = 'Create Kube App Openshift'
appopenshiftcreate_epilog = "examples:\n%s" % appopenshiftcreate
appopenshiftcreate_parser = createapp_subparsers.add_parser('openshift', description=appopenshiftcreate_desc,
help=appopenshiftcreate_desc,
epilog=appopenshiftcreate_epilog,
formatter_class=rawhelp)
appopenshiftcreate_parser.add_argument('--outputdir', '-o', help='Output directory', metavar='OUTPUTDIR')
appopenshiftcreate_parser.add_argument('-P', '--param', action='append',
help=PARAMETERS_HELP, metavar='PARAM')
appopenshiftcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
appopenshiftcreate_parser.add_argument('apps', metavar='APPS', nargs='*')
appopenshiftcreate_parser.set_defaults(func=create_app_openshift)
deleteapp_desc = 'Delete Kube App'
deleteapp_parser = delete_subparsers.add_parser('app', description=deleteapp_desc,
help=deleteapp_desc, aliases=['apps'])
deleteapp_subparsers = deleteapp_parser.add_subparsers(metavar='', dest='subcommand_delete_app')
appgenericdelete_desc = 'Delete Kube App Generic'
appgenericdelete_epilog = None
appgenericdelete_parser = deleteapp_subparsers.add_parser('generic', description=appgenericdelete_desc,
help=appgenericdelete_desc,
epilog=appgenericdelete_epilog, formatter_class=rawhelp)
appgenericdelete_parser.add_argument('-P', '--param', action='append',
help=PARAMETERS_HELP,
metavar='PARAM')
appgenericdelete_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
appgenericdelete_parser.add_argument('apps', metavar='APPS', nargs='*')
appgenericdelete_parser.set_defaults(func=delete_app_generic)
appopenshiftdelete_desc = 'Delete Kube App Openshift'
appopenshiftdelete_epilog = None
appopenshiftdelete_parser = deleteapp_subparsers.add_parser('openshift', description=appopenshiftdelete_desc,
help=appopenshiftdelete_desc,
epilog=appopenshiftdelete_epilog,
formatter_class=rawhelp)
appopenshiftdelete_parser.add_argument('-P', '--param', action='append',
help=PARAMETERS_HELP,
metavar='PARAM')
appopenshiftdelete_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
appopenshiftdelete_parser.add_argument('apps', metavar='APPS', nargs='*')
appopenshiftdelete_parser.set_defaults(func=delete_app_openshift)
appinfo_desc = 'Info App'
appinfo_parser = info_subparsers.add_parser('app', description=appinfo_desc, help=appinfo_desc)
appinfo_subparsers = appinfo_parser.add_subparsers(metavar='', dest='subcommand_info_app')
appgenericinfo_desc = 'Info Generic App'
appgenericinfo_parser = appinfo_subparsers.add_parser('generic', description=appgenericinfo_desc,
help=appgenericinfo_desc)
appgenericinfo_parser.add_argument('app', metavar='APP')
appgenericinfo_parser.set_defaults(func=info_generic_app)
appopenshiftinfo_desc = 'Info Openshift App'
appopenshiftinfo_parser = appinfo_subparsers.add_parser('openshift', description=appopenshiftinfo_desc,
help=appopenshiftinfo_desc)
appopenshiftinfo_parser.add_argument('app', metavar='APP')
appopenshiftinfo_parser.set_defaults(func=info_openshift_app)
    openshiftdisconnecterinfo_desc = 'Info Openshift Disconnecter'
openshiftdisconnecterinfo_parser = info_subparsers.add_parser('disconnecter',
description=openshiftdisconnecterinfo_desc,
help=openshiftdisconnecterinfo_desc,
aliases=['openshift-disconnecter'])
openshiftdisconnecterinfo_parser.set_defaults(func=info_openshift_disconnecter)
listapp_desc = 'List Available Kube Apps'
listapp_parser = list_subparsers.add_parser('app', description=listapp_desc,
help=listapp_desc, aliases=['apps'])
listapp_subparsers = listapp_parser.add_subparsers(metavar='', dest='subcommand_list_app')
appgenericlist_desc = 'List Available Kube Apps Generic'
appgenericlist_parser = listapp_subparsers.add_parser('generic', description=appgenericlist_desc,
help=appgenericlist_desc)
appgenericlist_parser.set_defaults(func=list_apps_generic)
appopenshiftlist_desc = 'List Available Kube Components Openshift'
appopenshiftlist_parser = listapp_subparsers.add_parser('openshift', description=appopenshiftlist_desc,
help=appopenshiftlist_desc)
appopenshiftlist_parser.set_defaults(func=list_apps_openshift)
bucketcreate_desc = 'Create Bucket'
bucketcreate_epilog = None
bucketcreate_parser = create_subparsers.add_parser('bucket', description=bucketcreate_desc,
help=bucketcreate_desc, epilog=bucketcreate_epilog,
formatter_class=rawhelp)
bucketcreate_parser.add_argument('-p', '--public', action='store_true', help='Make the bucket public')
    bucketcreate_parser.add_argument('-P', '--param', action='append',
                                     help=PARAMETERS_HELP, metavar='PARAM')
bucketcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
bucketcreate_parser.add_argument('buckets', metavar='BUCKETS', nargs='+')
bucketcreate_parser.set_defaults(func=create_bucket)
bucketfilecreate_desc = 'Create Bucket file'
bucketfilecreate_parser = argparse.ArgumentParser(add_help=False)
bucketfilecreate_parser.add_argument('-p', '--public', action='store_true', help='Make the file public')
bucketfilecreate_parser.add_argument('-t', '--temp', action='store_true', help='Get temp url')
bucketfilecreate_parser.add_argument('bucket', metavar='BUCKET')
bucketfilecreate_parser.add_argument('path', metavar='PATH')
bucketfilecreate_parser.set_defaults(func=create_bucketfile)
create_subparsers.add_parser('bucket-file', parents=[bucketfilecreate_parser],
description=bucketfilecreate_desc, help=bucketfilecreate_desc)
bucketfiledelete_desc = 'Delete Bucket file'
bucketfiledelete_parser = argparse.ArgumentParser(add_help=False)
bucketfiledelete_parser.add_argument('bucket', metavar='BUCKET')
bucketfiledelete_parser.add_argument('path', metavar='PATH')
bucketfiledelete_parser.set_defaults(func=delete_bucketfile)
delete_subparsers.add_parser('bucket-file', parents=[bucketfiledelete_parser],
description=bucketfiledelete_desc, help=bucketfiledelete_desc)
bucketfiledownload_desc = 'Download Bucket file'
bucketfiledownload_parser = argparse.ArgumentParser(add_help=False)
bucketfiledownload_parser.add_argument('bucket', metavar='BUCKET')
bucketfiledownload_parser.add_argument('path', metavar='PATH')
bucketfiledownload_parser.set_defaults(func=download_bucketfile)
download_subparsers.add_parser('bucket-file', parents=[bucketfiledownload_parser],
description=bucketfiledownload_desc, help=bucketfiledownload_desc)
bucketdelete_desc = 'Delete Bucket'
bucketdelete_parser = delete_subparsers.add_parser('bucket', description=bucketdelete_desc, help=bucketdelete_desc)
    bucketdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
bucketdelete_parser.add_argument('buckets', metavar='BUCKETS', nargs='+')
bucketdelete_parser.set_defaults(func=delete_bucket)
bucketlist_desc = 'List Buckets'
bucketlist_parser = list_subparsers.add_parser('bucket', description=bucketlist_desc, help=bucketlist_desc,
aliases=['buckets'])
bucketlist_parser.set_defaults(func=list_bucket)
bucketfileslist_desc = 'List Bucket files'
bucketfileslist_parser = list_subparsers.add_parser('bucket-file', description=bucketfileslist_desc,
help=bucketfileslist_desc, aliases=['bucket-files'])
bucketfileslist_parser.add_argument('bucket', metavar='BUCKET')
bucketfileslist_parser.set_defaults(func=list_bucketfiles)
cachedelete_desc = 'Delete Cache'
cachedelete_parser = delete_subparsers.add_parser('cache', description=cachedelete_desc, help=cachedelete_desc)
    cachedelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
cachedelete_parser.set_defaults(func=delete_cache)
containercreate_desc = 'Create Container'
containercreate_epilog = None
containercreate_parser = create_subparsers.add_parser('container', description=containercreate_desc,
help=containercreate_desc, epilog=containercreate_epilog,
formatter_class=rawhelp)
containercreate_parser_group = containercreate_parser.add_mutually_exclusive_group(required=True)
containercreate_parser_group.add_argument('-i', '--image', help='Image to use', metavar='Image')
containercreate_parser_group.add_argument('-p', '--profile', help='Profile to use', metavar='PROFILE')
    containercreate_parser.add_argument('-P', '--param', action='append',
                                        help=PARAMETERS_HELP, metavar='PARAM')
containercreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
containercreate_parser.add_argument('name', metavar='NAME', nargs='?')
containercreate_parser.set_defaults(func=create_container)
containerdelete_desc = 'Delete Container'
containerdelete_parser = delete_subparsers.add_parser('container', description=containerdelete_desc,
help=containerdelete_desc)
    containerdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
    containerdelete_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='+')
containerdelete_parser.set_defaults(func=delete_container)
containerimagelist_desc = 'List Container Images'
containerimagelist_parser = list_subparsers.add_parser('container-image', description=containerimagelist_desc,
help=containerimagelist_desc,
aliases=['container-images'])
containerimagelist_parser.set_defaults(func=list_containerimage)
containerlist_desc = 'List Containers'
containerlist_parser = list_subparsers.add_parser('container', description=containerlist_desc,
help=containerlist_desc, aliases=['containers'])
containerlist_parser.add_argument('--filters', choices=('up', 'down'))
containerlist_parser.set_defaults(func=list_container)
containerprofilelist_desc = 'List Container Profiles'
containerprofilelist_parser = list_subparsers.add_parser('container-profile', description=containerprofilelist_desc,
help=containerprofilelist_desc,
aliases=['container-profiles'])
containerprofilelist_parser.add_argument('--short', action='store_true')
containerprofilelist_parser.set_defaults(func=profilelist_container)
containerrestart_desc = 'Restart Containers'
containerrestart_parser = restart_subparsers.add_parser('container', description=containerrestart_desc,
help=containerrestart_desc)
containerrestart_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='*')
containerrestart_parser.set_defaults(func=restart_container)
containerstart_desc = 'Start Containers'
containerstart_parser = start_subparsers.add_parser('container', description=containerstart_desc,
help=containerstart_desc)
    containerstart_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='*')
containerstart_parser.set_defaults(func=start_container)
containerstop_desc = 'Stop Containers'
containerstop_parser = stop_subparsers.add_parser('container', description=containerstop_desc,
help=containerstop_desc)
containerstop_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='*')
containerstop_parser.set_defaults(func=stop_container)
dnscreate_desc = 'Create Dns Entries'
dnscreate_epilog = "examples:\n%s" % dnscreate
dnscreate_parser = create_subparsers.add_parser('dns', description=dnscreate_desc, help=dnscreate_desc,
epilog=dnscreate_epilog,
formatter_class=rawhelp)
dnscreate_parser.add_argument('-a', '--alias', action='append', help='specify alias (can specify multiple)',
metavar='ALIAS')
dnscreate_parser.add_argument('-d', '--domain', help='Domain where to create entry', metavar='DOMAIN')
dnscreate_parser.add_argument('-n', '--net', help='Network where to create entry. Defaults to default',
default='default', metavar='NET')
dnscreate_parser.add_argument('-i', '--ip', help='Ip', metavar='IP')
dnscreate_parser.add_argument('names', metavar='NAMES', nargs='*')
dnscreate_parser.set_defaults(func=create_dns)
dnsdelete_desc = 'Delete Dns Entries'
dnsdelete_parser = delete_subparsers.add_parser('dns', description=dnsdelete_desc, help=dnsdelete_desc)
dnsdelete_parser.add_argument('-a', '--all', action='store_true',
help='Whether to delete the entire host block. Libvirt specific')
dnsdelete_parser.add_argument('-d', '--domain', help='Domain of the entry', metavar='DOMAIN')
dnsdelete_parser.add_argument('-n', '--net', help='Network where to delete entry. Defaults to default',
default='default', metavar='NET')
dnsdelete_parser.add_argument('names', metavar='NAMES', nargs='*')
dnsdelete_parser.set_defaults(func=delete_dns)
dnslist_desc = 'List Dns Entries'
dnslist_parser = argparse.ArgumentParser(add_help=False)
dnslist_parser.add_argument('--short', action='store_true')
dnslist_parser.add_argument('domain', metavar='DOMAIN', help='Domain where to list entry (network for libvirt)')
dnslist_parser.set_defaults(func=list_dns)
list_subparsers.add_parser('dns', parents=[dnslist_parser], description=dnslist_desc, help=dnslist_desc)
hostcreate_desc = 'Create Host'
hostcreate_epilog = "examples:\n%s" % hostcreate
hostcreate_parser = create_subparsers.add_parser('host', help=hostcreate_desc, description=hostcreate_desc,
aliases=['client'], epilog=hostcreate_epilog,
formatter_class=rawhelp)
hostcreate_subparsers = hostcreate_parser.add_subparsers(metavar='', dest='subcommand_create_host')
awshostcreate_desc = 'Create Aws Host'
awshostcreate_parser = hostcreate_subparsers.add_parser('aws', help=awshostcreate_desc,
description=awshostcreate_desc)
awshostcreate_parser.add_argument('--access_key_id', help='Access Key Id', metavar='ACCESS_KEY_ID', required=True)
awshostcreate_parser.add_argument('--access_key_secret', help='Access Key Secret', metavar='ACCESS_KEY_SECRET',
required=True)
awshostcreate_parser.add_argument('-k', '--keypair', help='Keypair', metavar='KEYPAIR', required=True)
awshostcreate_parser.add_argument('-r', '--region', help='Region', metavar='REGION', required=True)
awshostcreate_parser.add_argument('name', metavar='NAME')
awshostcreate_parser.set_defaults(func=create_host_aws)
ibmhostcreate_desc = 'Create IBM Cloud Host'
ibmhostcreate_parser = hostcreate_subparsers.add_parser('ibm', help=ibmhostcreate_desc,
description=ibmhostcreate_desc)
ibmhostcreate_parser.add_argument('--iam_api_key', help='IAM API Key', metavar='IAM_API_KEY', required=True)
ibmhostcreate_parser.add_argument('--access_key_id', help='Access Key Id', metavar='ACCESS_KEY_ID')
ibmhostcreate_parser.add_argument('--access_key_secret', help='Access Key Secret', metavar='ACCESS_KEY_SECRET')
ibmhostcreate_parser.add_argument('--vpc', help='VPC name', metavar='VPC')
ibmhostcreate_parser.add_argument('--zone', help='Zone within the region', metavar='ZONE')
ibmhostcreate_parser.add_argument('-r', '--region', help='Region', metavar='REGION', required=True)
ibmhostcreate_parser.add_argument('name', metavar='NAME')
ibmhostcreate_parser.set_defaults(func=create_host_ibm)
gcphostcreate_desc = 'Create Gcp Host'
gcphostcreate_parser = hostcreate_subparsers.add_parser('gcp', help=gcphostcreate_desc,
description=gcphostcreate_desc)
gcphostcreate_parser.add_argument('--credentials', help='Path to credentials file', metavar='credentials')
gcphostcreate_parser.add_argument('--project', help='Project', metavar='project', required=True)
gcphostcreate_parser.add_argument('--zone', help='Zone', metavar='zone', required=True)
gcphostcreate_parser.add_argument('name', metavar='NAME')
gcphostcreate_parser.set_defaults(func=create_host_gcp)
grouphostcreate_desc = 'Create Group Host'
grouphostcreate_parser = hostcreate_subparsers.add_parser('group', help=grouphostcreate_desc,
description=grouphostcreate_desc)
grouphostcreate_parser.add_argument('-a', '--algorithm', help='Algorithm. Defaults to random',
metavar='ALGORITHM', default='random')
grouphostcreate_parser.add_argument('-m', '--members', help='Members', metavar='MEMBERS', type=valid_members)
grouphostcreate_parser.add_argument('name', metavar='NAME')
grouphostcreate_parser.set_defaults(func=create_host_group)
kvmhostcreate_desc = 'Create Kvm Host'
kvmhostcreate_parser = hostcreate_subparsers.add_parser('kvm', help=kvmhostcreate_desc,
description=kvmhostcreate_desc)
kvmhostcreate_parser_group = kvmhostcreate_parser.add_mutually_exclusive_group(required=True)
kvmhostcreate_parser_group.add_argument('-H', '--host', help='Host. Defaults to localhost', metavar='HOST',
default='localhost')
kvmhostcreate_parser.add_argument('--pool', help='Pool. Defaults to default', metavar='POOL', default='default')
kvmhostcreate_parser.add_argument('-p', '--port', help='Port', metavar='PORT')
kvmhostcreate_parser.add_argument('-P', '--protocol', help='Protocol to use', default='ssh', metavar='PROTOCOL')
kvmhostcreate_parser_group.add_argument('-U', '--url', help='URL to use', metavar='URL')
kvmhostcreate_parser.add_argument('-u', '--user', help='User. Defaults to root', default='root', metavar='USER')
kvmhostcreate_parser.add_argument('name', metavar='NAME')
kvmhostcreate_parser.set_defaults(func=create_host_kvm)
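# The kvm host subcommand requires exactly one of -H/--host or -U/--url (mutually
# exclusive group above). A minimal sketch with placeholder values:
#   kcli create host kvm -H hypervisor.example.com -u root -P ssh mykvm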
kubevirthostcreate_desc = 'Create Kubevirt Host'
kubevirthostcreate_parser = hostcreate_subparsers.add_parser('kubevirt', help=kubevirthostcreate_desc,
description=kubevirthostcreate_desc)
kubevirthostcreate_parser.add_argument('--ca', help='Ca file', metavar='CA')
kubevirthostcreate_parser.add_argument('--cdi', help='Cdi Support', action='store_true', default=True)
kubevirthostcreate_parser.add_argument('-c', '--context', help='Context', metavar='CONTEXT')
kubevirthostcreate_parser.add_argument('-H', '--host', help='Api Host', metavar='HOST')
kubevirthostcreate_parser.add_argument('-p', '--pool', help='Storage Class', metavar='POOL')
kubevirthostcreate_parser.add_argument('--port', help='Api Port', metavar='PORT')
kubevirthostcreate_parser.add_argument('--token', help='Token', metavar='TOKEN')
kubevirthostcreate_parser.add_argument('--multus', help='Multus Support', action='store_true', default=True)
kubevirthostcreate_parser.add_argument('name', metavar='NAME')
kubevirthostcreate_parser.set_defaults(func=create_host_kubevirt)
openstackhostcreate_desc = 'Create Openstack Host'
openstackhostcreate_parser = hostcreate_subparsers.add_parser('openstack', help=openstackhostcreate_desc,
description=openstackhostcreate_desc)
openstackhostcreate_parser.add_argument('--auth-url', help='Auth url', metavar='AUTH_URL', required=True)
openstackhostcreate_parser.add_argument('--domain', help='Domain', metavar='DOMAIN', default='Default')
openstackhostcreate_parser.add_argument('-p', '--password', help='Password', metavar='PASSWORD', required=True)
openstackhostcreate_parser.add_argument('--project', help='Project', metavar='PROJECT', required=True)
openstackhostcreate_parser.add_argument('-u', '--user', help='User', metavar='USER', required=True)
openstackhostcreate_parser.add_argument('name', metavar='NAME')
openstackhostcreate_parser.set_defaults(func=create_host_openstack)
ovirthostcreate_desc = 'Create Ovirt Host'
ovirthostcreate_parser = hostcreate_subparsers.add_parser('ovirt', help=ovirthostcreate_desc,
description=ovirthostcreate_desc)
ovirthostcreate_parser.add_argument('--ca', help='Path to certificate file', metavar='CA')
ovirthostcreate_parser.add_argument('-c', '--cluster', help='Cluster. Defaults to Default', default='Default',
metavar='CLUSTER')
ovirthostcreate_parser.add_argument('-d', '--datacenter', help='Datacenter. Defaults to Default', default='Default',
metavar='DATACENTER')
ovirthostcreate_parser.add_argument('-H', '--host', help='Host to use', metavar='HOST', required=True)
ovirthostcreate_parser.add_argument('-o', '--org', help='Organization', metavar='ORGANIZATION', required=True)
ovirthostcreate_parser.add_argument('-p', '--password', help='Password to use', metavar='PASSWORD', required=True)
ovirthostcreate_parser.add_argument('--pool', help='Storage Domain', metavar='POOL')
ovirthostcreate_parser.add_argument('-u', '--user', help='User. Defaults to admin@internal',
metavar='USER', default='admin@internal')
ovirthostcreate_parser.add_argument('name', metavar='NAME')
ovirthostcreate_parser.set_defaults(func=create_host_ovirt)
vspherehostcreate_desc = 'Create Vsphere Host'
vspherehostcreate_parser = hostcreate_subparsers.add_parser('vsphere', help=vspherehostcreate_desc,
description=vspherehostcreate_desc)
vspherehostcreate_parser.add_argument('-c', '--cluster', help='Cluster', metavar='CLUSTER', required=True)
vspherehostcreate_parser.add_argument('-d', '--datacenter', help='Datacenter', metavar='DATACENTER', required=True)
vspherehostcreate_parser.add_argument('-H', '--host', help='Vcenter Host', metavar='HOST', required=True)
vspherehostcreate_parser.add_argument('-p', '--password', help='Password', metavar='PASSWORD', required=True)
vspherehostcreate_parser.add_argument('-u', '--user', help='User', metavar='USER', required=True)
vspherehostcreate_parser.add_argument('--pool', help='Pool', metavar='POOL')
vspherehostcreate_parser.add_argument('name', metavar='NAME')
vspherehostcreate_parser.set_defaults(func=create_host_vsphere)
hostdelete_desc = 'Delete Host'
hostdelete_parser = delete_subparsers.add_parser('host', description=hostdelete_desc, help=hostdelete_desc,
aliases=['client'])
hostdelete_parser.add_argument('name', metavar='NAME')
hostdelete_parser.set_defaults(func=delete_host)
hostdisable_desc = 'Disable Host'
hostdisable_parser = disable_subparsers.add_parser('host', description=hostdisable_desc, help=hostdisable_desc,
aliases=['client'])
hostdisable_parser.add_argument('name', metavar='NAME')
hostdisable_parser.set_defaults(func=disable_host)
hostenable_desc = 'Enable Host'
hostenable_parser = enable_subparsers.add_parser('host', description=hostenable_desc, help=hostenable_desc,
aliases=['client'])
hostenable_parser.add_argument('name', metavar='NAME')
hostenable_parser.set_defaults(func=enable_host)
hostlist_parser = list_subparsers.add_parser('host', description=hostlist_desc, help=hostlist_desc,
aliases=['hosts', 'client', 'clients'])
hostlist_parser.set_defaults(func=list_host)
hostreport_desc = 'Report Info About Host'
hostreport_parser = argparse.ArgumentParser(add_help=False)
hostreport_parser.set_defaults(func=report_host)
info_subparsers.add_parser('host', parents=[hostreport_parser], description=hostreport_desc, help=hostreport_desc,
aliases=['client'])
hostswitch_desc = 'Switch Host'
hostswitch_parser = argparse.ArgumentParser(add_help=False)
hostswitch_parser.add_argument('name', help='NAME')
hostswitch_parser.set_defaults(func=switch_host)
switch_subparsers.add_parser('host', parents=[hostswitch_parser], description=hostswitch_desc, help=hostswitch_desc,
aliases=['client'])
hostsync_desc = 'Sync Host'
hostsync_parser = sync_subparsers.add_parser('host', description=hostsync_desc, help=hostsync_desc,
aliases=['client'])
hostsync_parser.add_argument('names', help='NAMES', nargs='*')
hostsync_parser.set_defaults(func=sync_host)
imagedelete_desc = 'Delete Image'
imagedelete_help = "Image to delete"
imagedelete_parser = argparse.ArgumentParser(add_help=False)
imagedelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
imagedelete_parser.add_argument('-p', '--pool', help='Pool to use', metavar='POOL')
imagedelete_parser.add_argument('images', help=imagedelete_help, metavar='IMAGES', nargs='*')
imagedelete_parser.set_defaults(func=delete_image)
delete_subparsers.add_parser('image', parents=[imagedelete_parser], description=imagedelete_desc,
help=imagedelete_desc)
delete_subparsers.add_parser('iso', parents=[imagedelete_parser], description=imagedelete_desc,
help=imagedelete_desc)
kubecreate_desc = 'Create Kube'
kubecreate_parser = create_subparsers.add_parser('kube', description=kubecreate_desc, help=kubecreate_desc,
aliases=['cluster'])
kubecreate_subparsers = kubecreate_parser.add_subparsers(metavar='', dest='subcommand_create_kube')
kubegenericcreate_desc = 'Create Generic Kube'
kubegenericcreate_epilog = "examples:\n%s" % kubegenericcreate
kubegenericcreate_parser = argparse.ArgumentParser(add_help=False)
kubegenericcreate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubegenericcreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubegenericcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubegenericcreate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubegenericcreate_parser.set_defaults(func=create_generic_kube)
kubecreate_subparsers.add_parser('generic', parents=[kubegenericcreate_parser],
description=kubegenericcreate_desc,
help=kubegenericcreate_desc,
epilog=kubegenericcreate_epilog,
formatter_class=rawhelp, aliases=['kubeadm'])
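# Sketch of a generic (kubeadm) cluster creation with the parser above; the -P
# parameter names are illustrative, not a fixed schema:
#   kcli create kube generic -P masters=1 -P workers=2 mycluster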
kubek3screate_desc = 'Create K3s Kube'
kubek3screate_epilog = "examples:\n%s" % kubek3screate
kubek3screate_parser = argparse.ArgumentParser(add_help=False)
kubek3screate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubek3screate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubek3screate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubek3screate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubek3screate_parser.set_defaults(func=create_k3s_kube)
kubecreate_subparsers.add_parser('k3s', parents=[kubek3screate_parser],
description=kubek3screate_desc,
help=kubek3screate_desc,
epilog=kubek3screate_epilog,
formatter_class=rawhelp)
parameterhelp = "specify parameter or keyword for rendering (multiple can be specified)"
kubeopenshiftcreate_desc = 'Create Openshift Kube'
kubeopenshiftcreate_epilog = "examples:\n%s" % kubeopenshiftcreate
kubeopenshiftcreate_parser = argparse.ArgumentParser(add_help=False)
kubeopenshiftcreate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubeopenshiftcreate_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubeopenshiftcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubeopenshiftcreate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubeopenshiftcreate_parser.set_defaults(func=create_openshift_kube)
kubecreate_subparsers.add_parser('openshift', parents=[kubeopenshiftcreate_parser],
description=kubeopenshiftcreate_desc,
help=kubeopenshiftcreate_desc,
epilog=kubeopenshiftcreate_epilog,
formatter_class=rawhelp, aliases=['okd'])
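# Equivalent sketch for an openshift cluster ('okd' is an alias of the same parser);
# the parameter file name is an assumption:
#   kcli create kube openshift --paramfile my_parameters.yml mycluster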
kubedelete_desc = 'Delete Kube'
kubedelete_parser = argparse.ArgumentParser(add_help=False)
kubedelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
kubedelete_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubedelete_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubedelete_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubedelete_parser.set_defaults(func=delete_kube)
delete_subparsers.add_parser('kube', parents=[kubedelete_parser], description=kubedelete_desc, help=kubedelete_desc,
aliases=['cluster'])
kubeinfo_desc = 'Info Kube'
kubeinfo_parser = info_subparsers.add_parser('kube', description=kubeinfo_desc, help=kubeinfo_desc,
aliases=['cluster'])
kubeinfo_subparsers = kubeinfo_parser.add_subparsers(metavar='', dest='subcommand_info_kube')
kubegenericinfo_desc = 'Info Generic Kube'
kubegenericinfo_parser = kubeinfo_subparsers.add_parser('generic', description=kubegenericinfo_desc,
help=kubegenericinfo_desc, aliases=['kubeadm'])
kubegenericinfo_parser.set_defaults(func=info_generic_kube)
kubek3sinfo_desc = 'Info K3s Kube'
kubek3sinfo_parser = kubeinfo_subparsers.add_parser('k3s', description=kubek3sinfo_desc, help=kubek3sinfo_desc)
kubek3sinfo_parser.set_defaults(func=info_k3s_kube)
kubeopenshiftinfo_desc = 'Info Openshift Kube'
kubeopenshiftinfo_parser = kubeinfo_subparsers.add_parser('openshift', description=kubeopenshiftinfo_desc,
help=kubeopenshiftinfo_desc, aliases=['okd'])
kubeopenshiftinfo_parser.set_defaults(func=info_openshift_kube)
kubelist_desc = 'List Kubes'
kubelist_parser = list_subparsers.add_parser('kube', description=kubelist_desc, help=kubelist_desc,
aliases=['kubes', 'cluster', 'clusters'])
kubelist_parser.set_defaults(func=list_kube)
kubescale_desc = 'Scale Kube'
kubescale_parser = scale_subparsers.add_parser('kube', description=kubescale_desc, help=kubescale_desc,
aliases=['cluster'])
kubescale_subparsers = kubescale_parser.add_subparsers(metavar='', dest='subcommand_scale_kube')
kubegenericscale_desc = 'Scale Generic Kube'
kubegenericscale_parser = argparse.ArgumentParser(add_help=False)
kubegenericscale_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubegenericscale_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubegenericscale_parser.add_argument('-w', '--workers', help='Total number of workers', type=int, default=0)
kubegenericscale_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubegenericscale_parser.set_defaults(func=scale_generic_kube)
kubescale_subparsers.add_parser('generic', parents=[kubegenericscale_parser], description=kubegenericscale_desc,
help=kubegenericscale_desc, aliases=['kubeadm'])
kubek3sscale_desc = 'Scale K3s Kube'
kubek3sscale_parser = argparse.ArgumentParser(add_help=False)
kubek3sscale_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubek3sscale_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubek3sscale_parser.add_argument('-w', '--workers', help='Total number of workers', type=int, default=0)
kubek3sscale_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubek3sscale_parser.set_defaults(func=scale_k3s_kube)
kubescale_subparsers.add_parser('k3s', parents=[kubek3sscale_parser], description=kubek3sscale_desc,
help=kubek3sscale_desc)
parameterhelp = "specify parameter or keyword for rendering (multiple can be specified)"
kubeopenshiftscale_desc = 'Scale Openshift Kube'
kubeopenshiftscale_parser = argparse.ArgumentParser(add_help=False)
kubeopenshiftscale_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubeopenshiftscale_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubeopenshiftscale_parser.add_argument('-w', '--workers', help='Total number of workers', type=int, default=0)
kubeopenshiftscale_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubeopenshiftscale_parser.set_defaults(func=scale_openshift_kube)
kubescale_subparsers.add_parser('openshift', parents=[kubeopenshiftscale_parser],
description=kubeopenshiftscale_desc,
help=kubeopenshiftscale_desc, aliases=['okd'])
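# Scaling reuses the cluster positional plus -w/--workers for the target worker
# count, e.g. (the cluster name is a placeholder):
#   kcli scale kube k3s -w 3 mycluster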
kubeupdate_desc = 'Update Kube'
kubeupdate_parser = update_subparsers.add_parser('kube', description=kubeupdate_desc, help=kubeupdate_desc,
aliases=['cluster'])
kubeupdate_subparsers = kubeupdate_parser.add_subparsers(metavar='', dest='subcommand_update_kube')
kubegenericupdate_desc = 'Update Generic Kube'
kubegenericupdate_parser = argparse.ArgumentParser(add_help=False)
kubegenericupdate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubegenericupdate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubegenericupdate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubegenericupdate_parser.set_defaults(func=update_generic_kube)
kubeupdate_subparsers.add_parser('generic', parents=[kubegenericupdate_parser], description=kubegenericupdate_desc,
help=kubegenericupdate_desc, aliases=['kubeadm'])
kubek3supdate_desc = 'Update K3s Kube'
kubek3supdate_parser = argparse.ArgumentParser(add_help=False)
kubek3supdate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubek3supdate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubek3supdate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubek3supdate_parser.set_defaults(func=update_k3s_kube)
kubeupdate_subparsers.add_parser('k3s', parents=[kubek3supdate_parser], description=kubek3supdate_desc,
help=kubek3supdate_desc)
parameterhelp = "specify parameter or keyword for rendering (multiple can be specified)"
kubeopenshiftupdate_desc = 'Update Openshift Kube'
kubeopenshiftupdate_parser = argparse.ArgumentParser(add_help=False)
kubeopenshiftupdate_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubeopenshiftupdate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubeopenshiftupdate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubeopenshiftupdate_parser.set_defaults(func=update_openshift_kube)
kubeupdate_subparsers.add_parser('openshift', parents=[kubeopenshiftupdate_parser],
description=kubeopenshiftupdate_desc,
help=kubeopenshiftupdate_desc, aliases=['okd'])
lbcreate_desc = 'Create Load Balancer'
lbcreate_parser = create_subparsers.add_parser('lb', description=lbcreate_desc, help=lbcreate_desc,
aliases=['loadbalancer'])
lbcreate_parser.add_argument('--checkpath', default='/index.html', help="Path to check. Defaults to /index.html")
lbcreate_parser.add_argument('--checkport', default=80, help="Port to check. Defaults to 80")
lbcreate_parser.add_argument('--domain', help='Domain to create a dns entry associated to the load balancer')
lbcreate_parser.add_argument('-i', '--internal', action='store_true')
lbcreate_parser.add_argument('-p', '--ports', default='443', help='Load Balancer Ports. Defaults to 443')
lbcreate_parser.add_argument('-v', '--vms', help='Vms to add to the pool. Can also be a list of ips')
lbcreate_parser.add_argument('name', metavar='NAME', nargs='?')
lbcreate_parser.set_defaults(func=create_lb)
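# Load balancer creation sketch using the flags defined above; the domain, ports and
# vm names are placeholders:
#   kcli create lb --domain example.com -p 80,443 -v vm1,vm2 mylb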
lbdelete_desc = 'Delete Load Balancer'
lbdelete_parser = delete_subparsers.add_parser('lb', description=lbdelete_desc, help=lbdelete_desc,
aliases=['loadbalancer'])
lbdelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
lbdelete_parser.add_argument('name', metavar='NAME')
lbdelete_parser.set_defaults(func=delete_lb)
lblist_desc = 'List Load Balancers'
lblist_parser = list_subparsers.add_parser('lb', description=lblist_desc, help=lblist_desc,
aliases=['loadbalancers', 'lbs'])
lblist_parser.add_argument('--short', action='store_true')
lblist_parser.set_defaults(func=list_lb)
profilecreate_desc = 'Create Profile'
profilecreate_parser = argparse.ArgumentParser(add_help=False)
profilecreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
profilecreate_parser.add_argument('profile', metavar='PROFILE')
profilecreate_parser.set_defaults(func=create_profile)
create_subparsers.add_parser('profile', parents=[profilecreate_parser], description=profilecreate_desc,
help=profilecreate_desc)
profileinfo_desc = 'Info Profile'
profileinfo_parser = info_subparsers.add_parser('profile', description=profileinfo_desc, help=profileinfo_desc)
profileinfo_parser.add_argument('profile', metavar='PROFILE')
profileinfo_parser.set_defaults(func=info_profile)
profilelist_desc = 'List Profiles'
profilelist_parser = list_subparsers.add_parser('profile', description=profilelist_desc, help=profilelist_desc,
aliases=['profiles'])
profilelist_parser.add_argument('--short', action='store_true')
profilelist_parser.set_defaults(func=list_profile)
profileupdate_desc = 'Update Profile'
profileupdate_parser = update_subparsers.add_parser('profile', description=profileupdate_desc,
help=profileupdate_desc)
profileupdate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
profileupdate_parser.add_argument('profile', metavar='PROFILE', nargs='?')
profileupdate_parser.set_defaults(func=update_profile)
flavorlist_desc = 'List Flavors'
flavorlist_parser = list_subparsers.add_parser('flavor', description=flavorlist_desc, help=flavorlist_desc,
aliases=['flavors'])
flavorlist_parser.add_argument('--short', action='store_true')
flavorlist_parser.set_defaults(func=list_flavor)
isolist_desc = 'List Isos'
isolist_parser = list_subparsers.add_parser('iso', description=isolist_desc, help=isolist_desc, aliases=['isos'])
isolist_parser.set_defaults(func=list_iso)
keywordlist_desc = 'List Keyword'
keywordlist_parser = list_subparsers.add_parser('keyword', description=keywordlist_desc, help=keywordlist_desc,
aliases=['keywords'])
keywordlist_parser.set_defaults(func=list_keyword)
networklist_desc = 'List Networks'
networklist_parser = list_subparsers.add_parser('network', description=networklist_desc, help=networklist_desc,
aliases=['networks'])
networklist_parser.add_argument('--short', action='store_true')
networklist_parser.add_argument('-s', '--subnets', action='store_true')
networklist_parser.set_defaults(func=list_network)
networkcreate_desc = 'Create Network'
networkcreate_parser = create_subparsers.add_parser('network', description=networkcreate_desc,
help=networkcreate_desc)
networkcreate_parser.add_argument('-i', '--isolated', action='store_true', help='Isolated Network')
networkcreate_parser.add_argument('-c', '--cidr', help='Cidr of the net', metavar='CIDR')
networkcreate_parser.add_argument('-d', '--dual', help='Cidr of dual net', metavar='DUAL')
networkcreate_parser.add_argument('--nodhcp', action='store_true', help='Disable dhcp on the net')
networkcreate_parser.add_argument('--domain', help='DNS domain. Defaults to network name')
networkcreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
networkcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
networkcreate_parser.add_argument('name', metavar='NETWORK')
networkcreate_parser.set_defaults(func=create_network)
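# Network creation sketch (the cidr is an example value):
#   kcli create network -c 192.168.122.0/24 mynet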
networkdelete_desc = 'Delete Network'
networkdelete_parser = delete_subparsers.add_parser('network', description=networkdelete_desc,
help=networkdelete_desc)
networkdelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
networkdelete_parser.add_argument('names', metavar='NETWORKS', nargs='+')
networkdelete_parser.set_defaults(func=delete_network)
disconnectercreate_desc = 'Create a disconnecter vm for openshift'
disconnectercreate_epilog = "examples:\n%s" % disconnectercreate
disconnectercreate_parser = argparse.ArgumentParser(add_help=False)
disconnectercreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
disconnectercreate_parser.add_argument('plan', metavar='PLAN', help='Plan', nargs='?')
disconnectercreate_parser.set_defaults(func=create_openshift_disconnecter)
create_subparsers.add_parser('openshift-disconnecter', parents=[disconnectercreate_parser],
description=disconnectercreate_desc, help=disconnectercreate_desc,
epilog=disconnectercreate_epilog, formatter_class=rawhelp)
isocreate_desc = 'Create an iso ignition for baremetal install'
isocreate_epilog = "examples:\n%s" % isocreate
isocreate_parser = argparse.ArgumentParser(add_help=False)
isocreate_parser.add_argument('-f', '--ignitionfile', help='Ignition file')
isocreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
isocreate_parser.add_argument('cluster', metavar='CLUSTER', help='Cluster')
isocreate_parser.set_defaults(func=create_openshift_iso)
create_subparsers.add_parser('openshift-iso', parents=[isocreate_parser], description=isocreate_desc,
help=isocreate_desc, epilog=isocreate_epilog, formatter_class=rawhelp)
pipelinecreate_desc = 'Create Pipeline'
pipelinecreate_parser = create_subparsers.add_parser('pipeline', description=pipelinecreate_desc,
help=pipelinecreate_desc)
pipelinecreate_subparsers = pipelinecreate_parser.add_subparsers(metavar='', dest='subcommand_create_pipeline')
githubpipelinecreate_desc = 'Create Github Pipeline'
githubpipelinecreate_parser = pipelinecreate_subparsers.add_parser('github', description=githubpipelinecreate_desc,
help=githubpipelinecreate_desc, aliases=['gha'])
githubpipelinecreate_parser.add_argument('-f', '--inputfile', help='Input Plan (or script) file')
githubpipelinecreate_parser.add_argument('-k', '--kube', action='store_true', help='Create kube pipeline')
githubpipelinecreate_parser.add_argument('-s', '--script', action='store_true', help='Create script pipeline')
githubpipelinecreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
githubpipelinecreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
githubpipelinecreate_parser.set_defaults(func=create_pipeline_github)
jenkinspipelinecreate_desc = 'Create Jenkins Pipeline'
jenkinspipelinecreate_parser = pipelinecreate_subparsers.add_parser('jenkins',
description=jenkinspipelinecreate_desc,
help=jenkinspipelinecreate_desc)
jenkinspipelinecreate_parser.add_argument('-f', '--inputfile', help='Input Plan file')
jenkinspipelinecreate_parser.add_argument('-k', '--kube', action='store_true', help='Create kube pipeline')
jenkinspipelinecreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
jenkinspipelinecreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
jenkinspipelinecreate_parser.set_defaults(func=create_pipeline_jenkins)
plancreate_desc = 'Create Plan'
plancreate_epilog = "examples:\n%s" % plancreate
plancreate_parser = create_subparsers.add_parser('plan', description=plancreate_desc, help=plancreate_desc,
epilog=plancreate_epilog,
formatter_class=rawhelp)
plancreate_parser.add_argument('-A', '--ansible', help='Generate ansible inventory', action='store_true')
plancreate_parser.add_argument('-u', '--url', help='Url for plan', metavar='URL', type=valid_url)
plancreate_parser.add_argument('-p', '--path', help='Path where to download plans. Defaults to plan',
metavar='PATH')
plancreate_parser.add_argument('-c', '--container', action='store_true', help='Handle container')
plancreate_parser.add_argument('--force', action='store_true', help='Delete existing vms first')
plancreate_parser.add_argument('-f', '--inputfile', help='Input Plan file')
plancreate_parser.add_argument('-k', '--skippre', action='store_true', help='Skip pre script')
plancreate_parser.add_argument('-z', '--skippost', action='store_true', help='Skip post script')
plancreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
plancreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
plancreate_parser.add_argument('plan', metavar='PLAN', nargs='?')
plancreate_parser.set_defaults(func=create_plan)
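# A plan bundles several objects in a single input file; illustrative invocation,
# assuming the default plan file name used elsewhere in this module:
#   kcli create plan -f kcli_plan.yml -P param1=value1 myplan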
plandelete_desc = 'Delete Plan'
plandelete_parser = delete_subparsers.add_parser('plan', description=plandelete_desc, help=plandelete_desc)
plandelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
plandelete_parser.add_argument('plan', metavar='PLAN')
plandelete_parser.set_defaults(func=delete_plan)
plansnapshotdelete_desc = 'Delete Plan Snapshot'
plansnapshotdelete_parser = delete_subparsers.add_parser('plan-snapshot', description=plansnapshotdelete_desc,
help=plansnapshotdelete_desc)
plansnapshotdelete_parser.add_argument('-p', '--plan', help='plan name', required=True, metavar='PLAN')
plansnapshotdelete_parser.add_argument('snapshot', metavar='SNAPSHOT')
plansnapshotdelete_parser.set_defaults(func=delete_snapshot_plan)
planexpose_desc = 'Expose plan'
planexpose_epilog = None
planexpose_parser = argparse.ArgumentParser(add_help=False)
planexpose_parser.add_argument('-f', '--inputfile', help='Input Plan file')
planexpose_parser.add_argument('-i', '--installermode', action='store_true', help='Filter by installervm')
planexpose_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
planexpose_parser.add_argument('--port', help='Port where to listen', type=int, default=9000, metavar='PORT')
planexpose_parser.add_argument('plan', metavar='PLAN', nargs='?')
planexpose_parser.set_defaults(func=expose_plan)
expose_subparsers.add_parser('plan', parents=[planexpose_parser], description=planexpose_desc, help=planexpose_desc,
epilog=planexpose_epilog, formatter_class=rawhelp)
planinfo_desc = 'Info Plan'
planinfo_epilog = "examples:\n%s" % planinfo
planinfo_parser = info_subparsers.add_parser('plan', description=planinfo_desc, help=planinfo_desc,
epilog=planinfo_epilog,
formatter_class=rawhelp)
planinfo_parser.add_argument('--doc', action='store_true', help='Render info as markdown table')
planinfo_parser.add_argument('-f', '--inputfile', help='Input Plan file')
planinfo_parser.add_argument('-p', '--path', help='Path where to download plans. Defaults to plan', metavar='PATH')
planinfo_parser.add_argument('-q', '--quiet', action='store_true', help='Provide parameter file output')
planinfo_parser.add_argument('-u', '--url', help='Url for plan', metavar='URL', type=valid_url)
planinfo_parser.set_defaults(func=info_plan)
planlist_desc = 'List Plans'
planlist_parser = list_subparsers.add_parser('plan', description=planlist_desc, help=planlist_desc,
aliases=['plans'])
planlist_parser.set_defaults(func=list_plan)
planrestart_desc = 'Restart Plan'
planrestart_parser = restart_subparsers.add_parser('plan', description=planrestart_desc, help=planrestart_desc)
planrestart_parser.add_argument('-s', '--soft', action='store_true', help='Do a soft stop')
planrestart_parser.add_argument('plan', metavar='PLAN')
planrestart_parser.set_defaults(func=restart_plan)
plandatacreate_desc = 'Create Cloudinit/Ignition from plan file'
plandatacreate_epilog = "examples:\n%s" % plandatacreate
plandatacreate_parser = create_subparsers.add_parser('plan-data', description=plandatacreate_desc,
help=plandatacreate_desc, epilog=plandatacreate_epilog,
formatter_class=rawhelp)
plandatacreate_parser.add_argument('-f', '--inputfile', help='Input Plan file', default='kcli_plan.yml')
plandatacreate_parser.add_argument('--outputdir', '-o', help='Output directory', metavar='OUTPUTDIR')
plandatacreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
plandatacreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
plandatacreate_parser.add_argument('name', metavar='VMNAME', nargs='?', type=valid_fqdn)
plandatacreate_parser.set_defaults(func=create_plandata)
plantemplatecreate_desc = 'Create plan template'
plantemplatecreate_epilog = "examples:\n%s" % plantemplatecreate
plantemplatecreate_parser = create_subparsers.add_parser('plan-template', description=plantemplatecreate_desc,
help=plantemplatecreate_desc,
epilog=plantemplatecreate_epilog, formatter_class=rawhelp)
plantemplatecreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
plantemplatecreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
plantemplatecreate_parser.add_argument('-x', '--skipfiles', action='store_true', help='Skip files in assets')
plantemplatecreate_parser.add_argument('-y', '--skipscripts', action='store_true', help='Skip scripts in assets')
plantemplatecreate_parser.add_argument('directory', metavar='DIR')
plantemplatecreate_parser.set_defaults(func=create_plantemplate)
planrevert_desc = 'Revert Snapshot Of Plan'
planrevert_parser = revert_subparsers.add_parser('plan-snapshot', description=planrevert_desc, help=planrevert_desc,
aliases=['plan'])
planrevert_parser.add_argument('-p', '--plan', help='Plan name', required=True, metavar='PLANNAME')
planrevert_parser.add_argument('snapshot', metavar='SNAPSHOT')
planrevert_parser.set_defaults(func=revert_snapshot_plan)
plansnapshotcreate_desc = 'Create Plan Snapshot'
plansnapshotcreate_parser = create_subparsers.add_parser('plan-snapshot', description=plansnapshotcreate_desc,
help=plansnapshotcreate_desc)
plansnapshotcreate_parser.add_argument('-p', '--plan', help='plan name', required=True, metavar='PLAN')
plansnapshotcreate_parser.add_argument('snapshot', metavar='SNAPSHOT')
plansnapshotcreate_parser.set_defaults(func=create_snapshot_plan)
planstart_desc = 'Start Plan'
planstart_parser = start_subparsers.add_parser('plan', description=planstart_desc, help=planstart_desc)
planstart_parser.add_argument('plan', metavar='PLAN')
planstart_parser.set_defaults(func=start_plan)
planstop_desc = 'Stop Plan'
planstop_parser = stop_subparsers.add_parser('plan', description=planstop_desc, help=planstop_desc)
planstop_parser.add_argument('-s', '--soft', action='store_true', help='Do a soft stop')
planstop_parser.add_argument('plan', metavar='PLAN')
planstop_parser.set_defaults(func=stop_plan)
planupdate_desc = 'Update Plan'
planupdate_parser = update_subparsers.add_parser('plan', description=planupdate_desc, help=planupdate_desc)
planupdate_parser.add_argument('--autostart', action='store_true', help='Set autostart for vms of the plan')
planupdate_parser.add_argument('--noautostart', action='store_true', help='Remove autostart for vms of the plan')
planupdate_parser.add_argument('-u', '--url', help='Url for plan', metavar='URL', type=valid_url)
planupdate_parser.add_argument('-p', '--path', help='Path where to download plans. Defaults to plan',
metavar='PATH')
planupdate_parser.add_argument('-c', '--container', action='store_true', help='Handle container')
planupdate_parser.add_argument('-f', '--inputfile', help='Input Plan file')
planupdate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
planupdate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
planupdate_parser.add_argument('plan', metavar='PLAN')
planupdate_parser.set_defaults(func=update_plan)
playbookcreate_desc = 'Create playbook from plan'
playbookcreate_parser = create_subparsers.add_parser('playbook', description=playbookcreate_desc,
help=playbookcreate_desc)
playbookcreate_parser.add_argument('-f', '--inputfile', help='Input Plan/File', default='kcli_plan.yml')
playbookcreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
playbookcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
playbookcreate_parser.add_argument('-s', '--store', action='store_true', help="Store results in files")
playbookcreate_parser.set_defaults(func=create_playbook)
poolcreate_desc = 'Create Pool'
poolcreate_parser = create_subparsers.add_parser('pool', description=poolcreate_desc, help=poolcreate_desc)
poolcreate_parser.add_argument('-f', '--full', action='store_true')
poolcreate_parser.add_argument('-t', '--pooltype', help='Type of the pool', choices=('dir', 'lvm', 'zfs'),
default='dir')
poolcreate_parser.add_argument('-p', '--path', help='Path of the pool', metavar='PATH')
poolcreate_parser.add_argument('--thinpool', help='Existing thin pool to use with lvm', metavar='THINPOOL')
poolcreate_parser.add_argument('pool')
poolcreate_parser.set_defaults(func=create_pool)
pooldelete_desc = 'Delete Pool'
pooldelete_parser = delete_subparsers.add_parser('pool', description=pooldelete_desc, help=pooldelete_desc)
pooldelete_parser.add_argument('-d', '--delete', action='store_true')
pooldelete_parser.add_argument('-f', '--full', action='store_true')
pooldelete_parser.add_argument('-p', '--path', help='Path of the pool', metavar='PATH')
pooldelete_parser.add_argument('--thinpool', help='Existing thin pool to use with lvm', metavar='THINPOOL')
pooldelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
pooldelete_parser.add_argument('pool')
pooldelete_parser.set_defaults(func=delete_pool)
poollist_desc = 'List Pools'
poollist_parser = list_subparsers.add_parser('pool', description=poollist_desc, help=poollist_desc,
aliases=['pools'])
poollist_parser.add_argument('--short', action='store_true')
poollist_parser.set_defaults(func=list_pool)
profiledelete_desc = 'Delete Profile'
profiledelete_help = "Profile to delete"
profiledelete_parser = argparse.ArgumentParser(add_help=False)
profiledelete_parser.add_argument('profile', help=profiledelete_help, metavar='PROFILE')
profiledelete_parser.set_defaults(func=delete_profile)
delete_subparsers.add_parser('profile', parents=[profiledelete_parser], description=profiledelete_desc,
help=profiledelete_desc)
productcreate_desc = 'Create Product'
productcreate_parser = create_subparsers.add_parser('product', description=productcreate_desc,
help=productcreate_desc)
productcreate_parser.add_argument('-g', '--group', help='Group to use as a name during deployment', metavar='GROUP')
productcreate_parser.add_argument('-l', '--latest', action='store_true', help='Grab latest version of the plans')
productcreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering within scripts. '
'Can be repeated several times', metavar='PARAM')
productcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
productcreate_parser.add_argument('-r', '--repo',
help='Repo to use, if deploying a product present in several repos',
metavar='REPO')
productcreate_parser.add_argument('product', metavar='PRODUCT')
productcreate_parser.set_defaults(func=create_product)
productinfo_desc = 'Info Of Product'
productinfo_epilog = "examples:\n%s" % productinfo
productinfo_parser = argparse.ArgumentParser(add_help=False)
productinfo_parser.set_defaults(func=info_product)
productinfo_parser.add_argument('-g', '--group', help='Only Display products of the indicated group',
metavar='GROUP')
productinfo_parser.add_argument('-r', '--repo', help='Only Display products of the indicated repository',
metavar='REPO')
productinfo_parser.add_argument('product', metavar='PRODUCT')
info_subparsers.add_parser('product', parents=[productinfo_parser], description=productinfo_desc,
help=productinfo_desc,
epilog=productinfo_epilog, formatter_class=rawhelp)
productlist_desc = 'List Products'
productlist_parser = list_subparsers.add_parser('product', description=productlist_desc, help=productlist_desc,
aliases=['products'])
productlist_parser.add_argument('-g', '--group', help='Only Display products of the indicated group',
metavar='GROUP')
productlist_parser.add_argument('-r', '--repo', help='Only Display products of the indicated repository',
metavar='REPO')
productlist_parser.add_argument('-s', '--search', help='Search matching products')
productlist_parser.set_defaults(func=list_product)
repocreate_desc = 'Create Repo'
repocreate_epilog = "examples:\n%s" % repocreate
repocreate_parser = create_subparsers.add_parser('repo', description=repocreate_desc, help=repocreate_desc,
epilog=repocreate_epilog,
formatter_class=rawhelp)
repocreate_parser.add_argument('-u', '--url', help='URL of the repo', metavar='URL', type=valid_url)
repocreate_parser.add_argument('repo')
repocreate_parser.set_defaults(func=create_repo)
repodelete_desc = 'Delete Repo'
repodelete_parser = delete_subparsers.add_parser('repo', description=repodelete_desc, help=repodelete_desc)
repodelete_parser.add_argument('repo')
repodelete_parser.set_defaults(func=delete_repo)
repolist_desc = 'List Repos'
repolist_parser = list_subparsers.add_parser('repo', description=repolist_desc, help=repolist_desc,
aliases=['repos'])
repolist_parser.set_defaults(func=list_repo)
repoupdate_desc = 'Update Repo'
repoupdate_parser = update_subparsers.add_parser('repo', description=repoupdate_desc, help=repoupdate_desc)
repoupdate_parser.add_argument('repo')
repoupdate_parser.set_defaults(func=update_repo)
coreosinstallerdownload_desc = 'Download Coreos Installer'
coreosinstallerdownload_parser = argparse.ArgumentParser(add_help=False)
coreosinstallerdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
coreosinstallerdownload_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
coreosinstallerdownload_parser.set_defaults(func=download_coreos_installer)
download_subparsers.add_parser('coreos-installer', parents=[coreosinstallerdownload_parser],
description=coreosinstallerdownload_desc,
help=coreosinstallerdownload_desc)
imagedownload_desc = 'Download Cloud Image'
imagedownload_help = "Image to download. Choose between \n%s" % '\n'.join(IMAGES.keys())
imagedownload_parser = argparse.ArgumentParser(add_help=False)
imagedownload_parser.add_argument('-a', '--arch', help='Target arch', choices=['x86_64', 'aarch64'],
default='x86_64')
imagedownload_parser.add_argument('-c', '--cmd', help='Extra command to launch after downloading', metavar='CMD')
imagedownload_parser.add_argument('-o', '--openstack', help='Use openstack variant (kvm specific)',
action='store_true')
imagedownload_parser.add_argument('-p', '--pool', help='Pool to use. Defaults to default', metavar='POOL')
imagedownload_parser.add_argument('-u', '--url', help='Url to use', metavar='URL', type=valid_url)
imagedownload_parser.add_argument('--size', help='Disk size (kubevirt specific)', type=int, metavar='SIZE')
imagedownload_parser.add_argument('-s', '--skip-profile', help='Skip Profile update', action='store_true')
imagedownload_parser.add_argument('image', help=imagedownload_help, metavar='IMAGE')
imagedownload_parser.set_defaults(func=download_image)
download_subparsers.add_parser('image', parents=[imagedownload_parser], description=imagedownload_desc,
help=imagedownload_desc)
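# Image download sketch; the image name must be one of IMAGES.keys() defined
# elsewhere in this module (the value below is an assumption):
#   kcli download image -p default centos8stream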
isodownload_desc = 'Download Iso'
isodownload_help = "Iso name"
isodownload_parser = argparse.ArgumentParser(add_help=False)
isodownload_parser.add_argument('-p', '--pool', help='Pool to use. Defaults to default', metavar='POOL')
isodownload_parser.add_argument('-u', '--url', help='Url to use', metavar='URL', required=True, type=valid_url)
isodownload_parser.add_argument('iso', help=isodownload_help, metavar='ISO', nargs='?')
isodownload_parser.set_defaults(func=download_iso)
download_subparsers.add_parser('iso', parents=[isodownload_parser], description=isodownload_desc,
help=isodownload_desc)
okddownload_desc = 'Download Okd Installer'
okddownload_parser = argparse.ArgumentParser(add_help=False)
okddownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
okddownload_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
okddownload_parser.set_defaults(func=download_okd_installer)
download_subparsers.add_parser('okd-installer', parents=[okddownload_parser],
description=okddownload_desc,
help=okddownload_desc)
openshiftdownload_desc = 'Download Openshift Installer'
openshiftdownload_parser = argparse.ArgumentParser(add_help=False)
openshiftdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
openshiftdownload_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
openshiftdownload_parser.set_defaults(func=download_openshift_installer)
download_subparsers.add_parser('openshift-installer', parents=[openshiftdownload_parser],
description=openshiftdownload_desc,
help=openshiftdownload_desc)
helmdownload_desc = 'Download Helm'
helmdownload_parser = argparse.ArgumentParser(add_help=False)
helmdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
helmdownload_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
helmdownload_parser.set_defaults(func=download_helm)
download_subparsers.add_parser('helm', parents=[helmdownload_parser],
description=helmdownload_desc,
help=helmdownload_desc)
kubectldownload_desc = 'Download Kubectl'
kubectldownload_parser = argparse.ArgumentParser(add_help=False)
kubectldownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
kubectldownload_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
kubectldownload_parser.set_defaults(func=download_kubectl)
download_subparsers.add_parser('kubectl', parents=[kubectldownload_parser],
description=kubectldownload_desc,
help=kubectldownload_desc)
ocdownload_desc = 'Download Oc'
ocdownload_parser = argparse.ArgumentParser(add_help=False)
ocdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
ocdownload_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
ocdownload_parser.set_defaults(func=download_oc)
download_subparsers.add_parser('oc', parents=[ocdownload_parser],
description=ocdownload_desc,
help=ocdownload_desc)
plandownload_desc = 'Download Plan'
plandownload_parser = argparse.ArgumentParser(add_help=False)
plandownload_parser.add_argument('-u', '--url', help='Url to use', metavar='URL', required=True, type=valid_url)
plandownload_parser.add_argument('plan', metavar='PLAN', nargs='?')
plandownload_parser.set_defaults(func=download_plan)
download_subparsers.add_parser('plan', parents=[plandownload_parser], description=plandownload_desc,
help=plandownload_desc)
imagelist_desc = 'List Images'
imagelist_parser = list_subparsers.add_parser('image', description=imagelist_desc, help=imagelist_desc,
aliases=['images'])
imagelist_parser.set_defaults(func=list_image)
vmcreate_desc = 'Create Vm'
vmcreate_epilog = "examples:\n%s" % vmcreate
vmcreate_parser = argparse.ArgumentParser(add_help=False)
vmcreate_parser.add_argument('-p', '--profile', help='Profile to use', metavar='PROFILE')
vmcreate_parser.add_argument('--console', help='Directly switch to console after creation', action='store_true')
vmcreate_parser.add_argument('-c', '--count', help='How many vms to create', type=int, default=1, metavar='COUNT')
vmcreate_parser.add_argument('-i', '--image', help='Image to use', metavar='IMAGE')
vmcreate_parser.add_argument('--profilefile', help='File to load profiles from', metavar='PROFILEFILE')
vmcreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
vmcreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
vmcreate_parser.add_argument('-s', '--serial', help='Directly switch to serial console after creation',
action='store_true')
vmcreate_parser.add_argument('-w', '--wait', action='store_true', help='Wait for cloudinit to finish')
vmcreate_parser.add_argument('name', metavar='VMNAME', nargs='?', type=valid_fqdn)
vmcreate_parser.set_defaults(func=create_vm)
create_subparsers.add_parser('vm', parents=[vmcreate_parser], description=vmcreate_desc, help=vmcreate_desc,
epilog=vmcreate_epilog, formatter_class=rawhelp)
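# Typical vm creation sketch; the image name and -P overrides are assumptions, not
# a fixed schema:
#   kcli create vm -i centos8stream -P memory=2048 -P numcpus=2 myvm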
vmdelete_desc = 'Delete Vm'
vmdelete_parser = argparse.ArgumentParser(add_help=False)
vmdelete_parser.add_argument('-c', '--count', help='How many vms to delete', type=int, default=1, metavar='COUNT')
vmdelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
vmdelete_parser.add_argument('-s', '--snapshots', action='store_true', help='Remove snapshots if needed')
vmdelete_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmdelete_parser.set_defaults(func=delete_vm)
delete_subparsers.add_parser('vm', parents=[vmdelete_parser], description=vmdelete_desc, help=vmdelete_desc)
vmdatacreate_desc = 'Create Cloudinit/Ignition for a single vm'
vmdatacreate_epilog = "examples:\n%s" % vmdatacreate
vmdatacreate_parser = create_subparsers.add_parser('vm-data', description=vmdatacreate_desc,
help=vmdatacreate_desc, epilog=vmdatacreate_epilog,
formatter_class=rawhelp)
vmdatacreate_parser.add_argument('-i', '--image', help='Image to use', metavar='IMAGE')
vmdatacreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
vmdatacreate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
vmdatacreate_parser.add_argument('name', metavar='VMNAME', nargs='?', type=valid_fqdn)
vmdatacreate_parser.set_defaults(func=create_vmdata)
vmdiskadd_desc = 'Add Disk To Vm'
diskcreate_epilog = "examples:\n%s" % diskcreate
vmdiskadd_parser = argparse.ArgumentParser(add_help=False)
vmdiskadd_parser.add_argument('-s', '--size', type=int, help='Size of the disk to add, in GB', metavar='SIZE',
default=10)
vmdiskadd_parser.add_argument('-i', '--image', help='Name or Path of a Image', metavar='IMAGE')
vmdiskadd_parser.add_argument('--interface', default='virtio', help='Disk Interface. Defaults to virtio',
metavar='INTERFACE')
vmdiskadd_parser.add_argument('-n', '--novm', action='store_true', help='Dont attach to any vm')
vmdiskadd_parser.add_argument('-p', '--pool', default='default', help='Pool', metavar='POOL')
vmdiskadd_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
vmdiskadd_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
vmdiskadd_parser.add_argument('name', metavar='VMNAME')
vmdiskadd_parser.set_defaults(func=create_vmdisk)
create_subparsers.add_parser('vm-disk', parents=[vmdiskadd_parser], description=vmdiskadd_desc, help=vmdiskadd_desc,
aliases=['disk'], epilog=diskcreate_epilog,
formatter_class=rawhelp)
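# Disk addition sketch relying on the defaults above (virtio interface, pool
# 'default'); the size and vm name are placeholders:
#   kcli create vm-disk -s 20 myvm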
vmdiskdelete_desc = 'Delete Vm Disk'
diskdelete_epilog = "examples:\n%s" % diskdelete
vmdiskdelete_parser = argparse.ArgumentParser(add_help=False)
vmdiskdelete_parser.add_argument('-n', '--novm', action='store_true', help='Dont try to locate vm')
vmdiskdelete_parser.add_argument('--vm', help='Name of the vm', metavar='VMNAME')
vmdiskdelete_parser.add_argument('-p', '--pool', default='default', help='Pool', metavar='POOL')
vmdiskdelete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')
vmdiskdelete_parser.add_argument('diskname', metavar='DISKNAME')
vmdiskdelete_parser.set_defaults(func=delete_vmdisk)
delete_subparsers.add_parser('vm-disk', parents=[vmdiskdelete_parser], description=vmdiskdelete_desc,
aliases=['disk'], help=vmdiskdelete_desc, epilog=diskdelete_epilog,
formatter_class=rawhelp)
vmdisklist_desc = 'List All Vm Disks'
vmdisklist_parser = argparse.ArgumentParser(add_help=False)
vmdisklist_parser.set_defaults(func=list_vmdisk)
list_subparsers.add_parser('disk', parents=[vmdisklist_parser], description=vmdisklist_desc,
help=vmdisklist_desc, aliases=['disks'])
vminfo_desc = 'Info Of Vms'
vminfo_parser = argparse.ArgumentParser(add_help=False)
vminfo_parser.add_argument('-f', '--fields', help='Display corresponding list of fields, '
'separated by a comma', metavar='FIELDS')
vminfo_parser.add_argument('-o', '--output', choices=['plain', 'yaml'], help='Format of the output')
vminfo_parser.add_argument('-v', '--values', action='store_true', help='Only report values')
vminfo_parser.add_argument('names', help='VMNAMES', nargs='*')
vminfo_parser.set_defaults(func=info_vm)
info_subparsers.add_parser('vm', parents=[vminfo_parser], description=vminfo_desc, help=vminfo_desc)
vmlist_desc = 'List Vms'
vmlist_parser = argparse.ArgumentParser(add_help=False)
vmlist_parser.add_argument('--filters', choices=('up', 'down'))
vmlist_parser.set_defaults(func=list_vm)
list_subparsers.add_parser('vm', parents=[vmlist_parser], description=vmlist_desc, help=vmlist_desc,
aliases=['vms'])
create_vmnic_desc = 'Add Nic To Vm'
create_vmnic_epilog = "examples:\n%s" % niccreate
create_vmnic_parser = argparse.ArgumentParser(add_help=False)
create_vmnic_parser.add_argument('-n', '--network', help='Network', metavar='NETWORK')
create_vmnic_parser.add_argument('name', metavar='VMNAME')
create_vmnic_parser.set_defaults(func=create_vmnic)
create_subparsers.add_parser('vm-nic', parents=[create_vmnic_parser], description=create_vmnic_desc,
help=create_vmnic_desc, aliases=['nic'],
epilog=create_vmnic_epilog, formatter_class=rawhelp)
delete_vmnic_desc = 'Delete Nic From vm'
delete_vmnic_epilog = "examples:\n%s" % nicdelete
delete_vmnic_parser = argparse.ArgumentParser(add_help=False)
delete_vmnic_parser.add_argument('-i', '--interface', help='Interface name', metavar='INTERFACE')
delete_vmnic_parser.add_argument('-n', '--network', help='Network', metavar='NETWORK')
delete_vmnic_parser.add_argument('name', metavar='VMNAME')
delete_vmnic_parser.set_defaults(func=delete_vmnic)
delete_subparsers.add_parser('vm-nic', parents=[delete_vmnic_parser], description=delete_vmnic_desc,
help=delete_vmnic_desc, aliases=['nic'],
epilog=delete_vmnic_epilog, formatter_class=rawhelp)
vmrestart_desc = 'Restart Vms'
vmrestart_parser = restart_subparsers.add_parser('vm', description=vmrestart_desc, help=vmrestart_desc)
vmrestart_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmrestart_parser.set_defaults(func=restart_vm)
vmsnapshotcreate_desc = 'Create Snapshot Of Vm'
vmsnapshotcreate_parser = create_subparsers.add_parser('vm-snapshot', description=vmsnapshotcreate_desc,
help=vmsnapshotcreate_desc, aliases=['snapshot'])
vmsnapshotcreate_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotcreate_parser.add_argument('snapshot')
vmsnapshotcreate_parser.set_defaults(func=snapshotcreate_vm)
vmsnapshotdelete_desc = 'Delete Snapshot Of Vm'
vmsnapshotdelete_parser = delete_subparsers.add_parser('vm-snapshot', description=vmsnapshotdelete_desc,
help=vmsnapshotdelete_desc)
vmsnapshotdelete_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotdelete_parser.add_argument('snapshot')
vmsnapshotdelete_parser.set_defaults(func=snapshotdelete_vm)
vmsnapshotlist_desc = 'List Snapshots Of Vm'
vmsnapshotlist_parser = list_subparsers.add_parser('vm-snapshot', description=vmsnapshotlist_desc,
help=vmsnapshotlist_desc, aliases=['vm-snapshots'])
vmsnapshotlist_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotlist_parser.set_defaults(func=snapshotlist_vm)
vmsnapshotrevert_desc = 'Revert Snapshot Of Vm'
vmsnapshotrevert_parser = revert_subparsers.add_parser('vm-snapshot', description=vmsnapshotrevert_desc,
help=vmsnapshotrevert_desc, aliases=['vm'])
vmsnapshotrevert_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotrevert_parser.add_argument('snapshot')
vmsnapshotrevert_parser.set_defaults(func=snapshotrevert_vm)
vmstart_desc = 'Start Vms'
vmstart_parser = argparse.ArgumentParser(add_help=False)
vmstart_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmstart_parser.set_defaults(func=start_vm)
start_subparsers.add_parser('vm', parents=[vmstart_parser], description=vmstart_desc, help=vmstart_desc)
vmstop_desc = 'Stop Vms'
vmstop_parser = argparse.ArgumentParser(add_help=False)
vmstop_parser.add_argument('-s', '--soft', action='store_true', help='Do a soft stop')
vmstop_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmstop_parser.set_defaults(func=stop_vm)
stop_subparsers.add_parser('vm', parents=[vmstop_parser], description=vmstop_desc, help=vmstop_desc)
vmupdate_desc = 'Update Vm\'s Ip, Memory Or Numcpus'
vmupdate_parser = update_subparsers.add_parser('vm', description=vmupdate_desc, help=vmupdate_desc)
vmupdate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
vmupdate_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')
    vmupdate_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmupdate_parser.set_defaults(func=update_vm)
argcomplete.autocomplete(parser)
if len(sys.argv) == 1 or (len(sys.argv) == 3 and sys.argv[1] == '-C'):
parser.print_help()
sys.exit(0)
args = parser.parse_args()
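    # Added note (comment only, not in the original source): when no handler function
    # was resolved, the block below walks the "subcommand_*" dest attributes that
    # argparse filled in, prints the help of the matching (sub)subparser, and exits,
    # so an incomplete command shows usage instead of a traceback.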
if not hasattr(args, 'func'):
for attr in dir(args):
if attr.startswith('subcommand_') and getattr(args, attr) is None:
split = attr.split('_')
if len(split) == 2:
subcommand = split[1]
get_subparser_print_help(parser, subcommand)
elif len(split) == 3:
subcommand = split[1]
subsubcommand = split[2]
subparser = get_subparser(parser, subcommand)
get_subparser_print_help(subparser, subsubcommand)
sys.exit(0)
sys.exit(0)
elif args.func.__name__ == 'vmcreate' and args.client is not None and ',' in args.client:
args.client = random.choice(args.client.split(','))
pprint("Selecting %s for creation" % args.client)
args.func(args)
if __name__ == '__main__':
cli()
| 48.887546
| 120
| 0.651456
|
3bf7f25653e6dd2a5bfde349116f7a0420fc5fc1
| 37,698
|
py
|
Python
|
zerver/tests/test_event_queue.py
|
Signior-X/zulip
|
7f0381d4c7f17706e7840ca54c171db940f0d885
|
[
"Apache-2.0"
] | 1
|
2020-07-07T05:28:25.000Z
|
2020-07-07T05:28:25.000Z
|
zerver/tests/test_event_queue.py
|
Signior-X/zulip
|
7f0381d4c7f17706e7840ca54c171db940f0d885
|
[
"Apache-2.0"
] | 10
|
2018-11-26T23:16:45.000Z
|
2019-02-18T23:17:03.000Z
|
zerver/tests/test_event_queue.py
|
Signior-X/zulip
|
7f0381d4c7f17706e7840ca54c171db940f0d885
|
[
"Apache-2.0"
] | 2
|
2017-08-22T12:52:22.000Z
|
2019-03-29T05:20:32.000Z
|
import time
from typing import Any, Callable, Collection, Dict, List
from unittest import mock
import orjson
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import do_change_subscription_property, do_mute_topic
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock, mock_queue_publish
from zerver.lib.user_groups import create_user_group, remove_user_from_user_group
from zerver.models import Recipient, Stream, Subscription, UserProfile, get_stream
from zerver.tornado.event_queue import (
ClientDescriptor,
allocate_client_descriptor,
get_client_descriptor,
maybe_enqueue_notifications,
missedmessage_hook,
persistent_queue_filename,
process_notification,
)
from zerver.tornado.views import cleanup_event_queue, get_events
class MissedMessageNotificationsTest(ZulipTestCase):
"""Tests the logic for when missed-message notifications
should be triggered, based on user settings"""
def test_maybe_enqueue_notifications(self) -> None:
# We've already tested the "when to send notifications" logic as part of the
# notification_data module.
# This test is for verifying whether `maybe_enqueue_notifications` returns the
# `already_notified` data correctly.
params = self.get_maybe_enqueue_notifications_parameters(
message_id=1, user_id=1, acting_user_id=2
)
with mock_queue_publish(
"zerver.tornado.event_queue.queue_json_publish"
) as mock_queue_json_publish:
notified = maybe_enqueue_notifications(**params)
mock_queue_json_publish.assert_not_called()
with mock_queue_publish(
"zerver.tornado.event_queue.queue_json_publish"
) as mock_queue_json_publish:
params["user_notifications_data"] = self.create_user_notifications_data_object(
user_id=1, pm_push_notify=True, pm_email_notify=True
)
notified = maybe_enqueue_notifications(**params)
            self.assertEqual(mock_queue_json_publish.call_count, 2)
queues_pushed = [entry[0][0] for entry in mock_queue_json_publish.call_args_list]
self.assertIn("missedmessage_mobile_notifications", queues_pushed)
self.assertIn("missedmessage_emails", queues_pushed)
self.assertTrue(notified["email_notified"])
self.assertTrue(notified["push_notified"])
with mock_queue_publish(
"zerver.tornado.event_queue.queue_json_publish"
) as mock_queue_json_publish:
params = self.get_maybe_enqueue_notifications_parameters(
message_id=1,
acting_user_id=2,
user_id=3,
mention_push_notify=True,
mention_email_notify=True,
mentioned_user_group_id=33,
)
notified = maybe_enqueue_notifications(**params)
            self.assertEqual(mock_queue_json_publish.call_count, 2)
push_notice = mock_queue_json_publish.call_args_list[0][0][1]
self.assertEqual(push_notice["mentioned_user_group_id"], 33)
email_notice = mock_queue_json_publish.call_args_list[1][0][1]
self.assertEqual(email_notice["mentioned_user_group_id"], 33)
def tornado_call(
self,
view_func: Callable[[HttpRequest, UserProfile], HttpResponse],
user_profile: UserProfile,
post_data: Dict[str, Any],
) -> HttpResponse:
request = HostRequestMock(post_data, user_profile)
return view_func(request, user_profile)
def test_stream_watchers(self) -> None:
"""
We used to have a bug with stream_watchers, where we set their flags to
None.
"""
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
realm = hamlet.realm
stream_name = "Denmark"
self.unsubscribe(hamlet, stream_name)
queue_data = dict(
all_public_streams=True,
apply_markdown=True,
client_gravatar=True,
client_type_name="home grown API program",
event_types=["message"],
last_connection_time=time.time(),
queue_timeout=0,
realm_id=realm.id,
user_profile_id=hamlet.id,
)
client = allocate_client_descriptor(queue_data)
self.send_stream_message(cordelia, stream_name)
self.assert_length(client.event_queue.contents(), 1)
# This next line of code should silently succeed and basically do
# nothing under the covers. This test is here to prevent a bug
# from re-appearing.
missedmessage_hook(
user_profile_id=hamlet.id,
client=client,
last_for_client=True,
)
def test_end_to_end_missedmessage_hook(self) -> None:
"""Tests what arguments missedmessage_hook passes into maybe_enqueue_notifications.
Combined with the previous test, this ensures that the missedmessage_hook is correct"""
user_profile = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
user_profile.enable_online_push_notifications = False
user_profile.save()
iago = self.example_user("iago")
# Fetch the Denmark stream for testing
stream = get_stream("Denmark", user_profile.realm)
sub = Subscription.objects.get(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
recipient__type_id=stream.id,
)
self.login_user(user_profile)
def change_subscription_properties(
user_profile: UserProfile,
stream: Stream,
sub: Subscription,
properties: Dict[str, bool],
) -> None:
for property_name, value in properties.items():
do_change_subscription_property(
user_profile, sub, stream, property_name, value, acting_user=None
)
def allocate_event_queue() -> ClientDescriptor:
result = self.tornado_call(
get_events,
user_profile,
{
"apply_markdown": orjson.dumps(True).decode(),
"client_gravatar": orjson.dumps(True).decode(),
"event_types": orjson.dumps(["message"]).decode(),
"user_client": "website",
"dont_block": orjson.dumps(True).decode(),
},
)
self.assert_json_success(result)
queue_id = orjson.loads(result.content)["queue_id"]
return get_client_descriptor(queue_id)
def destroy_event_queue(queue_id: str) -> None:
result = self.tornado_call(cleanup_event_queue, user_profile, {"queue_id": queue_id})
self.assert_json_success(result)
def assert_maybe_enqueue_notifications_call_args(
args_dict: Collection[Any],
message_id: int,
**kwargs: Any,
) -> None:
expected_args_dict = self.get_maybe_enqueue_notifications_parameters(
user_id=user_profile.id,
acting_user_id=iago.id,
message_id=message_id,
**kwargs,
)
self.assertEqual(args_dict, expected_args_dict)
client_descriptor = allocate_event_queue()
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
# To test the missed_message hook, we first need to send a message
msg_id = self.send_stream_message(iago, "Denmark")
# Verify that nothing happens if you call it as not the
# "last client descriptor", in which case the function
# short-circuits, since the `missedmessage_hook` handler
# for garbage-collection is only for the user's last queue.
missedmessage_hook(user_profile.id, client_descriptor, False)
mock_enqueue.assert_not_called()
# Now verify that we called the appropriate enqueue function
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
already_notified={"email_notified": False, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
# Test the hook with a private message; this should trigger notifications
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_personal_message(iago, user_profile)
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
pm_email_notify=True,
pm_push_notify=True,
already_notified={"email_notified": True, "push_notified": True},
)
destroy_event_queue(client_descriptor.event_queue.id)
        # If `enable_offline_email_notifications` is disabled, email notifications shouldn't
# be sent even for PMs
user_profile.enable_offline_email_notifications = False
user_profile.save()
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_personal_message(iago, user_profile)
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
pm_email_notify=False,
pm_push_notify=True,
already_notified={"email_notified": False, "push_notified": True},
)
destroy_event_queue(client_descriptor.event_queue.id)
user_profile.enable_offline_email_notifications = True
user_profile.save()
# Test the hook with a mention; this should trigger notifications
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(
self.example_user("iago"), "Denmark", content="@**King Hamlet** what's up?"
)
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
mention_push_notify=True,
mention_email_notify=True,
already_notified={"email_notified": True, "push_notified": True},
)
destroy_event_queue(client_descriptor.event_queue.id)
        # If `enable_offline_push_notifications` is disabled, push notifications shouldn't
# be sent even for mentions
user_profile.enable_offline_push_notifications = False
user_profile.save()
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_personal_message(iago, user_profile)
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
pm_email_notify=True,
pm_push_notify=False,
already_notified={"email_notified": True, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
user_profile.enable_offline_push_notifications = True
user_profile.save()
# Test the hook with a wildcard mention; this should trigger notifications
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
wildcard_mention_notify=True,
already_notified={"email_notified": True, "push_notified": True},
)
destroy_event_queue(client_descriptor.event_queue.id)
# Wildcard mentions in muted streams don't notify.
change_subscription_properties(user_profile, stream, sub, {"is_muted": True})
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
wildcard_mention_notify=False,
message_id=msg_id,
already_notified={"email_notified": False, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
change_subscription_properties(user_profile, stream, sub, {"is_muted": False})
# With wildcard_mentions_notify=False, we treat the user as not mentioned.
user_profile.wildcard_mentions_notify = False
user_profile.save()
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
wildcard_mention_notify=False,
already_notified={"email_notified": False, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
user_profile.wildcard_mentions_notify = True
user_profile.save()
# If wildcard_mentions_notify=True for a stream and False for a user, we treat the user
# as mentioned for that stream.
user_profile.wildcard_mentions_notify = False
sub.wildcard_mentions_notify = True
user_profile.save()
sub.save()
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
wildcard_mention_notify=True,
already_notified={"email_notified": True, "push_notified": True},
)
destroy_event_queue(client_descriptor.event_queue.id)
user_profile.wildcard_mentions_notify = True
sub.wildcard_mentions_notify = None
user_profile.save()
sub.save()
# Test with a user group mention
hamlet_and_cordelia = create_user_group(
"hamlet_and_cordelia", [user_profile, cordelia], cordelia.realm
)
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(
iago, "Denmark", content="@*hamlet_and_cordelia* what's up?"
)
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
mention_push_notify=True,
mention_email_notify=True,
mentioned_user_group_id=hamlet_and_cordelia.id,
already_notified={"email_notified": True, "push_notified": True},
)
destroy_event_queue(client_descriptor.event_queue.id)
remove_user_from_user_group(user_profile, hamlet_and_cordelia)
remove_user_from_user_group(cordelia, hamlet_and_cordelia)
# Test the hook with a stream message with stream_push_notify
change_subscription_properties(user_profile, stream, sub, {"push_notifications": True})
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(iago, "Denmark", content="what's up everyone?")
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
stream_push_notify=True,
stream_email_notify=False,
already_notified={"email_notified": False, "push_notified": True},
)
destroy_event_queue(client_descriptor.event_queue.id)
# Test the hook with a stream message with stream_email_notify
client_descriptor = allocate_event_queue()
change_subscription_properties(
user_profile, stream, sub, {"push_notifications": False, "email_notifications": True}
)
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_stream_message(iago, "Denmark", content="what's up everyone?")
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
stream_push_notify=False,
stream_email_notify=True,
already_notified={"email_notified": True, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
# Test the hook with stream message with stream_push_notify on
# a muted topic, which we should not push notify for
client_descriptor = allocate_event_queue()
change_subscription_properties(
user_profile, stream, sub, {"push_notifications": True, "email_notifications": False}
)
self.assertTrue(client_descriptor.event_queue.empty())
do_mute_topic(user_profile, stream, "mutingtest")
msg_id = self.send_stream_message(
iago,
"Denmark",
content="what's up everyone?",
topic_name="mutingtest",
)
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
already_notified={"email_notified": False, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
# Test the hook with stream message with stream_email_notify on
# a muted stream, which we should not push notify for
client_descriptor = allocate_event_queue()
change_subscription_properties(
user_profile, stream, sub, {"push_notifications": False, "email_notifications": True}
)
self.assertTrue(client_descriptor.event_queue.empty())
change_subscription_properties(user_profile, stream, sub, {"is_muted": True})
msg_id = self.send_stream_message(iago, "Denmark", content="what's up everyone?")
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
already_notified={"email_notified": False, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
# Clean up the state we just changed (not necessary unless we add more test code below)
change_subscription_properties(
user_profile, stream, sub, {"push_notifications": True, "is_muted": False}
)
# Test the hook when the sender has been muted
result = self.api_post(user_profile, f"/api/v1/users/me/muted_users/{iago.id}")
self.assert_json_success(result)
client_descriptor = allocate_event_queue()
self.assertTrue(client_descriptor.event_queue.empty())
msg_id = self.send_personal_message(iago, user_profile)
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
missedmessage_hook(user_profile.id, client_descriptor, True)
mock_enqueue.assert_called_once()
args_dict = mock_enqueue.call_args_list[0][1]
assert_maybe_enqueue_notifications_call_args(
args_dict=args_dict,
message_id=msg_id,
sender_is_muted=True,
pm_email_notify=True,
pm_push_notify=True,
already_notified={"email_notified": False, "push_notified": False},
)
destroy_event_queue(client_descriptor.event_queue.id)
result = self.api_delete(user_profile, f"/api/v1/users/me/muted_users/{iago.id}")
self.assert_json_success(result)
class FileReloadLogicTest(ZulipTestCase):
def test_persistent_queue_filename(self) -> None:
with self.settings(
JSON_PERSISTENT_QUEUE_FILENAME_PATTERN="/home/zulip/tornado/event_queues%s.json"
):
self.assertEqual(
persistent_queue_filename(9800), "/home/zulip/tornado/event_queues.json"
)
self.assertEqual(
persistent_queue_filename(9800, last=True),
"/home/zulip/tornado/event_queues.json.last",
)
with self.settings(
JSON_PERSISTENT_QUEUE_FILENAME_PATTERN="/home/zulip/tornado/event_queues%s.json",
TORNADO_PROCESSES=4,
):
self.assertEqual(
persistent_queue_filename(9800), "/home/zulip/tornado/event_queues.9800.json"
)
self.assertEqual(
persistent_queue_filename(9800, last=True),
"/home/zulip/tornado/event_queues.9800.last.json",
)
class PruneInternalDataTest(ZulipTestCase):
def test_prune_internal_data(self) -> None:
user_profile = self.example_user("hamlet")
queue_data = dict(
all_public_streams=True,
apply_markdown=True,
client_gravatar=True,
client_type_name="website",
event_types=["message"],
last_connection_time=time.time(),
queue_timeout=600,
realm_id=user_profile.realm.id,
user_profile_id=user_profile.id,
)
client = allocate_client_descriptor(queue_data)
self.assertTrue(client.event_queue.empty())
self.send_stream_message(
self.example_user("iago"), "Denmark", content="@**King Hamlet** what's up?"
)
self.send_stream_message(
self.example_user("iago"), "Denmark", content="@**all** what's up?"
)
self.send_personal_message(self.example_user("iago"), user_profile)
events = client.event_queue.contents()
self.assert_length(events, 3)
self.assertFalse("internal_data" in events[0])
self.assertFalse("internal_data" in events[1])
self.assertFalse("internal_data" in events[2])
events = client.event_queue.contents(include_internal_data=True)
self.assertTrue("internal_data" in events[0])
self.assertTrue("internal_data" in events[1])
self.assertTrue("internal_data" in events[2])
class EventQueueTest(ZulipTestCase):
def get_client_descriptor(self) -> ClientDescriptor:
hamlet = self.example_user("hamlet")
realm = hamlet.realm
queue_data = dict(
all_public_streams=False,
apply_markdown=False,
client_gravatar=True,
client_type_name="website",
event_types=None,
last_connection_time=time.time(),
queue_timeout=0,
realm_id=realm.id,
user_profile_id=hamlet.id,
)
client = allocate_client_descriptor(queue_data)
return client
def verify_to_dict_end_to_end(self, client: ClientDescriptor) -> None:
client_dict = client.to_dict()
new_client = ClientDescriptor.from_dict(client_dict)
self.assertEqual(client.to_dict(), new_client.to_dict())
client_dict = client.to_dict()
del client_dict["event_queue"]["newest_pruned_id"]
new_client = ClientDescriptor.from_dict(client_dict)
self.assertEqual(client_dict, new_client.to_dict())
def test_one_event(self) -> None:
client = self.get_client_descriptor()
queue = client.event_queue
in_dict = dict(
type="arbitrary",
x="foo",
y=42,
z=False,
timestamp="1",
)
out_dict = dict(
id=0,
**in_dict,
)
queue.push(in_dict)
self.assertFalse(queue.empty())
self.verify_to_dict_end_to_end(client)
self.assertEqual(queue.contents(), [out_dict])
self.verify_to_dict_end_to_end(client)
def test_event_collapsing(self) -> None:
client = self.get_client_descriptor()
queue = client.event_queue
"""
The update_message_flags events are special, because
they can be collapsed together. Given two umfe's, we:
* use the latest timestamp
* concatenate the messages
"""
def umfe(timestamp: int, messages: List[int]) -> Dict[str, Any]:
return dict(
type="update_message_flags",
operation="add",
flag="read",
all=False,
timestamp=timestamp,
messages=messages,
)
events = [
umfe(timestamp=1, messages=[101]),
umfe(timestamp=2, messages=[201, 202]),
dict(type="unknown"),
dict(type="restart", server_generation="1"),
umfe(timestamp=3, messages=[301, 302, 303]),
dict(type="restart", server_generation="2"),
umfe(timestamp=4, messages=[401, 402, 403, 404]),
]
for event in events:
queue.push(event)
self.verify_to_dict_end_to_end(client)
self.assertEqual(
queue.contents(),
[
dict(id=2, type="unknown"),
dict(id=5, type="restart", server_generation="2"),
dict(
id=6,
type="update_message_flags",
operation="add",
flag="read",
all=False,
timestamp=4,
messages=[101, 201, 202, 301, 302, 303, 401, 402, 403, 404],
),
],
)
"""
Note that calling queue.contents() has the side
effect that we will no longer be able to collapse
the previous events, so the next event will just
get added to the queue, rather than collapsed.
"""
queue.push(
umfe(timestamp=5, messages=[501, 502, 503, 504, 505]),
)
self.assertEqual(
queue.contents(),
[
dict(id=2, type="unknown"),
dict(id=5, type="restart", server_generation="2"),
dict(
id=6,
type="update_message_flags",
operation="add",
flag="read",
all=False,
timestamp=4,
messages=[101, 201, 202, 301, 302, 303, 401, 402, 403, 404],
),
dict(
id=7,
type="update_message_flags",
operation="add",
flag="read",
all=False,
timestamp=5,
messages=[501, 502, 503, 504, 505],
),
],
)
def test_flag_add_collapsing(self) -> None:
client = self.get_client_descriptor()
queue = client.event_queue
queue.push(
{
"type": "update_message_flags",
"flag": "read",
"operation": "add",
"all": False,
"messages": [1, 2, 3, 4],
"timestamp": "1",
}
)
self.verify_to_dict_end_to_end(client)
queue.push(
{
"type": "update_message_flags",
"flag": "read",
"all": False,
"operation": "add",
"messages": [5, 6],
"timestamp": "1",
}
)
self.verify_to_dict_end_to_end(client)
self.assertEqual(
queue.contents(),
[
{
"id": 1,
"type": "update_message_flags",
"all": False,
"flag": "read",
"operation": "add",
"messages": [1, 2, 3, 4, 5, 6],
"timestamp": "1",
}
],
)
self.verify_to_dict_end_to_end(client)
def test_flag_remove_collapsing(self) -> None:
client = self.get_client_descriptor()
queue = client.event_queue
queue.push(
{
"type": "update_message_flags",
"flag": "collapsed",
"operation": "remove",
"all": False,
"messages": [1, 2, 3, 4],
"timestamp": "1",
}
)
self.verify_to_dict_end_to_end(client)
queue.push(
{
"type": "update_message_flags",
"flag": "collapsed",
"all": False,
"operation": "remove",
"messages": [5, 6],
"timestamp": "1",
}
)
self.verify_to_dict_end_to_end(client)
self.assertEqual(
queue.contents(),
[
{
"id": 1,
"type": "update_message_flags",
"all": False,
"flag": "collapsed",
"operation": "remove",
"messages": [1, 2, 3, 4, 5, 6],
"timestamp": "1",
}
],
)
self.verify_to_dict_end_to_end(client)
def test_collapse_event(self) -> None:
"""
        This mostly focuses on the internals of
how we store "virtual_events" that we
can collapse if subsequent events are
of the same form. See the code in
EventQueue.push for more context.
"""
client = self.get_client_descriptor()
queue = client.event_queue
queue.push({"type": "restart", "server_generation": 1, "timestamp": "1"})
# Verify the server_generation event is stored as a virtual event
self.assertEqual(
queue.virtual_events,
{"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
)
# And we can reconstruct newest_pruned_id etc.
self.verify_to_dict_end_to_end(client)
queue.push({"type": "unknown", "timestamp": "1"})
self.assertEqual(list(queue.queue), [{"id": 1, "type": "unknown", "timestamp": "1"}])
self.assertEqual(
queue.virtual_events,
{"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
)
# And we can still reconstruct newest_pruned_id etc. correctly
self.verify_to_dict_end_to_end(client)
# Verify virtual events are converted to real events by .contents()
self.assertEqual(
queue.contents(),
[
{"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"},
{"id": 1, "type": "unknown", "timestamp": "1"},
],
)
# And now verify to_dict after pruning
queue.prune(0)
self.verify_to_dict_end_to_end(client)
queue.prune(1)
self.verify_to_dict_end_to_end(client)
class SchemaMigrationsTests(ZulipTestCase):
def test_reformat_legacy_send_message_event(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
old_format_event = dict(
type="message",
message=1,
message_dict={},
presence_idle_user_ids=[hamlet.id, othello.id],
)
old_format_users = [
dict(
id=hamlet.id,
flags=["mentioned"],
mentioned=True,
online_push_enabled=True,
stream_push_notify=False,
stream_email_notify=True,
wildcard_mention_notify=False,
sender_is_muted=False,
),
dict(
id=cordelia.id,
flags=["wildcard_mentioned"],
mentioned=False,
online_push_enabled=True,
stream_push_notify=True,
stream_email_notify=False,
wildcard_mention_notify=True,
sender_is_muted=False,
),
]
notice = dict(event=old_format_event, users=old_format_users)
expected_current_format_users = [
dict(
id=hamlet.id,
flags=["mentioned"],
),
dict(
id=cordelia.id,
flags=["wildcard_mentioned"],
),
]
expected_current_format_event = dict(
type="message",
message=1,
message_dict={},
presence_idle_user_ids=[hamlet.id, othello.id],
online_push_user_ids=[hamlet.id, cordelia.id],
stream_push_user_ids=[cordelia.id],
stream_email_user_ids=[hamlet.id],
wildcard_mention_user_ids=[cordelia.id],
muted_sender_user_ids=[],
)
with mock.patch("zerver.tornado.event_queue.process_message_event") as m:
process_notification(notice)
m.assert_called_once()
self.assertDictEqual(m.call_args[0][0], expected_current_format_event)
self.assertEqual(m.call_args[0][1], expected_current_format_users)
| 41.155022
| 98
| 0.612048
|
79b8d8200432ce95ac878bfcbfc5eb342ccbdc76
| 1,814
|
py
|
Python
|
src/collectors/loadavg/loadavg.py
|
lreed/Diamond
|
2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80
|
[
"MIT"
] | 1
|
2015-03-13T00:29:53.000Z
|
2015-03-13T00:29:53.000Z
|
src/collectors/loadavg/loadavg.py
|
jwilder/Diamond
|
d5c59eaa4441ac671e3ba7b922e89650b8d9e52a
|
[
"MIT"
] | null | null | null |
src/collectors/loadavg/loadavg.py
|
jwilder/Diamond
|
d5c59eaa4441ac671e3ba7b922e89650b8d9e52a
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Uses /proc/loadavg to collect data on load average
#### Dependencies
* /proc/loadavg
"""
import diamond.collector
import re
import os
from diamond.collector import str_to_bool
_RE = re.compile(r'([\d.]+) ([\d.]+) ([\d.]+) (\d+)/(\d+)')
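# Illustrative /proc/loadavg line matched by the regex above (sample value, not
# taken from this repository):
#   0.20 0.18 0.12 1/285 11206
# i.e. the 1/5/15-minute load averages, running/total scheduling entities, last PID.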
class LoadAverageCollector(diamond.collector.Collector):
PROC = '/proc/loadavg'
def get_default_config_help(self):
config_help = super(LoadAverageCollector,
self).get_default_config_help()
config_help.update({
'simple': 'Only collect the 1 minute load average'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(LoadAverageCollector, self).get_default_config()
config.update({
'enabled': 'True',
'path': 'loadavg',
'method': 'Threaded',
'simple': 'False'
})
return config
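    # Example collector configuration (illustrative sketch; the .conf layout is an
    # assumption based on Diamond's usual per-collector config files):
    #   enabled = True
    #   simple = False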
def collect(self):
if not os.access(self.PROC, os.R_OK):
self.log.error("Can not read path %s" % self.PROC)
return None
file = open(self.PROC)
for line in file:
match = _RE.match(line)
if match:
if not str_to_bool(self.config['simple']):
self.publish_gauge('01', float(match.group(1)), 2)
self.publish_gauge('05', float(match.group(2)), 2)
self.publish_gauge('15', float(match.group(3)), 2)
else:
self.publish_gauge('load', float(match.group(1)), 2)
self.publish_gauge('processes_running', int(match.group(4)))
self.publish_gauge('processes_total', int(match.group(5)))
file.close()
| 28.793651
| 76
| 0.553473
|
0fcf5b0cb2c136431e751c9afba7e7adc3a1ac5c
| 2,305
|
py
|
Python
|
api/app/main/model/user.py
|
PrathamBooks/assisted-translations
|
bc68bf50abf5752304eb2ca87a1cebc8f1308591
|
[
"MIT"
] | null | null | null |
api/app/main/model/user.py
|
PrathamBooks/assisted-translations
|
bc68bf50abf5752304eb2ca87a1cebc8f1308591
|
[
"MIT"
] | null | null | null |
api/app/main/model/user.py
|
PrathamBooks/assisted-translations
|
bc68bf50abf5752304eb2ca87a1cebc8f1308591
|
[
"MIT"
] | null | null | null |
from .. import db, flask_bcrypt
import datetime
from app.main.model.blacklist import BlacklistToken
from ..config import key
import jwt
class User(db.Model):
""" User Model for storing user related details """
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(255), unique=True, nullable=False)
registered_on = db.Column(db.DateTime, nullable=False)
admin = db.Column(db.Boolean, nullable=False, default=False)
public_id = db.Column(db.String(100), unique=True)
username = db.Column(db.String(50), unique=True)
password_hash = db.Column(db.String(100))
@property
def password(self):
raise AttributeError('password: write-only field')
@password.setter
def password(self, password):
self.password_hash = flask_bcrypt.generate_password_hash(password).decode('utf-8')
def check_password(self, password):
return flask_bcrypt.check_password_hash(self.password_hash, password)
@staticmethod
def encode_auth_token(user_id):
"""
Generates the Auth Token
:return: string
"""
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
key,
algorithm='HS256'
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Decodes the auth token
:param auth_token:
:return: integer|string
"""
try:
payload = jwt.decode(auth_token, key)
is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
if is_blacklisted_token:
return 'Token blacklisted. Please log in again.'
else:
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
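    # Usage sketch (illustrative; the caller variables are assumptions, not from
    # this module):
    #   token = User.encode_auth_token(user.id)
    #   result = User.decode_auth_token(token)  # user id on success, error string otherwise
    # Note: PyJWT 2.x would require jwt.decode(auth_token, key, algorithms=["HS256"]).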
def __repr__(self):
return "<User '{}'>".format(self.username)
| 31.575342
| 90
| 0.611714
|
9e9f9b62a7ff1e7ba3b5045f38c6aaa0c4b05049
| 25,828
|
py
|
Python
|
sdks/python/appcenter_sdk/models/LogFlowDevice.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | null | null | null |
sdks/python/appcenter_sdk/models/LogFlowDevice.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 6
|
2019-10-23T06:38:53.000Z
|
2022-01-22T07:57:58.000Z
|
sdks/python/appcenter_sdk/models/LogFlowDevice.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 2
|
2019-10-23T06:31:05.000Z
|
2021-08-21T17:32:47.000Z
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class LogFlowDevice(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'sdk_name': 'string',
'sdk_version': 'string',
'wrapper_sdk_version': 'string',
'wrapper_sdk_name': 'string',
'model': 'string',
'oem_name': 'string',
'os_name': 'string',
'os_version': 'string',
'os_build': 'string',
'os_api_level': 'integer',
'locale': 'string',
'time_zone_offset': 'integer',
'screen_size': 'string',
'app_version': 'string',
'carrier_name': 'string',
'carrier_code': 'string',
'carrier_country': 'string',
'app_build': 'string',
'app_namespace': 'string',
'live_update_release_label': 'string',
'live_update_deployment_key': 'string',
'live_update_package_hash': 'string',
'wrapper_runtime_version': 'string'
}
attribute_map = {
'sdk_name': 'sdk_name',
'sdk_version': 'sdk_version',
'wrapper_sdk_version': 'wrapper_sdk_version',
'wrapper_sdk_name': 'wrapper_sdk_name',
'model': 'model',
'oem_name': 'oem_name',
'os_name': 'os_name',
'os_version': 'os_version',
'os_build': 'os_build',
'os_api_level': 'os_api_level',
'locale': 'locale',
'time_zone_offset': 'time_zone_offset',
'screen_size': 'screen_size',
'app_version': 'app_version',
'carrier_name': 'carrier_name',
'carrier_code': 'carrier_code',
'carrier_country': 'carrier_country',
'app_build': 'app_build',
'app_namespace': 'app_namespace',
'live_update_release_label': 'live_update_release_label',
'live_update_deployment_key': 'live_update_deployment_key',
'live_update_package_hash': 'live_update_package_hash',
'wrapper_runtime_version': 'wrapper_runtime_version'
}
def __init__(self, sdk_name=None, sdk_version=None, wrapper_sdk_version=None, wrapper_sdk_name=None, model=None, oem_name=None, os_name=None, os_version=None, os_build=None, os_api_level=None, locale=None, time_zone_offset=None, screen_size=None, app_version=None, carrier_name=None, carrier_code=None, carrier_country=None, app_build=None, app_namespace=None, live_update_release_label=None, live_update_deployment_key=None, live_update_package_hash=None, wrapper_runtime_version=None): # noqa: E501
"""LogFlowDevice - a model defined in Swagger""" # noqa: E501
self._sdk_name = None
self._sdk_version = None
self._wrapper_sdk_version = None
self._wrapper_sdk_name = None
self._model = None
self._oem_name = None
self._os_name = None
self._os_version = None
self._os_build = None
self._os_api_level = None
self._locale = None
self._time_zone_offset = None
self._screen_size = None
self._app_version = None
self._carrier_name = None
self._carrier_code = None
self._carrier_country = None
self._app_build = None
self._app_namespace = None
self._live_update_release_label = None
self._live_update_deployment_key = None
self._live_update_package_hash = None
self._wrapper_runtime_version = None
self.discriminator = None
self.sdk_name = sdk_name
self.sdk_version = sdk_version
if wrapper_sdk_version is not None:
self.wrapper_sdk_version = wrapper_sdk_version
if wrapper_sdk_name is not None:
self.wrapper_sdk_name = wrapper_sdk_name
if model is not None:
self.model = model
if oem_name is not None:
self.oem_name = oem_name
self.os_name = os_name
self.os_version = os_version
if os_build is not None:
self.os_build = os_build
if os_api_level is not None:
self.os_api_level = os_api_level
self.locale = locale
self.time_zone_offset = time_zone_offset
if screen_size is not None:
self.screen_size = screen_size
self.app_version = app_version
if carrier_name is not None:
self.carrier_name = carrier_name
if carrier_code is not None:
self.carrier_code = carrier_code
if carrier_country is not None:
self.carrier_country = carrier_country
self.app_build = app_build
if app_namespace is not None:
self.app_namespace = app_namespace
if live_update_release_label is not None:
self.live_update_release_label = live_update_release_label
if live_update_deployment_key is not None:
self.live_update_deployment_key = live_update_deployment_key
if live_update_package_hash is not None:
self.live_update_package_hash = live_update_package_hash
if wrapper_runtime_version is not None:
self.wrapper_runtime_version = wrapper_runtime_version
@property
def sdk_name(self):
"""Gets the sdk_name of this LogFlowDevice. # noqa: E501
Name of the SDK. Consists of the name of the SDK and the platform, e.g. "appcenter.ios", "hockeysdk.android".
# noqa: E501
:return: The sdk_name of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._sdk_name
@sdk_name.setter
def sdk_name(self, sdk_name):
"""Sets the sdk_name of this LogFlowDevice.
Name of the SDK. Consists of the name of the SDK and the platform, e.g. "appcenter.ios", "hockeysdk.android".
# noqa: E501
:param sdk_name: The sdk_name of this LogFlowDevice. # noqa: E501
:type: string
"""
if sdk_name is None:
raise ValueError("Invalid value for `sdk_name`, must not be `None`") # noqa: E501
self._sdk_name = sdk_name
@property
def sdk_version(self):
"""Gets the sdk_version of this LogFlowDevice. # noqa: E501
Version of the SDK in semver format, e.g. "1.2.0" or "0.12.3-alpha.1".
# noqa: E501
:return: The sdk_version of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._sdk_version
@sdk_version.setter
def sdk_version(self, sdk_version):
"""Sets the sdk_version of this LogFlowDevice.
Version of the SDK in semver format, e.g. "1.2.0" or "0.12.3-alpha.1".
# noqa: E501
:param sdk_version: The sdk_version of this LogFlowDevice. # noqa: E501
:type: string
"""
if sdk_version is None:
raise ValueError("Invalid value for `sdk_version`, must not be `None`") # noqa: E501
self._sdk_version = sdk_version
@property
def wrapper_sdk_version(self):
"""Gets the wrapper_sdk_version of this LogFlowDevice. # noqa: E501
Version of the wrapper SDK in semver format. When the SDK is embedding another base SDK (for example Xamarin.Android wraps Android), the Xamarin specific version is populated into this field while sdkVersion refers to the original Android SDK.
# noqa: E501
:return: The wrapper_sdk_version of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._wrapper_sdk_version
@wrapper_sdk_version.setter
def wrapper_sdk_version(self, wrapper_sdk_version):
"""Sets the wrapper_sdk_version of this LogFlowDevice.
Version of the wrapper SDK in semver format. When the SDK is embedding another base SDK (for example Xamarin.Android wraps Android), the Xamarin specific version is populated into this field while sdkVersion refers to the original Android SDK.
# noqa: E501
:param wrapper_sdk_version: The wrapper_sdk_version of this LogFlowDevice. # noqa: E501
:type: string
"""
self._wrapper_sdk_version = wrapper_sdk_version
@property
def wrapper_sdk_name(self):
"""Gets the wrapper_sdk_name of this LogFlowDevice. # noqa: E501
Name of the wrapper SDK. Consists of the name of the SDK and the wrapper platform, e.g. "appcenter.xamarin", "hockeysdk.cordova".
# noqa: E501
:return: The wrapper_sdk_name of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._wrapper_sdk_name
@wrapper_sdk_name.setter
def wrapper_sdk_name(self, wrapper_sdk_name):
"""Sets the wrapper_sdk_name of this LogFlowDevice.
Name of the wrapper SDK. Consists of the name of the SDK and the wrapper platform, e.g. "appcenter.xamarin", "hockeysdk.cordova".
# noqa: E501
:param wrapper_sdk_name: The wrapper_sdk_name of this LogFlowDevice. # noqa: E501
:type: string
"""
self._wrapper_sdk_name = wrapper_sdk_name
@property
def model(self):
"""Gets the model of this LogFlowDevice. # noqa: E501
Device model (example: iPad2,3).
# noqa: E501
:return: The model of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._model
@model.setter
def model(self, model):
"""Sets the model of this LogFlowDevice.
Device model (example: iPad2,3).
# noqa: E501
:param model: The model of this LogFlowDevice. # noqa: E501
:type: string
"""
self._model = model
@property
def oem_name(self):
"""Gets the oem_name of this LogFlowDevice. # noqa: E501
Device manufacturer (example: HTC).
# noqa: E501
:return: The oem_name of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._oem_name
@oem_name.setter
def oem_name(self, oem_name):
"""Sets the oem_name of this LogFlowDevice.
Device manufacturer (example: HTC).
# noqa: E501
:param oem_name: The oem_name of this LogFlowDevice. # noqa: E501
:type: string
"""
self._oem_name = oem_name
@property
def os_name(self):
"""Gets the os_name of this LogFlowDevice. # noqa: E501
OS name (example: iOS). The following OS names are standardized (non-exclusive): Android, iOS, macOS, tvOS, Windows.
# noqa: E501
:return: The os_name of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._os_name
@os_name.setter
def os_name(self, os_name):
"""Sets the os_name of this LogFlowDevice.
OS name (example: iOS). The following OS names are standardized (non-exclusive): Android, iOS, macOS, tvOS, Windows.
# noqa: E501
:param os_name: The os_name of this LogFlowDevice. # noqa: E501
:type: string
"""
if os_name is None:
raise ValueError("Invalid value for `os_name`, must not be `None`") # noqa: E501
self._os_name = os_name
@property
def os_version(self):
"""Gets the os_version of this LogFlowDevice. # noqa: E501
OS version (example: 9.3.0).
# noqa: E501
:return: The os_version of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._os_version
@os_version.setter
def os_version(self, os_version):
"""Sets the os_version of this LogFlowDevice.
OS version (example: 9.3.0).
# noqa: E501
:param os_version: The os_version of this LogFlowDevice. # noqa: E501
:type: string
"""
if os_version is None:
raise ValueError("Invalid value for `os_version`, must not be `None`") # noqa: E501
self._os_version = os_version
@property
def os_build(self):
"""Gets the os_build of this LogFlowDevice. # noqa: E501
OS build code (example: LMY47X).
# noqa: E501
:return: The os_build of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._os_build
@os_build.setter
def os_build(self, os_build):
"""Sets the os_build of this LogFlowDevice.
OS build code (example: LMY47X).
# noqa: E501
:param os_build: The os_build of this LogFlowDevice. # noqa: E501
:type: string
"""
self._os_build = os_build
@property
def os_api_level(self):
"""Gets the os_api_level of this LogFlowDevice. # noqa: E501
API level when applicable like in Android (example: 15).
# noqa: E501
:return: The os_api_level of this LogFlowDevice. # noqa: E501
:rtype: integer
"""
return self._os_api_level
@os_api_level.setter
def os_api_level(self, os_api_level):
"""Sets the os_api_level of this LogFlowDevice.
API level when applicable like in Android (example: 15).
# noqa: E501
:param os_api_level: The os_api_level of this LogFlowDevice. # noqa: E501
:type: integer
"""
self._os_api_level = os_api_level
@property
def locale(self):
"""Gets the locale of this LogFlowDevice. # noqa: E501
Language code (example: en_US).
# noqa: E501
:return: The locale of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this LogFlowDevice.
Language code (example: en_US).
# noqa: E501
:param locale: The locale of this LogFlowDevice. # noqa: E501
:type: string
"""
if locale is None:
raise ValueError("Invalid value for `locale`, must not be `None`") # noqa: E501
self._locale = locale
@property
def time_zone_offset(self):
"""Gets the time_zone_offset of this LogFlowDevice. # noqa: E501
The offset in minutes from UTC for the device time zone, including daylight savings time.
# noqa: E501
:return: The time_zone_offset of this LogFlowDevice. # noqa: E501
:rtype: integer
"""
return self._time_zone_offset
@time_zone_offset.setter
def time_zone_offset(self, time_zone_offset):
"""Sets the time_zone_offset of this LogFlowDevice.
The offset in minutes from UTC for the device time zone, including daylight savings time.
# noqa: E501
:param time_zone_offset: The time_zone_offset of this LogFlowDevice. # noqa: E501
:type: integer
"""
if time_zone_offset is None:
raise ValueError("Invalid value for `time_zone_offset`, must not be `None`") # noqa: E501
self._time_zone_offset = time_zone_offset
@property
def screen_size(self):
"""Gets the screen_size of this LogFlowDevice. # noqa: E501
Screen size of the device in pixels (example: 640x480).
# noqa: E501
:return: The screen_size of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._screen_size
@screen_size.setter
def screen_size(self, screen_size):
"""Sets the screen_size of this LogFlowDevice.
Screen size of the device in pixels (example: 640x480).
# noqa: E501
:param screen_size: The screen_size of this LogFlowDevice. # noqa: E501
:type: string
"""
self._screen_size = screen_size
@property
def app_version(self):
"""Gets the app_version of this LogFlowDevice. # noqa: E501
Application version name, e.g. 1.1.0
# noqa: E501
:return: The app_version of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._app_version
@app_version.setter
def app_version(self, app_version):
"""Sets the app_version of this LogFlowDevice.
Application version name, e.g. 1.1.0
# noqa: E501
:param app_version: The app_version of this LogFlowDevice. # noqa: E501
:type: string
"""
if app_version is None:
raise ValueError("Invalid value for `app_version`, must not be `None`") # noqa: E501
self._app_version = app_version
@property
def carrier_name(self):
"""Gets the carrier_name of this LogFlowDevice. # noqa: E501
Carrier name (for mobile devices).
# noqa: E501
:return: The carrier_name of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._carrier_name
@carrier_name.setter
def carrier_name(self, carrier_name):
"""Sets the carrier_name of this LogFlowDevice.
Carrier name (for mobile devices).
# noqa: E501
:param carrier_name: The carrier_name of this LogFlowDevice. # noqa: E501
:type: string
"""
self._carrier_name = carrier_name
@property
def carrier_code(self):
"""Gets the carrier_code of this LogFlowDevice. # noqa: E501
Carrier country code (for mobile devices).
# noqa: E501
:return: The carrier_code of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._carrier_code
@carrier_code.setter
def carrier_code(self, carrier_code):
"""Sets the carrier_code of this LogFlowDevice.
Carrier country code (for mobile devices).
# noqa: E501
:param carrier_code: The carrier_code of this LogFlowDevice. # noqa: E501
:type: string
"""
self._carrier_code = carrier_code
@property
def carrier_country(self):
"""Gets the carrier_country of this LogFlowDevice. # noqa: E501
Carrier country.
# noqa: E501
:return: The carrier_country of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._carrier_country
@carrier_country.setter
def carrier_country(self, carrier_country):
"""Sets the carrier_country of this LogFlowDevice.
Carrier country.
# noqa: E501
:param carrier_country: The carrier_country of this LogFlowDevice. # noqa: E501
:type: string
"""
self._carrier_country = carrier_country
@property
def app_build(self):
"""Gets the app_build of this LogFlowDevice. # noqa: E501
The app's build number, e.g. 42.
# noqa: E501
:return: The app_build of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._app_build
@app_build.setter
def app_build(self, app_build):
"""Sets the app_build of this LogFlowDevice.
The app's build number, e.g. 42.
# noqa: E501
:param app_build: The app_build of this LogFlowDevice. # noqa: E501
:type: string
"""
if app_build is None:
raise ValueError("Invalid value for `app_build`, must not be `None`") # noqa: E501
self._app_build = app_build
@property
def app_namespace(self):
"""Gets the app_namespace of this LogFlowDevice. # noqa: E501
        The bundle identifier, package identifier, or namespace, depending on what the individual platforms use, e.g. com.microsoft.example.
# noqa: E501
:return: The app_namespace of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._app_namespace
@app_namespace.setter
def app_namespace(self, app_namespace):
"""Sets the app_namespace of this LogFlowDevice.
        The bundle identifier, package identifier, or namespace, depending on what the individual platforms use, e.g. com.microsoft.example.
# noqa: E501
:param app_namespace: The app_namespace of this LogFlowDevice. # noqa: E501
:type: string
"""
self._app_namespace = app_namespace
@property
def live_update_release_label(self):
"""Gets the live_update_release_label of this LogFlowDevice. # noqa: E501
Label that is used to identify application code 'version' released via Live Update beacon running on device
# noqa: E501
:return: The live_update_release_label of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._live_update_release_label
@live_update_release_label.setter
def live_update_release_label(self, live_update_release_label):
"""Sets the live_update_release_label of this LogFlowDevice.
Label that is used to identify application code 'version' released via Live Update beacon running on device
# noqa: E501
:param live_update_release_label: The live_update_release_label of this LogFlowDevice. # noqa: E501
:type: string
"""
self._live_update_release_label = live_update_release_label
@property
def live_update_deployment_key(self):
"""Gets the live_update_deployment_key of this LogFlowDevice. # noqa: E501
Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.
# noqa: E501
:return: The live_update_deployment_key of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._live_update_deployment_key
@live_update_deployment_key.setter
def live_update_deployment_key(self, live_update_deployment_key):
"""Sets the live_update_deployment_key of this LogFlowDevice.
Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.
# noqa: E501
:param live_update_deployment_key: The live_update_deployment_key of this LogFlowDevice. # noqa: E501
:type: string
"""
self._live_update_deployment_key = live_update_deployment_key
@property
def live_update_package_hash(self):
"""Gets the live_update_package_hash of this LogFlowDevice. # noqa: E501
        Hash of all files (ReactNative or Cordova) deployed to the device via the LiveUpdate beacon. Helps identify the release version on the device and whether updates need to be downloaded in the future.
# noqa: E501
:return: The live_update_package_hash of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._live_update_package_hash
@live_update_package_hash.setter
def live_update_package_hash(self, live_update_package_hash):
"""Sets the live_update_package_hash of this LogFlowDevice.
        Hash of all files (ReactNative or Cordova) deployed to the device via the LiveUpdate beacon. Helps identify the release version on the device and whether updates need to be downloaded in the future.
# noqa: E501
:param live_update_package_hash: The live_update_package_hash of this LogFlowDevice. # noqa: E501
:type: string
"""
self._live_update_package_hash = live_update_package_hash
@property
def wrapper_runtime_version(self):
"""Gets the wrapper_runtime_version of this LogFlowDevice. # noqa: E501
Version of the wrapper technology framework (Xamarin runtime version or ReactNative or Cordova etc...). See wrapper_sdk_name to see if this version refers to Xamarin or ReactNative or other.
# noqa: E501
:return: The wrapper_runtime_version of this LogFlowDevice. # noqa: E501
:rtype: string
"""
return self._wrapper_runtime_version
@wrapper_runtime_version.setter
def wrapper_runtime_version(self, wrapper_runtime_version):
"""Sets the wrapper_runtime_version of this LogFlowDevice.
Version of the wrapper technology framework (Xamarin runtime version or ReactNative or Cordova etc...). See wrapper_sdk_name to see if this version refers to Xamarin or ReactNative or other.
# noqa: E501
:param wrapper_runtime_version: The wrapper_runtime_version of this LogFlowDevice. # noqa: E501
:type: string
"""
self._wrapper_runtime_version = wrapper_runtime_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LogFlowDevice):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
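# Minimal usage sketch (illustrative values, not taken from the App Center docs):
#   device = LogFlowDevice(sdk_name="appcenter.ios", sdk_version="1.2.0",
#                          os_name="iOS", os_version="9.3.0", locale="en_US",
#                          time_zone_offset=-420, app_version="1.1.0", app_build="42")
#   payload = device.to_dict()  # plain dict ready for JSON serialization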
| 33.070423
| 505
| 0.642404
|
90f76bb529c5d9ef06d67a765113bf84c805446a
| 10,747
|
py
|
Python
|
ems/Enquiry_record.py
|
pushpa-kumari123/Enquiry_Management_System
|
4c9cb4daaaa46bb32812391cf97851a48a05aef0
|
[
"Apache-2.0"
] | null | null | null |
ems/Enquiry_record.py
|
pushpa-kumari123/Enquiry_Management_System
|
4c9cb4daaaa46bb32812391cf97851a48a05aef0
|
[
"Apache-2.0"
] | null | null | null |
ems/Enquiry_record.py
|
pushpa-kumari123/Enquiry_Management_System
|
4c9cb4daaaa46bb32812391cf97851a48a05aef0
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import pymysql as db
my_window = Tk()
my_window.title("our final project/login/Enquiry record")
my_window.geometry("1366x768")
my_window.configure(bg="grey")
my_window.resizable(1, 1)
L1=Label(my_window,text="ENQUIRY MANAGEMENT SYSTEM",bg="lavender",fg="blue",font=("Algerian",40))
L1.pack(fill=X)
def backf() :
my_window.destroy()
import Enquiry_page
def search() :
swindow=Toplevel()
swindow.geometry("800x600+30+30")
TableMargin = Frame(swindow, height="768", width="500")
TableMargin.place(x=60, y=60)
scrollbarx = Scrollbar(TableMargin, orient=HORIZONTAL)
scrollbary = Scrollbar(TableMargin, orient=VERTICAL)
tree = ttk.Treeview(TableMargin, columns=("slno", "a", "b", "c", "d", "e", "f"), height=23,
yscrollcommand=scrollbary.set,
xscrollcommand=scrollbarx.set)
scrollbary.config(command=tree.yview)
scrollbary.pack(side=RIGHT, fill=Y)
scrollbarx.config(command=tree.xview)
scrollbarx.pack(side=BOTTOM, fill=X)
tree.heading('slno', text="sl_no", anchor=W)
tree.heading('a', text="Full_name", anchor=W)
tree.heading('b', text="email", anchor=W)
tree.heading('c', text="qualification", anchor=W)
tree.heading('d', text="course_name", anchor=W)
tree.heading('e', text="Fee", anchor=W)
tree.heading('f', text="Duration", anchor=W)
tree.column('#0', minwidth=10, width=10)
tree.column('#1', minwidth=20, width=30)
tree.column('#2', minwidth=80, width=100)
tree.column('#3', minwidth=10, width=20)
tree.column('#4', minwidth=15, width=20)
tree.column('#5', minwidth=30, width=100)
tree.column('#6', minwidth=20, width=80)
tree.pack()
# table code end
if v7.get()=="name":
query = "select * from enquiry where `Full Name`='%s'"%(v10.get())
if v7.get() == "email":
query = "select * from enquiry where email ='%s'" % (v10.get())
if v7.get() == "qualification":
query = "select * from enquiry where qualification ='%s'" % (v10.get())
if v7.get() == "course":
query = "select * from enquiry where course_name='%s'" % (v10.get())
con = db.connect("localhost", "root", "", "enquiry")
cur = con.cursor()
cur.execute(query)
row = cur.fetchone()
while row is not None:
tree.insert('', 'end', values=(row))
row = cur.fetchone()
cur.close()
con.close()
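# A hedged alternative sketch (not wired into the window above): the queries in
# search()/insertdata() build SQL by string formatting, which breaks on quotes in
# the input and is open to SQL injection. PyMySQL can fill %s placeholders itself;
# newer PyMySQL releases also expect keyword arguments to connect().
def search_by_name_safe(full_name):
    con = db.connect(host="localhost", user="root", password="", database="enquiry")
    cur = con.cursor()
    # the driver escapes the bound value, so quotes in full_name are handled safely
    cur.execute("SELECT * FROM enquiry WHERE `Full Name` = %s", (full_name,))
    rows = cur.fetchall()
    cur.close()
    con.close()
    return rows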
L1=Label(my_window,text="Enquiry Record",bg="light pink",fg="blue",font=("arial black",20),width="18")
L1.place(x=450,y=75)
v1 = StringVar()
v2 = StringVar()
v3 = StringVar()
v4 = StringVar()
v5 = StringVar()
v6 = StringVar()
v7 = StringVar()
v10 = StringVar()
v13 = StringVar()
def insertdata():
r1 = v1.get()
r2 = v2.get()
r3 = v3.get()
r4 = v4.get()
r5 = v5.get()
r6 = v6.get()
query = "INSERT INTO `enquiry`(`Full Name`, `email`, `qualification`, `course_name`, `fee`, `duration`) VALUES ('%s','%s','%s','%s','%s','%s')" % (
r1, r2,r3,r4,r5,r6)
con = db.connect("localhost", "root", "", "enquiry")
cur = con.cursor()
cur.execute(query)
con.commit()
v1.set("")
v2.set("")
v3.set("")
v4.set("")
v5.set("")
v6.set("")
messagebox.showinfo("info", " Enquiry Record insertion successfull")
cur.close()
con.close()
# table code
TableMargin = Frame(my_window, height="768", width="1000")
TableMargin.place(x=300,y=125)
scrollbarx = Scrollbar(TableMargin, orient=HORIZONTAL)
scrollbary = Scrollbar(TableMargin, orient=VERTICAL)
tree = ttk.Treeview(TableMargin, columns=("slno","a", "b", "c", "d","e","f"), height=23, yscrollcommand=scrollbary.set,
xscrollcommand=scrollbarx.set)
scrollbary.config(command=tree.yview)
scrollbary.pack(side=RIGHT, fill=Y)
scrollbarx.config(command=tree.xview)
scrollbarx.pack(side=BOTTOM, fill=X)
tree.heading('slno', text="sl_no", anchor=W)
tree.heading('a', text="Full_name", anchor=W)
tree.heading('b', text="email", anchor=W)
tree.heading('c', text="qualification", anchor=W)
tree.heading('d', text="course_name", anchor=W)
tree.heading('e', text="fee.", anchor=W)
tree.heading('f', text="duration", anchor=W)
tree.column('#0', minwidth=10, width=10)
tree.column('#1', minwidth=20, width=30)
tree.column('#2', minwidth=80, width=100)
tree.column('#3', minwidth=10, width=20)
tree.column('#4', minwidth=15, width=20)
tree.column('#5', minwidth=30, width=100)
tree.column('#6', minwidth=20, width=80)
tree.pack()
# table code end
query = "select * from enquiry"
con = db.connect("localhost", "root", "", "enquiry")
cur = con.cursor()
cur.execute(query)
row = cur.fetchone()
while row is not None:
tree.insert('', 'end', values=(row))
row = cur.fetchone()
cur.close()
con.close()
def updatefinal():
tree.delete(*tree.get_children())
conn = db.connect("localhost", "root", "", "enquiry")
cursor = conn.cursor()
query1 = "UPDATE `enquiry` SET `Full Name`='%s',`email`='%s',`qualification`='%s',`course_name`='%s',`fee`='%s',`duration`='%s' WHERE `slno`=%d" % (
v1.get(), v2.get(),v3.get(),v4.get(),v5.get(),v6.get(),( slno))
cursor.execute(query1)
conn.commit()
cursor.execute("select * from enquiry")
fetch = cursor.fetchall()
for data in fetch:
tree.insert('', 'end', values=(data))
cursor.close()
conn.close()
def updatedata():
global UpdateWindow,slno,name,email,qualification,course_name,fee,duration
if not tree.selection():
result = messagebox.showwarning('update', 'Please Select Something First!')
else:
UpdateWindow = Toplevel()
UpdateWindow.title("Enquiry Update Panel")
UpdateWindow.geometry("1000x668")
F2 = Frame(UpdateWindow, height=600, width=800, bg="grey")
F2.place(x=40, y=40)
L01 = Label(F2, text="Update Enquiry Record", bg="pink", fg="red", font=("arial black", 15), width=20)
L01.place(x=250, y=40)
L2 = Label(F2, text="*Full Name", bg="light green", fg="white", font=("arial black", 14), width=18)
L2.place(x=100, y=100)
E1 = Entry(F2, width=40, font="40", bd=5, textvariable=v1)
E1.place(x=340, y=100)
L6 = Label(F2, text="*Email", bg="light green", fg="white", font=("arial black", 14), width=18)
L6.place(x=100, y=150)
E5 = Entry(F2, width=40, font="40", bd=5, textvariable=v2)
E5.place(x=340, y=150)
L2 = Label(F2, text=" Required Qualification", bg="light green", fg="white", font=("arial black", 14),
width=18)
L2.place(x=100, y=200)
CB = ttk.Combobox(F2, value=("Diploma", "B.tech", "Graduation", "ITI", "BCA"), width="52",
textvariable=v3)
CB.place(x=370, y=200)
L3 = Label(F2, text="course Interested", bg="light green", fg="white", font=("arial black", 14),
width=18)
L3.place(x=100, y=250)
CB1 = ttk.Combobox(F2, value=("Python", "Java", "C++", "O level", "PHP"), width="52", textvariable=v4)
CB1.place(x=370, y=250)
L7 = Label(F2, text="course Fee", bg="light green", fg="white", font=("arial black", 14), width=18)
L7.place(x=100, y=300)
E1 = Entry(F2, width=40, font="40", bd=5, textvariable=v5)
E1.place(x=340, y=300)
L8 = Label(F2, text="course Duration", bg="light green", fg="white", font=("arial black", 14), width=18)
L8.place(x=100, y=350)
E8 = Entry(F2, width=40, font="40", bd=5, textvariable=v6)
E8.place(x=340, y=350)
B3 = Button(F2, text="Back", font=("Arial black", 15), bg="orange", fg="black", command=backf)
B3.place(x=450, y=400)
b3 = Button(F2, text="Final update", font=("Arial black", 15), bg="pink", fg="black", bd=5,
activebackground="grey", command=updatefinal)
b3.place(x=600, y=400)
# get the data from database to New window form
curItem = tree.focus()
contents = (tree.item(curItem))
selecteditem = contents['values']
conn = db.connect("localhost", "root", "", "enquiry")
cursor = conn.cursor()
slno = selecteditem[0]
cursor.execute("select * from enquiry where slno=%d" %(selecteditem[0]))
row = cursor.fetchone()
v1.set(row[1])
v2.set(row[2])
v3.set(row[3])
v4.set(row[4])
v5.set(row[5])
v6.set(row[6])
# get the data from database to New window form
def del1():
if not tree.selection():
result = messagebox.showwarning('delete', 'Please Select Something First!')
else:
result = messagebox.askquestion('delete', 'Are you sure you want to delete this record?')
if result == 'yes':
curItem = tree.focus()
contents =(tree.item(curItem))
selecteditem = contents['values']
tree.delete(curItem)
conn = db.connect("localhost","root","","enquiry")
cursor = conn.cursor()
cursor.execute("DELETE FROM enquiry WHERE slno = %d" % selecteditem[0])
conn.commit()
cursor.close()
conn.close()
B2=Button(my_window,text="Delete",bd=5,font=("Arial black",15),bg="light grey",fg="black",width=8,command=del1,activebackground="grey",activeforeground="blue")
B2.place(x=10,y=170)
B1=Button(my_window,text="Update",bd=5,command=updatedata,font=("Arial black",15),bg="light grey",fg="black",width=8,activebackground="grey",activeforeground="blue")
B1.place(x=10,y=250)
B1=Button(my_window,text="Search by :",bd=5,font=("Arial black",15),bg="light grey",fg="black",width=10,activebackground="grey",activeforeground="blue")
B1.place(x=10,y=330)
CB9 = ttk.Combobox(my_window, value=("name", "email","qualification","course"), width="21",textvariable=v7)
CB9.place(x=10, y=390)
B3=Button(my_window,text="Go",bd=5,command=search,font=("Arial black",15),bg="light grey",fg="black",width=5,activebackground="grey",activeforeground="blue")
B3.place(x=10,y=470)
E7 = Entry(my_window, width=20, font="12", textvariable=v10, bd=5)
E7.place(x=10, y=430)
F1=Frame(my_window,height=50,width=1366,bg="#ffff00")
F1.place(x=0,y=650)
L7=Label(F1,text="Designed & Developed by : ",fg="red",bg="#ffff00",font=("Algerian",18),width="35")
L7.place(x=550,y=8)
L8=Label(F1,text="Pushpa Kumari",bg="#ffff00",fg="black",font=("arial black",13),width="20")
L8.place(x=1000,y=10)
my_window.mainloop()
| 37.058621
| 166
| 0.602494
|
256d8d8608c9c1638da672bdfbd7f6fd73633cab
| 82
|
py
|
Python
|
example/new/apps.py
|
mozumder/django-mozumder
|
887ce303249eac2d77de062fd57023dbc4b782dd
|
[
"MIT"
] | 1
|
2020-06-13T06:12:16.000Z
|
2020-06-13T06:12:16.000Z
|
example/new/apps.py
|
mozumder/django-mozumder
|
887ce303249eac2d77de062fd57023dbc4b782dd
|
[
"MIT"
] | 4
|
2020-06-18T03:53:29.000Z
|
2021-06-09T17:56:12.000Z
|
example/new/apps.py
|
mozumder/django-mozumder
|
887ce303249eac2d77de062fd57023dbc4b782dd
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class NewConfig(AppConfig):
name = 'new'
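# A minimal sketch (an assumption about the project settings, not part of this file):
# the app only takes effect once its config is listed in INSTALLED_APPS, e.g.
#
#     INSTALLED_APPS = [
#         # ... other apps ...
#         'new.apps.NewConfig',
#     ]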
| 11.714286
| 33
| 0.719512
|
269fd507ea3010854d7a8bac07ce5b15c8a2d550
| 2,092
|
py
|
Python
|
web3_multicall/multicall.py
|
BrunoMazorra/web3_multicall_blocknumber
|
2f12f6b6bb9853b10db90b968f5b0b75a9b1a7b4
|
[
"MIT"
] | null | null | null |
web3_multicall/multicall.py
|
BrunoMazorra/web3_multicall_blocknumber
|
2f12f6b6bb9853b10db90b968f5b0b75a9b1a7b4
|
[
"MIT"
] | null | null | null |
web3_multicall/multicall.py
|
BrunoMazorra/web3_multicall_blocknumber
|
2f12f6b6bb9853b10db90b968f5b0b75a9b1a7b4
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------ Imports ----------------------------------------------------------- #
# System
from typing import Optional, List
# Pip
from web3_wrapped_contract import WrappedContract
from web3.eth import Eth
from web3.contract import ContractFunction
# Local
from ._abi import abi
from ._utils import Function
from .models import AggregateResult, FunctionResult, Network
# -------------------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------------- class: MulticallContract --------------------------------------------------- #
class Multicall(WrappedContract):
# --------------------------------------------------------- Init --------------------------------------------------------- #
def __init__(
self,
eth: Eth,
address: Optional[str] = None
):
super().__init__(
eth,
address or Network(eth.chain_id).multicall_adddress,
abi
)
# ---------------------------------------------------- Public methods ---------------------------------------------------- #
    def aggregate(
        self,
        calls: List[ContractFunction],
        blockNumber
    ) -> AggregateResult:
        """Execute the given contract calls in a single Multicall aggregate() call.

        blockNumber is passed through to web3 as the block identifier, so every
        call in the batch is evaluated against the same block.
        """
        funcs = [Function(call) for call in calls]
        block_number, outputs = self.functions.aggregate(
            [[func.address, func.data] for func in funcs]
        ).call({}, blockNumber)
return AggregateResult(
block_number=block_number,
results=[
FunctionResult(
contract_address=func.address,
function_name=func.name,
inputs=func.inputs,
results=list(func.decode_output(output))
)
for func, output in zip(funcs, outputs)
]
)
# -------------------------------------------------------------------------------------------------------------------------------- #
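# A hedged usage sketch (provider URL, token address and ABI are placeholders, and the
# package-level import path is assumed from the repository layout, not verified here):
#
#     from web3 import Web3
#     from web3_multicall import Multicall
#
#     w3 = Web3(Web3.HTTPProvider("https://rpc.example.org"))
#     token = w3.eth.contract(address="0x0000000000000000000000000000000000000000", abi=ERC20_ABI)
#     multicall = Multicall(w3.eth)
#     result = multicall.aggregate(
#         [token.functions.symbol(), token.functions.decimals()],
#         14_000_000,  # blockNumber: every call is evaluated against this block
#     )
#     print(result.block_number, [r.results for r in result.results])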
| 32.184615
| 132
| 0.383365
|
28c2a1782a17ec321ce04dfa5cd91734af78eb7c
| 972
|
py
|
Python
|
scripts/deepsolar/deepsolar_test.py
|
thejeshgn/data
|
945af2e1269b914ff1e3169880169161f4137a58
|
[
"Apache-2.0"
] | 25
|
2020-07-18T04:44:57.000Z
|
2022-03-03T14:44:20.000Z
|
scripts/deepsolar/deepsolar_test.py
|
thejeshgn/data
|
945af2e1269b914ff1e3169880169161f4137a58
|
[
"Apache-2.0"
] | 276
|
2020-05-09T00:57:50.000Z
|
2022-03-29T23:27:38.000Z
|
scripts/deepsolar/deepsolar_test.py
|
thejeshgn/data
|
945af2e1269b914ff1e3169880169161f4137a58
|
[
"Apache-2.0"
] | 59
|
2020-05-09T00:45:30.000Z
|
2022-03-28T16:03:53.000Z
|
'''
Unit tests for deepsolar.py
Usage: python3 deepsolar_test.py
'''
import unittest
import os
import tempfile
from .deepsolar import write_csv
module_dir_ = os.path.dirname(__file__)
class TestDeepSolar(unittest.TestCase):
def test_write_csv(self):
with tempfile.TemporaryDirectory() as tmp_dir:
test_input = os.path.join(module_dir_, 'test_data/test_data.csv')
test_csv = os.path.join(tmp_dir, 'test_csv.csv')
write_csv(test_input, test_csv)
expected_csv = os.path.join(module_dir_,
'test_data/test_data_expected.csv')
with open(test_csv, 'r') as test:
test_str: str = test.read()
with open(expected_csv, 'r') as expected:
expected_str: str = expected.read()
self.assertEqual(test_str, expected_str)
os.remove(test_csv)
if __name__ == '__main__':
unittest.main()
| 29.454545
| 77
| 0.617284
|
d51ac2c8d329b828006307c9952c7e73519b06b0
| 3,683
|
py
|
Python
|
tests/conftest.py
|
yaniv1984/2021
|
31d55d8861edc044627cc36898bebbaa3983129b
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
yaniv1984/2021
|
31d55d8861edc044627cc36898bebbaa3983129b
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
yaniv1984/2021
|
31d55d8861edc044627cc36898bebbaa3983129b
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import allure
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from pages.about_page import AboutPage
from pages.forgot_password_page import ForgotPasswordPage
from pages.login_page import LoginPage
from pages.project_edit_page import ProjectEditPage
from pages.project_type_page import ProjectTypePage
from pages.projects_page import ProjectsPage
from pages.templates_page import TemplatesPage
from utils.config_parser import ConfigParserIni
from utils.config_parser import AllureEnvironmentParser
# reads parameters from pytest command line
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="chrome", help="browser that the automation will run in")
@pytest.fixture(scope="session")
# instantiates ini file parses object
def prep_properties():
config_reader = ConfigParserIni("props.ini")
return config_reader
@pytest.fixture(autouse=True)
# fetches the browser kind and base url, then writes a dictionary of key-value pairs into allure's environment.properties file
def write_allure_environment(prep_properties):
yield
env_parser = AllureEnvironmentParser("environment.properties")
env_parser.write_to_allure_env({"browser": browser, "base_url": base_url})
# https://stackoverflow.com/a/61433141/4515129
@pytest.fixture
# Instantiates Page Objects
def pages():
about_page = AboutPage(driver)
projects_page = ProjectsPage(driver)
forgot_password_page = ForgotPasswordPage(driver)
login_page = LoginPage(driver)
project_type_page = ProjectTypePage(driver)
templates_page = TemplatesPage(driver)
project_edit_page = ProjectEditPage(driver)
return locals()
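# A hedged usage sketch of the fixture above (the test body and the open() helper
# are placeholders, not taken from this repository): because pages() returns
# locals(), each page object is looked up by its variable name.
#
#     def test_login_page_is_reachable(pages):
#         pages["login_page"].open()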
@pytest.fixture(autouse=True)
# Performs setup and tear down
def create_driver(write_allure_environment, prep_properties, request):
global browser, base_url, driver
browser = request.config.option.browser
base_url = prep_properties.config_section_dict("Base Url")["base_url"]
if browser == "firefox":
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())
elif browser == "remote":
capabilities = {
"browserName": "chrome",
"browserVersion": "86.0",
"selenoid:options": {
"enableVNC": True,
"enableVideo": False
}
}
driver = webdriver.Remote(command_executor="http://localhost:4444/wd/hub", desired_capabilities=capabilities)
elif browser == "chrome_headless":
opts = webdriver.ChromeOptions()
opts.add_argument("--headless")
opts.add_argument("--disable-dev-shm-usage")
opts.add_argument("--no-sandbox")
driver = webdriver.Chrome(ChromeDriverManager().install(), options=opts)
else:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.implicitly_wait(5)
driver.maximize_window()
driver.get(base_url)
yield
if request.node.rep_call.failed:
screenshot_name = 'screenshot on failure: %s' % datetime.now().strftime('%d/%m/%Y, %H:%M:%S')
allure.attach(driver.get_screenshot_as_png(), name=screenshot_name,
attachment_type=allure.attachment_type.PNG)
driver.quit()
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep)
| 35.757282
| 118
| 0.732827
|
0a3fde05b95e825ae20826453ce033634163c0bd
| 5,691
|
py
|
Python
|
tests/ui/test_ui.py
|
stranac/voice-skill-sdk
|
8bfbbedf36ed4e4b2ff865deffe4dee804d57031
|
[
"MIT"
] | null | null | null |
tests/ui/test_ui.py
|
stranac/voice-skill-sdk
|
8bfbbedf36ed4e4b2ff865deffe4dee804d57031
|
[
"MIT"
] | null | null | null |
tests/ui/test_ui.py
|
stranac/voice-skill-sdk
|
8bfbbedf36ed4e4b2ff865deffe4dee804d57031
|
[
"MIT"
] | null | null | null |
#
# voice-skill-sdk
#
# (C) 2021, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
import json
import asyncio
import pathlib
import datetime
from datetime import date
from fastapi.testclient import TestClient
from skill_sdk import ui
from skill_sdk.utils import util
from skill_sdk.__version__ import __spi_version__
LOCALHOST = "http://localhost"
def test_list_intents_empty(app):
r = TestClient(app).get("/intents")
assert r.status_code == 200
assert r.json() == []
def test_models(monkeypatch):
param = ui.Parameter(**dict(name="Parameter", type="str", values=None))
assert json.loads(param.json()) == {
"name": "Parameter",
"type": "str",
"required": False,
"sample": None,
"values": [],
}
intent = ui.Intent(**dict(name="Intent"))
assert json.loads(intent.json()) == {
"name": "Intent",
"implementation": ["impl", "handle_Intent"],
"parameters": [],
}
def handle(i: int, t: str, f: float, dt: date):
...
with util.mock_date_today(datetime.date(2100, 12, 31)):
intent = ui.Intent.from_callable("Test_Intent", handle)
assert json.loads(intent.json()) == {
"name": "Test_Intent",
"implementation": ["test_ui", "handle"],
"parameters": [
{"name": "i", "type": "int", "required": True, "sample": 42, "values": []},
{
"name": "t",
"type": "str",
"required": True,
"sample": "string value",
"values": [],
},
{
"name": "f",
"type": "float",
"required": True,
"sample": 42.0242,
"values": [],
},
{
"name": "dt",
"type": "date",
"required": True,
"sample": "2100-12-31",
"values": [],
},
],
}
def test_list_intents(app):
@app.intent_handler("Test_Intent")
def handle(i: int, t: str, f: float, dt: date):
...
r = TestClient(app).get("/intents")
assert r.status_code == 200
assert r.json() == [
{
"name": "Test_Intent",
"implementation": ["test_ui", "handle"],
"parameters": [
{
"name": "i",
"type": "int",
"required": True,
"sample": ui.samples("int"),
"values": [],
},
{
"name": "t",
"type": "str",
"required": True,
"sample": ui.samples("str"),
"values": [],
},
{
"name": "f",
"type": "float",
"required": True,
"sample": ui.samples("float"),
"values": [],
},
{
"name": "dt",
"type": "date",
"required": True,
"sample": str(ui.samples("date")),
"values": [],
},
],
}
]
def test_list_types(app):
assert TestClient(app).get("/types").json() == [
"bool",
"int",
"float",
"complex",
"str",
"timedelta",
"datetime",
"date",
"time",
"TimeRange",
"TimeSet",
"AttributeV2",
"typing.List[str]",
]
def test_worker_attach(mocker, app):
# Workaround for Python 3.7 that has no AsyncMock
worker_mock = mocker.patch.object(
ui.notifier, "worker", return_value=asyncio.Future()
)
client = TestClient(app)
with client:
worker_mock.assert_called_once()
with client:
with client.websocket_connect("/logs") as websocket:
# TODO:
pass
def test_if_ui_generated():
"""Tests files existence, not real UI unit test"""
ui_root = pathlib.Path(ui.__file__).parent
required_files = [
(ui_root / "index.html").exists(),
len(list((ui_root / "css").glob("app.*.css"))) == 1,
len(list((ui_root / "css").glob("chunk-vendors.*.css"))) == 1,
len(list((ui_root / "js").glob("app.*.js"))) == 1,
len(list((ui_root / "js").glob("chunk-vendors.*.js"))) == 1,
]
assert all((_ for _ in required_files))
def test_spi_version():
"""SPI Version is hardcoded into the TestIntent.vue"""
ui_root = pathlib.Path(ui.__file__).parent
assert [
js
for js in (ui_root / "js").glob("app.*.js")
if f'spiVersion:"{__spi_version__}"' in js.read_text()
] != []
def test_from_callable_with_list():
from typing import List
def handle_string(s: str):
...
intent = ui.Intent.from_callable("Test_Intent", handle_string)
assert (
repr(intent) == "Intent(name='Test_Intent', "
"implementation=('test_ui', 'handle_string'), "
"parameters=[Parameter(name='s', type='str', required=True, sample='string value', values=[])])"
)
def handle_list(s: List[str]):
...
intent = ui.Intent.from_callable("Test_Intent", handle_list)
assert (
repr(intent) == "Intent(name='Test_Intent', "
"implementation=('test_ui', 'handle_list'), "
"parameters=[Parameter(name='s', type='typing.List[str]', required=True, sample=['string value'], values=[])])"
)
| 26.84434
| 119
| 0.486909
|
a50681e25921f669a3699a9de1c1bbc761519571
| 5,651
|
py
|
Python
|
msize.py
|
soniyanaik1177/Virtua-Try-On-
|
d104003a33dfc82083e5956804632cf1a83eeb79
|
[
"MIT"
] | 8
|
2021-05-29T11:11:20.000Z
|
2022-02-17T04:38:41.000Z
|
msize.py
|
chefpr7/Try-First
|
efaf7636cc0ff06dcec70ee300462d931c4e9d5c
|
[
"MIT"
] | 14
|
2021-05-28T05:53:39.000Z
|
2022-03-12T01:05:21.000Z
|
msize.py
|
chefpr7/Try-First
|
efaf7636cc0ff06dcec70ee300462d931c4e9d5c
|
[
"MIT"
] | 9
|
2021-05-20T09:05:57.000Z
|
2022-02-17T04:38:47.000Z
|
import cv2
from math import sqrt
import numpy as np
from parser import get
def pose_parse(file_path):
MODE = "COCO"
protoFile = "./pose_deploy_linevec.prototxt"
weightsFile = "./pose_iter_440000.caffemodel"
nPoints = 18
POSE_PAIRS = [ [1,0],[1,2],[1,5],[2,3],[3,4],[5,6],[6,7],[1,8],[8,9],[9,10],[1,11],[11,12],[12,13],[0,14],[0,15],[14,16],[15,17]]
frame = cv2.imread(file_path)
frameWidth = frame.shape[1]
frameHeight = frame.shape[0]
frameCopy = np.copy(frame) #................................
threshold = 0.1 #...........................................
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
inWidth = 192
inHeight = 256
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
(0, 0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
output = net.forward()
H = output.shape[2]
W = output.shape[3]
a = []
points = [] #.......................................
for i in range(nPoints):
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
a.append(x);
a.append(y);
#a.append(prob)
return a
# ==============================================================================
def men_size_predict(file_path, input_height, unit):
pt = pose_parse(file_path)
xc = []
yc = []
for i in range(18):
# print(pt[2*i], pt[2*i+1])
xc.append(pt[2*i])
yc.append(pt[2*i+1])
xc = np.array(xc)
yc = np.array(yc)
#=============================scaling_factor===============================
input_height = float(input_height)
if unit=='inch':
input_height = input_height*2.54
calculated_height = abs((yc[11]+yc[14])/2 - yc[0])
sf = input_height / calculated_height
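    # Worked example (illustrative numbers only): if the reported height is 175 cm and
    # the nose-to-ankle distance in the image is 500 px, sf = 175 / 500 = 0.35 cm per px,
    # so every pixel distance below is converted to centimetres by multiplying by sf.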
#=============================Men Size Chart=============================
s = sqrt((xc[5] - xc[2])**2 + (yc[5] - yc[2])**2)
w = sqrt((xc[11] - xc[8])**2 + (yc[11] - yc[8])**2)
neck = round(s/2 * sf, 1)
chest = round(s*1.4 * sf, 1)
sleeve = round((sqrt((xc[1] - xc[5])**2 + (yc[5] - yc[1])**2) + sqrt((xc[6] - xc[5])**2 + (yc[6] - yc[5])**2) + sqrt((xc[7] - xc[6])**2 + (yc[7] - yc[6])**2))/2 * sf,1)
waist = round(w*1.6 * sf,1)
hip = round(w*2 * sf , 1)
    inseam = round((sqrt((xc[11] - xc[12])**2 + (yc[11] - yc[12])**2) + sqrt((xc[12] - xc[13])**2 + (yc[12] - yc[13])**2)) * 0.91 * sf, 1)
dims = [neck, chest, sleeve, waist, hip, inseam]
# print(neck)
# print(chest)
# print(sleeve)
# print(waist)
# print(hip)
# print(inseam)
# ================================ Classification on basis of size chart ===============
sample_size=[]
if neck <= 39:
sample_size.append(1)
elif neck > 39 and neck <= 42:
sample_size.append(2)
elif neck > 42 and neck <= 44:
sample_size.append(3)
else:
sample_size.append(4)
if chest <= 98:
sample_size.append(1)
elif chest > 98 and chest <= 106:
sample_size.append(2)
elif chest > 106 and chest <= 113:
sample_size.append(3)
else:
sample_size.append(4)
if sleeve <= 86.5:
sample_size.append(1)
elif sleeve > 86.5 and sleeve <= 89:
sample_size.append(2)
elif sleeve > 89 and sleeve <= 91.5:
sample_size.append(3)
else:
sample_size.append(4)
if waist <= 86.5:
sample_size.append(1)
elif waist > 86.5 and waist <= 89:
sample_size.append(2)
elif waist > 89 and waist <= 91.5:
sample_size.append(3)
else:
sample_size.append(4)
if hip <= 102:
sample_size.append(1)
elif hip > 102 and hip <= 108:
sample_size.append(2)
elif hip > 108 and hip <= 116.5:
sample_size.append(3)
else:
sample_size.append(4)
if inseam <= 77.5:
sample_size.append(1)
    elif inseam > 77.5 and inseam <= 81:
sample_size.append(2)
elif inseam > 81 and inseam <= 82.5:
sample_size.append(3)
else:
sample_size.append(4)
def most_frequent(List):
return max(set(List), key = List.count)
f = (most_frequent(sample_size))
if f == 1:
ans = 'S'
elif f == 2:
ans = 'M'
elif f == 3:
ans = 'L'
else:
ans = 'XL'
# print( sample_size )
# print( ans)
return ans
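# A minimal usage sketch (the image path and height are placeholders):
#
#     size = men_size_predict("uploads/person.jpg", 175, "cm")
#     print(size)  # one of 'S', 'M', 'L' or 'XL'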
#=======================================================================
#=============================Key Point Labels===========================
# {0, "Nose"},
# {1, "Neck"},
# {2, "RShoulder"},
# {3, "RElbow"},
# {4, "RWrist"},
# {5, "LShoulder"},
# {6, "LElbow"},
# {7, "LWrist"},
# {8, "MidHip"},
# {9, "RHip"},
# {10, "RKnee"},
# {11, "RAnkle"},
# {12, "LHip"},
# {13, "LKnee"},
# {14, "LAnkle"},
# {15, "REye"},
# {16, "LEye"},
# {17, "REar"},
# {18, "LEar"}
#========================================================================
# dicti = {"pose_keypoints":a}
# people = []
# people.append(dicti)
# dicti = {"people":people}
# import json
# get()
# with open("./static/Database/val/pose/"+person_name+"_keypoints.json", "w") as outfile:
# json.dump(dicti, outfile)
| 26.530516
| 172
| 0.470359
|
b3fd92eb299e5d401471f6e0359f893af93f2f69
| 11,296
|
py
|
Python
|
tools/aquery_differ/aquery_differ_v2.py
|
zhengwei143/bazel
|
90e3bb9cca56a379c9f3524bc7fdb66558baab79
|
[
"Apache-2.0"
] | 2
|
2022-03-20T05:04:46.000Z
|
2022-03-20T05:05:40.000Z
|
tools/aquery_differ/aquery_differ_v2.py
|
zhengwei143/bazel
|
90e3bb9cca56a379c9f3524bc7fdb66558baab79
|
[
"Apache-2.0"
] | 1
|
2021-01-01T19:12:37.000Z
|
2021-01-02T12:26:27.000Z
|
tools/aquery_differ/aquery_differ_v2.py
|
zhengwei143/bazel
|
90e3bb9cca56a379c9f3524bc7fdb66558baab79
|
[
"Apache-2.0"
] | 1
|
2020-11-03T04:46:04.000Z
|
2020-11-03T04:46:04.000Z
|
# Lint as: python2, python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Command line diffing tool that compares two bazel aquery invocations.
This makes use of aquery v2 proto format.
This script compares the proto or textproto output of two bazel aquery
invocations. For each set of output files of an action, it compares the command
lines that generated the files.
Example usage:
1. Prepare 2 aquery output files:
bazel aquery //path/to:target_one --output=textproto > \
/path/to/output_one.textproto
bazel aquery //path/to:target_two --output=textproto > \
/path/to/output_two.textproto
2. Run the differ from a bazel repo:
bazel run //tools/aquery_differ:aquery_differ_v2 -- \
--before=/path/to/output_one.textproto \
--after=/path/to/output_two.textproto \
--input_type=textproto \
--attrs=cmdline \
--attrs=inputs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import difflib
import os
import sys
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from absl import app
from absl import flags
from six.moves import map
from google.protobuf import text_format
from src.main.protobuf import analysis_v2_pb2
from tools.aquery_differ.resolvers.dep_set_resolver import DepSetResolver
from tools.aquery_differ.resolvers.path_fragment_resolver import PathFragmentResolver
# pylint: disable=g-import-not-at-top
# resource lib isn't available on Windows.
if os.name != "nt":
import resource
# pylint: enable=g-import-not-at-top
flags.DEFINE_string("before", None, "Aquery output before the change")
flags.DEFINE_string("after", None, "Aquery output after the change")
flags.DEFINE_enum(
"input_type", "proto", ["proto", "textproto"],
"The format of the aquery proto input. One of 'proto' and 'textproto.")
flags.DEFINE_multi_enum("attrs", ["cmdline"], ["inputs", "cmdline"],
"Attributes of the actions to be compared.")
flags.DEFINE_integer(
"max_mem_alloc_mb", 3072,
"Amount of max memory available for aquery_differ, in MB.")
flags.mark_flag_as_required("before")
flags.mark_flag_as_required("after")
WHITE = "\033[37m%s\033[0m"
CYAN = "\033[36m%s\033[0m"
RED = "\033[31m%s\033[0m"
GREEN = "\033[32m%s\033[0m"
def _colorize(line):
"""Add color to the input string."""
if not sys.stdout.isatty():
return line
if line.startswith("+++") or line.startswith("---"):
return WHITE % line
if line.startswith("@@"):
return CYAN % line
if line.startswith("+"):
return GREEN % line
if line.startswith("-"):
return RED % line
return line
def _print_diff(output_files, before_val, after_val, attr, before_file,
after_file):
diff = "\n".join(
map(_colorize, [
s.strip("\n") for s in difflib.unified_diff(before_val, after_val,
before_file, after_file)
]))
print(("[%s]\n"
"Difference in the action that generates the following output(s):"
"\n\t%s\n%s\n") % (attr, "\n\t".join(output_files.split()), diff))
def _map_artifact_id_to_path(artifacts, path_fragments):
path_fragment_resolver = PathFragmentResolver(path_fragments)
return {
artifact.id: path_fragment_resolver.resolve(artifact.path_fragment_id)
for artifact in artifacts
}
def _map_action_index_to_output_files(actions, artifacts):
"""Constructs a map from action index to output files.
Args:
actions: a list of actions from the action graph container
artifacts: a map {artifact_id: artifact path}
Returns:
A map from action index (in action graph container) to a string of
concatenated output artifacts paths.
"""
action_index_to_output_files = {}
for i, action in enumerate(actions):
output_files = " ".join(
sorted([artifacts[output_id] for output_id in action.output_ids]))
action_index_to_output_files[i] = output_files
return action_index_to_output_files
# output files -> input artifacts
def _map_output_files_to_input_artifacts(action_graph_container,
artifact_id_to_path,
action_index_to_output_files):
"""Constructs a map from output files to input artifacts.
Args:
action_graph_container: the full action graph container object
artifact_id_to_path: a map {artifact_id: artifact path}
action_index_to_output_files: a map from action index (in action graph
container) to a string of concatenated output artifacts paths.
Returns:
A map from output files (string of concatenated output artifacts paths) to a
list of input artifacts.
"""
actions = action_graph_container.actions
dep_set_of_files = action_graph_container.dep_set_of_files
id_to_dep_set = {dep_set.id: dep_set for dep_set in dep_set_of_files}
dep_set_resolver = DepSetResolver(dep_set_of_files, artifact_id_to_path)
output_files_to_input_artifacts = {}
for i, action in enumerate(actions):
input_artifacts = []
for dep_set_id in action.input_dep_set_ids:
input_artifacts.extend(
dep_set_resolver.resolve(id_to_dep_set[dep_set_id]))
output_files_to_input_artifacts[action_index_to_output_files[i]] = list(
sorted(input_artifacts))
return output_files_to_input_artifacts
# output files -> command line
def _map_output_files_to_command_line(actions, action_index_to_output_files):
"""Constructs a map from output files to command line.
Args:
actions: a list of actions from the action graph container
action_index_to_output_files: a map from action index (in action graph
container) to a string of concatenated output artifacts paths.
Returns:
A map from output files (string of concatenated output artifacts paths)
to the command line (a list of arguments).
"""
output_files_to_command_line = {}
for i, action in enumerate(actions):
output_files_to_command_line[
action_index_to_output_files[i]] = action.arguments
return output_files_to_command_line
def _aquery_diff(before_proto, after_proto, attrs, before_file, after_file):
"""Returns differences between command lines that generate same outputs."""
found_difference = False
artifacts_before = _map_artifact_id_to_path(before_proto.artifacts,
before_proto.path_fragments)
artifacts_after = _map_artifact_id_to_path(after_proto.artifacts,
after_proto.path_fragments)
action_to_output_files_before = _map_action_index_to_output_files(
before_proto.actions, artifacts_before)
action_to_output_files_after = _map_action_index_to_output_files(
after_proto.actions, artifacts_after)
# There's a 1-to-1 mapping between action and outputs
output_files_before = set(action_to_output_files_before.values())
output_files_after = set(action_to_output_files_after.values())
before_after_diff = output_files_before - output_files_after
after_before_diff = output_files_after - output_files_before
if before_after_diff:
print(("Aquery output 'before' change contains an action that generates "
"the following outputs that aquery output 'after' change doesn't:"
"\n%s\n") % "\n".join(before_after_diff))
found_difference = True
if after_before_diff:
print(("Aquery output 'after' change contains an action that generates "
"the following outputs that aquery output 'before' change doesn't:"
"\n%s\n") % "\n".join(after_before_diff))
found_difference = True
if "cmdline" in attrs:
output_to_command_line_before = _map_output_files_to_command_line(
before_proto.actions, action_to_output_files_before)
output_to_command_line_after = _map_output_files_to_command_line(
after_proto.actions, action_to_output_files_after)
for output_files in output_to_command_line_before:
arguments = output_to_command_line_before[output_files]
after_arguments = output_to_command_line_after.get(output_files, None)
if after_arguments and arguments != after_arguments:
_print_diff(output_files, arguments, after_arguments, "cmdline",
before_file, after_file)
found_difference = True
if "inputs" in attrs:
output_to_input_files_before = _map_output_files_to_input_artifacts(
before_proto, artifacts_before, action_to_output_files_before)
output_to_input_files_after = _map_output_files_to_input_artifacts(
after_proto, artifacts_after, action_to_output_files_after)
for output_files in output_to_input_files_before:
before_inputs = output_to_input_files_before[output_files]
after_inputs = output_to_input_files_after.get(output_files, None)
if after_inputs and before_inputs != after_inputs:
_print_diff(output_files, before_inputs, after_inputs, "inputs",
before_file, after_file)
found_difference = True
if not found_difference:
print("No difference")
def to_absolute_path(path):
path = os.path.expanduser(path)
if os.path.isabs(path):
return path
else:
if "BUILD_WORKING_DIRECTORY" in os.environ:
return os.path.join(os.environ["BUILD_WORKING_DIRECTORY"], path)
else:
return path
def main(unused_argv):
before_file = to_absolute_path(flags.FLAGS.before)
after_file = to_absolute_path(flags.FLAGS.after)
input_type = flags.FLAGS.input_type
attrs = flags.FLAGS.attrs
max_mem_alloc_mb = flags.FLAGS.max_mem_alloc_mb
# resource lib isn't available on Windows.
if os.name != "nt":
max_heap_bytes = max_mem_alloc_mb * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (max_heap_bytes, max_heap_bytes))
before_proto = analysis_v2_pb2.ActionGraphContainer()
after_proto = analysis_v2_pb2.ActionGraphContainer()
try:
if input_type == "proto":
with open(before_file, "rb") as f:
before_proto.ParseFromString(f.read())
with open(after_file, "rb") as f:
after_proto.ParseFromString(f.read())
else:
with open(before_file, "r") as f:
before_text = f.read()
text_format.Merge(before_text, before_proto)
with open(after_file, "r") as f:
after_text = f.read()
text_format.Merge(after_text, after_proto)
_aquery_diff(before_proto, after_proto, attrs, before_file, after_file)
except MemoryError:
print(
"aquery_differ is known to cause OOM issue with large inputs. More details: b/154620006.",
file=sys.stderr)
print(
"Max mem space of {}MB exceeded".format(max_mem_alloc_mb),
file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
app.run(main)
| 36.794788
| 98
| 0.729108
|
9b42d2b0fc978c57d23fbb30cd47a3b92a4ac8e3
| 8,245
|
py
|
Python
|
train.py
|
huzuohuyou/cnn-text-classification-tf
|
8ef64a5ad937444ac56e8195fe3a8e039da5d240
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
huzuohuyou/cnn-text-classification-tf
|
8ef64a5ad937444ac56e8195fe3a8e039da5d240
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
huzuohuyou/cnn-text-classification-tf
|
8ef64a5ad937444ac56e8195fe3a8e039da5d240
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
# Parameters
# ==================================================
# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Data Preparation
# ==================================================
# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/test set
# TODO: This is very crude, should use cross-validation
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
# Training
# ==================================================
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = TextCNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
l2_reg_lambda=FLAGS.l2_reg_lambda)
# Define Training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
print("Writing to {}\n".format(out_dir))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar("loss", cnn.loss)
acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)
# Train Summaries
train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
# Write vocabulary
vocab_processor.save(os.path.join(out_dir, "vocab"))
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
}
_, step, summaries, loss, accuracy = sess.run(
[train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on a dev set
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0
}
step, summaries, loss, accuracy = sess.run(
[global_step, dev_summary_op, cnn.loss, cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
if writer:
writer.add_summary(summaries, step)
# Generate batches
batches = data_helpers.batch_iter(
list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
# Training loop. For each batch...
for batch in batches:
x_batch, y_batch = zip(*batch)
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if current_step % FLAGS.evaluate_every == 0:
print("\nEvaluation:")
dev_step(x_dev, y_dev, writer=dev_summary_writer)
print("")
if current_step % FLAGS.checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
#
| 43.624339
| 124
| 0.661249
|
6452393409d253fffb3ef9edbf74ddd171fd11c5
| 891
|
py
|
Python
|
district_info/migrations/0003_auto_20200413_1218.py
|
Shovon588/corona-info
|
e4ac5b79239b31842e8ed6c7b38a95caecadfc43
|
[
"MIT"
] | null | null | null |
district_info/migrations/0003_auto_20200413_1218.py
|
Shovon588/corona-info
|
e4ac5b79239b31842e8ed6c7b38a95caecadfc43
|
[
"MIT"
] | 5
|
2021-03-19T01:55:54.000Z
|
2021-09-22T18:53:08.000Z
|
district_info/migrations/0003_auto_20200413_1218.py
|
Shovon588/corona-info
|
e4ac5b79239b31842e8ed6c7b38a95caecadfc43
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-04-13 06:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('district_info', '0002_totalinfo'),
]
operations = [
migrations.AlterField(
model_name='caseinfo',
name='cases',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='totalinfo',
name='new_case',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='totalinfo',
name='new_death',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='totalinfo',
name='new_recovery',
field=models.PositiveIntegerField(default=0),
),
]
| 26.205882
| 57
| 0.573513
|
0db3dc8ba95e100845fd84bf4b1994dabd144c99
| 4,229
|
py
|
Python
|
anki_vector/opengl/__init__.py
|
Johnnydaszhu/vector-python-sdk
|
8086ab128dcd0819e3e3e8a64a95552ceb9ae0ca
|
[
"Apache-2.0"
] | 1
|
2019-01-24T13:49:00.000Z
|
2019-01-24T13:49:00.000Z
|
anki_vector/opengl/__init__.py
|
Johnnydaszhu/vector-python-sdk
|
8086ab128dcd0819e3e3e8a64a95552ceb9ae0ca
|
[
"Apache-2.0"
] | null | null | null |
anki_vector/opengl/__init__.py
|
Johnnydaszhu/vector-python-sdk
|
8086ab128dcd0819e3e3e8a64a95552ceb9ae0ca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides 3D classes for running the OpenGL Viewer.
It should be launched in a separate process to allow Vector to run freely while
the viewer is rendering.
It uses PyOpenGL, a Python OpenGL 3D graphics library which is available on most
platforms. It also depends on the Pillow library for image processing.
Warning:
This package requires Python to have the PyOpenGL package installed, along
with an implementation of GLUT (OpenGL Utility Toolkit).
To install the Python packages on Mac and Linux do ``python3 -m pip install --user "anki_vector[3dviewer]"``
To install the Python packages on Windows do ``py -3 -m pip install --user "anki_vector[3dviewer]"``
On Windows and Linux you must also install freeglut (macOS / OSX has one
preinstalled).
On Linux: ``sudo apt-get install freeglut3``
On Windows: Go to http://freeglut.sourceforge.net/ to get a ``freeglut.dll``
file. It's included in any of the `Windows binaries` downloads. Place the DLL
next to your Python script, or install it somewhere in your PATH to allow any
script to use it."
"""
import multiprocessing as mp
from . import opengl_viewer
def main(close_event: mp.Event,
input_intent_queue: mp.Queue,
nav_map_queue: mp.Queue,
world_frame_queue: mp.Queue,
extra_render_function_queue: mp.Queue,
user_data_queue: mp.Queue):
"""Run the 3D Viewer window. This is intended to run on a background process.
.. code-block:: python
import multiprocessing as mp
from anki_vector import opengl
ctx = mp.get_context('spawn')
close_event = ctx.Event()
input_intent_queue = ctx.Queue(maxsize=10)
nav_map_queue = ctx.Queue(maxsize=10)
world_frame_queue = ctx.Queue(maxsize=10)
extra_render_function_queue = ctx.Queue(maxsize=1)
user_data_queue = ctx.Queue()
process = ctx.Process(target=opengl.main,
args=(close_event,
input_intent_queue,
nav_map_queue,
world_frame_queue,
extra_render_function_queue,
user_data_queue),
daemon=True)
process.start()
:param close_event: Used to notify each process when done rendering.
:type close_event: multiprocessing.Event
:param input_intent_queue: Sends key commands from the 3D viewer process to the main process.
:type input_intent_queue: multiprocessing.Queue
:param nav_map_queue: Updates the 3D viewer process with the latest navigation map.
:type nav_map_queue: multiprocessing.Queue
:param world_frame_queue: Provides the 3D viewer with details about the world.
:type world_frame_queue: multiprocessing.Queue
:param extra_render_function_queue: Functions to be executed in the 3D viewer process.
:type extra_render_function_queue: multiprocessing.Queue
:param user_data_queue: A queue that may be used outside the SDK to pass information to the viewer process.
May be used by ``extra_render_function_queue`` functions.
"""
viewer = opengl_viewer.OpenGLViewer(close_event,
input_intent_queue,
nav_map_queue,
world_frame_queue,
extra_render_function_queue,
user_data_queue)
viewer.run()
__all__ = ['main']
| 41.871287
| 112
| 0.663277
|
3c901874624c849a6bded3860ec3bba9f5d3d0e4
| 3,165
|
py
|
Python
|
tests/integration/modules/test_config.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | 1
|
2021-09-06T00:14:04.000Z
|
2021-09-06T00:14:04.000Z
|
tests/integration/modules/test_config.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | 2
|
2021-04-30T21:17:57.000Z
|
2021-12-13T20:40:23.000Z
|
tests/integration/modules/test_config.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Validate the config system
"""
from __future__ import absolute_import
import pytest
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
@pytest.mark.windows_whitelisted
class ConfigTest(ModuleCase):
"""
Test config routines
"""
@skipIf(True, "SLOWTEST skip")
def test_valid_file_proto(self):
"""
test config.valid_file_proto
"""
self.assertTrue(self.run_function("config.valid_fileproto", ["salt://"]))
self.assertTrue(self.run_function("config.valid_fileproto", ["file://"]))
self.assertTrue(self.run_function("config.valid_fileproto", ["http://"]))
self.assertTrue(self.run_function("config.valid_fileproto", ["https://"]))
self.assertTrue(self.run_function("config.valid_fileproto", ["ftp://"]))
self.assertTrue(self.run_function("config.valid_fileproto", ["s3://"]))
self.assertTrue(self.run_function("config.valid_fileproto", ["swift://"]))
self.assertFalse(self.run_function("config.valid_fileproto", ["cheese://"]))
@skipIf(True, "SLOWTEST skip")
def test_backup_mode(self):
"""
test config.backup_mode
"""
self.assertEqual(self.run_function("config.backup_mode", ["minion"]), "minion")
@skipIf(True, "SLOWTEST skip")
def test_manage_mode(self):
"""
test config.manage_mode
"""
# This function is generally only used with cross calls, the yaml
# interpreter is breaking it for remote calls
# The correct standard is the four digit form.
self.assertEqual(self.run_function("config.manage_mode", ['"775"']), "0775")
self.assertEqual(self.run_function("config.manage_mode", ['"1775"']), "1775")
self.assertEqual(self.run_function("config.manage_mode", ['"0775"']), "0775")
self.assertEqual(self.run_function("config.manage_mode", ['"01775"']), "1775")
self.assertEqual(self.run_function("config.manage_mode", ['"0"']), "0000")
self.assertEqual(self.run_function("config.manage_mode", ["775"]), "0775")
self.assertEqual(self.run_function("config.manage_mode", ["1775"]), "1775")
self.assertEqual(self.run_function("config.manage_mode", ["0"]), "0000")
@skipIf(True, "SLOWTEST skip")
def test_option(self):
"""
test config.option
"""
# Minion opt
self.assertEqual(
self.run_function("config.option", ["master_port"]),
self.get_config("minion")["master_port"],
)
# pillar conf opt
self.assertEqual(self.run_function("config.option", ["ext_spam"]), "eggs")
@skipIf(True, "SLOWTEST skip")
def test_get(self):
"""
Test option.get
"""
# Check pillar get
self.assertEqual(self.run_function("config.get", ["level1:level2"]), "foo")
# Check master config
self.assertEqual(
self.run_function("config.get", ["config_opt:layer2"]), "kosher"
)
# Check minion config
self.assertEqual(self.run_function("config.get", ["config_test:spam"]), "eggs")
| 38.13253
| 87
| 0.62812
|
f716eee3eb58697b715967ba75f76f3d236c3384
| 3,353
|
py
|
Python
|
minigrid_basics/examples/rw_four_directions.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
minigrid_basics/examples/rw_four_directions.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
minigrid_basics/examples/rw_four_directions.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Example that uses Gym-Minigrid, a custom environment, and custom actions.
Gym-Minigrid has a larger action space that is not standard in reinforcement
learning. By default, the actions are {rotate left, rotate right, forward, pick
up object, drop object, toggle/activate object, done}. This example uses a class
overridden to have the standard 4 directional actions: {left, right, up, down}.
Here we have a random agent interacting with the environment. In this case, we
also use a custom environment, which is likely what one will do in their
research. We are writing the agent observations to the disk just as a simple way
to get some feedback of what is going on.
Sample run:
```
python -m minigrid_basics.examples.rw_four_directions \
--gin_bindings="MonMiniGridEnv.stochasticity=0.1"
```
"""
import os
from absl import app
from absl import flags
import gin
import gym
import gym_minigrid # pylint: disable=unused-import
from gym_minigrid.wrappers import RGBImgObsWrapper
import matplotlib.pylab as plt
import tensorflow as tf
from minigrid_basics.custom_wrappers import tabular_wrapper # pylint: disable=unused-import
from minigrid_basics.envs import mon_minigrid
FLAGS = flags.FLAGS
flags.DEFINE_string('file_path', '/tmp/rw_four_directions',
'Path in which we will save the observations.')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override default parameter values '
'(e.g. "MonMiniGridEnv.stochasticity=0.1").')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
gin.parse_config_files_and_bindings(
[os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')],
bindings=FLAGS.gin_bindings,
skip_unknown=False)
env_id = mon_minigrid.register_environment()
env = gym.make(env_id)
env = RGBImgObsWrapper(env) # Get pixel observations
# Get tabular observation and drop the 'mission' field:
env = tabular_wrapper.TabularWrapper(env, get_rgb=True)
env.reset()
num_frames = 0
max_num_frames = 500
if not tf.io.gfile.exists(FLAGS.file_path):
tf.io.gfile.makedirs(FLAGS.file_path)
undisc_return = 0
while num_frames < max_num_frames:
# Act randomly
obs, reward, done, _ = env.step(env.action_space.sample())
undisc_return += reward
num_frames += 1
print('t:', num_frames, ' s:', obs['state'])
# Draw environment frame just for simple visualization
plt.imshow(obs['image'])
path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames))
plt.savefig(path)
plt.clf()
if done:
break
print('Undiscounted return: %.2f' % undisc_return)
env.close()
if __name__ == '__main__':
app.run(main)
| 31.632075
| 92
| 0.737548
|
7af1cbb8ca3461575452971173f7d03c09c7a421
| 43,887
|
py
|
Python
|
chia/consensus/blockchain.py
|
Starcoder0x/chia-blockchain
|
5b2f5772780d0370f76b0134db6a7fdc7af42862
|
[
"Apache-2.0"
] | 1
|
2022-03-20T14:52:23.000Z
|
2022-03-20T14:52:23.000Z
|
chia/consensus/blockchain.py
|
zcomputerwiz/experiments-blockchain
|
841754b44494451a9e3e537575eeec431fe533d1
|
[
"Apache-2.0"
] | 3
|
2022-03-21T22:00:11.000Z
|
2022-03-21T22:00:40.000Z
|
chia/consensus/blockchain.py
|
zcomputerwiz/experiments-blockchain
|
841754b44494451a9e3e537575eeec431fe533d1
|
[
"Apache-2.0"
] | 1
|
2022-03-20T14:51:39.000Z
|
2022-03-20T14:51:39.000Z
|
import asyncio
import dataclasses
import logging
import multiprocessing
import traceback
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from multiprocessing.context import BaseContext
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
from clvm.casts import int_from_bytes
from chia.consensus.block_body_validation import validate_block_body
from chia.consensus.block_header_validation import validate_unfinished_header_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.constants import ConsensusConstants
from chia.consensus.cost_calculator import NPCResult
from chia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from chia.consensus.find_fork_point import find_fork_point_in_chain
from chia.consensus.full_block_to_block_record import block_to_block_record
from chia.consensus.multiprocess_validation import (
PreValidationResult,
_run_generator,
pre_validate_blocks_multiprocessing,
)
from chia.full_node.block_height_map import BlockHeightMap
from chia.full_node.block_store import BlockStore
from chia.full_node.coin_store import CoinStore
from chia.full_node.hint_store import HintStore
from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chia.types.block_protocol import BlockInfo
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import SerializedProgram
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.blockchain_format.vdf import VDFInfo
from chia.types.coin_record import CoinRecord
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.header_block import HeaderBlock
from chia.types.unfinished_block import UnfinishedBlock
from chia.types.unfinished_header_block import UnfinishedHeaderBlock
from chia.types.weight_proof import SubEpochChallengeSegment
from chia.util.errors import ConsensusError, Err
from chia.util.generator_tools import get_block_header, tx_removals_and_additions
from chia.util.ints import uint16, uint32, uint64, uint128
from chia.util.setproctitle import getproctitle, setproctitle
from chia.util.streamable import recurse_jsonify
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
"""
When Blockchain.receive_block(b) is called, one of these results is returned,
showing whether the block was added to the chain (extending the peak),
and if not, why it was not added.
"""
NEW_PEAK = 1 # Added to the peak of the blockchain
ADDED_AS_ORPHAN = 2 # Added as an orphan/stale block (not a new peak of the chain)
INVALID_BLOCK = 3 # Block was not added because it was invalid
ALREADY_HAVE_BLOCK = 4 # Block is already present in this blockchain
DISCONNECTED_BLOCK = 5 # Block's parent (previous pointer) is not in this blockchain
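# Illustrative sketch: how a caller might branch on these results. The `blockchain`,
# `block`, and `pre_validation_result` arguments are assumed to be supplied by the
# caller; see Blockchain.receive_block below for the actual signature.
async def _example_handle_receive_block(blockchain, block, pre_validation_result):
    result, error, fork_height, _ = await blockchain.receive_block(block, pre_validation_result)
    if result == ReceiveBlockResult.NEW_PEAK:
        log.info(f"New peak at height {block.height}, fork point {fork_height}")
    elif result == ReceiveBlockResult.ADDED_AS_ORPHAN:
        log.info("Block stored as an orphan; peak unchanged")
    elif result == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
        log.debug("Duplicate block ignored")
    else:
        log.warning(f"Block rejected: {result} ({error})")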
class Blockchain(BlockchainInterface):
constants: ConsensusConstants
constants_json: Dict
# peak of the blockchain
_peak_height: Optional[uint32]
# All blocks in peak path are guaranteed to be included, can include orphan blocks
__block_records: Dict[bytes32, BlockRecord]
# all hashes of blocks in block_record by height, used for garbage collection
__heights_in_cache: Dict[uint32, Set[bytes32]]
# maps block height (of the current heaviest chain) to block hash and sub
# epoch summaries
__height_map: BlockHeightMap
# Unspent Store
coin_store: CoinStore
# Store
block_store: BlockStore
# Used to verify blocks in parallel
pool: ProcessPoolExecutor
# Set holding seen compact proofs, in order to avoid duplicates.
_seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
# Whether blockchain is shut down or not
_shut_down: bool
# Lock to prevent simultaneous reads and writes
lock: asyncio.Lock
compact_proof_lock: asyncio.Lock
hint_store: HintStore
@staticmethod
async def create(
coin_store: CoinStore,
block_store: BlockStore,
consensus_constants: ConsensusConstants,
hint_store: HintStore,
blockchain_dir: Path,
reserved_cores: int,
multiprocessing_context: Optional[BaseContext] = None,
):
"""
Initializes a blockchain with the BlockRecords from disk, assuming they have all been
validated. Uses the genesis block given in override_constants, or as a fallback,
in the consensus constants config.
"""
self = Blockchain()
self.lock = asyncio.Lock() # External lock handled by full node
self.compact_proof_lock = asyncio.Lock()
cpu_count = multiprocessing.cpu_count()
if cpu_count > 61:
cpu_count = 61 # Windows Server 2016 has an issue https://bugs.python.org/issue26903
num_workers = max(cpu_count - reserved_cores, 1)
self.pool = ProcessPoolExecutor(
max_workers=num_workers,
mp_context=multiprocessing_context,
initializer=setproctitle,
initargs=(f"{getproctitle()}_worker",),
)
log.info(f"Started {num_workers} processes for block validation")
self.constants = consensus_constants
self.coin_store = coin_store
self.block_store = block_store
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self._shut_down = False
await self._load_chain_from_store(blockchain_dir)
self._seen_compact_proofs = set()
self.hint_store = hint_store
return self
def shut_down(self):
self._shut_down = True
self.pool.shutdown(wait=True)
async def _load_chain_from_store(self, blockchain_dir):
"""
Initializes the state of the Blockchain class from the database.
"""
self.__height_map = await BlockHeightMap.create(blockchain_dir, self.block_store.db_wrapper)
self.__block_records = {}
self.__heights_in_cache = {}
block_records, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
for block in block_records.values():
self.add_block_record(block)
if len(block_records) == 0:
assert peak is None
self._peak_height = None
return
assert peak is not None
self._peak_height = self.block_record(peak).height
assert self.__height_map.contains_height(self._peak_height)
assert not self.__height_map.contains_height(self._peak_height + 1)
def get_peak(self) -> Optional[BlockRecord]:
"""
Return the peak of the blockchain
"""
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
    async def get_full_peak(self) -> Optional[FullBlock]:
        """Return the full block at the current peak, or None if there is no peak."""
        if self._peak_height is None:
            return None
# TODO: address hint error and remove ignore
# error: Argument 1 to "get_full_block" of "BlockStore" has incompatible type "Optional[bytes32]";
# expected "bytes32" [arg-type]
block = await self.block_store.get_full_block(self.height_to_hash(self._peak_height)) # type: ignore[arg-type]
assert block is not None
return block
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
return await self.block_store.get_full_block(header_hash)
async def receive_block(
self,
block: FullBlock,
pre_validation_result: PreValidationResult,
fork_point_with_peak: Optional[uint32] = None,
) -> Tuple[
ReceiveBlockResult,
Optional[Err],
Optional[uint32],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
"""
This method must be called under the blockchain lock
Adds a new block into the blockchain, if it's valid and connected to the current
blockchain, regardless of whether it is the child of a head, or another block.
Returns a header if block is added to head. Returns an error if the block is
invalid. Also returns the fork height, in the case of a new peak.
Args:
block: The FullBlock to be validated.
pre_validation_result: A result of successful pre validation
fork_point_with_peak: The fork point, for efficiency reasons, if None, it will be recomputed
Returns:
The result of adding the block to the blockchain (NEW_PEAK, ADDED_AS_ORPHAN, INVALID_BLOCK,
            DISCONNECTED_BLOCK, ALREADY_HAVE_BLOCK)
An optional error if the result is not NEW_PEAK or ADDED_AS_ORPHAN
A fork point if the result is NEW_PEAK
A list of changes to the coin store, and changes to hints, if the result is NEW_PEAK
"""
genesis: bool = block.height == 0
if self.contains_block(block.header_hash):
return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {})
if not self.contains_block(block.prev_header_hash) and not genesis:
return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
if not genesis and (self.block_record(block.prev_header_hash).height + 1) != block.height:
return ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {})
npc_result: Optional[NPCResult] = pre_validation_result.npc_result
required_iters = pre_validation_result.required_iters
if pre_validation_result.error is not None:
return ReceiveBlockResult.INVALID_BLOCK, Err(pre_validation_result.error), None, ([], {})
assert required_iters is not None
error_code, _ = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
block.height,
npc_result,
fork_point_with_peak,
self.get_block_generator,
# If we did not already validate the signature, validate it now
validate_signature=not pre_validation_result.validated_signature,
)
if error_code is not None:
return ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {})
block_record = block_to_block_record(
self.constants,
self,
required_iters,
block,
None,
)
# Always add the block to the database
async with self.block_store.db_wrapper.lock:
try:
header_hash: bytes32 = block.header_hash
# Perform the DB operations to update the state, and rollback if something goes wrong
await self.block_store.db_wrapper.begin_transaction()
await self.block_store.add_full_block(header_hash, block, block_record)
fork_height, peak_height, records, (coin_record_change, hint_changes) = await self._reconsider_peak(
block_record, genesis, fork_point_with_peak, npc_result
)
await self.block_store.db_wrapper.commit_transaction()
# Then update the memory cache. It is important that this task is not cancelled and does not throw
self.add_block_record(block_record)
for fetched_block_record in records:
self.__height_map.update_height(
fetched_block_record.height,
fetched_block_record.header_hash,
fetched_block_record.sub_epoch_summary_included,
)
if peak_height is not None:
self._peak_height = peak_height
await self.__height_map.maybe_flush()
except BaseException as e:
self.block_store.rollback_cache_block(header_hash)
await self.block_store.db_wrapper.rollback_transaction()
log.error(
f"Error while adding block {block.header_hash} height {block.height},"
f" rolling back: {traceback.format_exc()} {e}"
)
raise
if fork_height is not None:
# new coin records added
assert coin_record_change is not None
return ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes)
else:
return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {})
def get_hint_list(self, npc_result: NPCResult) -> List[Tuple[bytes32, bytes]]:
h_list = []
for npc in npc_result.npc_list:
for opcode, conditions in npc.conditions:
if opcode == ConditionOpcode.CREATE_COIN:
for condition in conditions:
if len(condition.vars) > 2 and condition.vars[2] != b"":
puzzle_hash, amount_bin = condition.vars[0], condition.vars[1]
amount = int_from_bytes(amount_bin)
# TODO: address hint error and remove ignore
# error: Argument 2 to "Coin" has incompatible type "bytes"; expected "bytes32"
# [arg-type]
coin_id = Coin(npc.coin_name, puzzle_hash, amount).name() # type: ignore[arg-type]
h_list.append((coin_id, condition.vars[2]))
return h_list
async def _reconsider_peak(
self,
block_record: BlockRecord,
genesis: bool,
fork_point_with_peak: Optional[uint32],
npc_result: Optional[NPCResult],
) -> Tuple[
Optional[uint32],
Optional[uint32],
List[BlockRecord],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
"""
        When a new block is added, this is called to check if the new block is the new peak of the chain.
This also handles reorgs by reverting blocks which are not in the heaviest chain.
It returns the height of the fork between the previous chain and the new chain, or returns
None if there was no update to the heaviest chain.
"""
peak = self.get_peak()
lastest_coin_state: Dict[bytes32, CoinRecord] = {}
hint_coin_state: Dict[bytes, Dict[bytes32, CoinRecord]] = {}
if genesis:
if peak is None:
block: Optional[FullBlock] = await self.block_store.get_full_block(block_record.header_hash)
assert block is not None
if npc_result is not None:
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
tx_removals, tx_additions = [], []
if block.is_transaction_block():
assert block.foliage_transaction_block is not None
added = await self.coin_store.new_block(
block.height,
block.foliage_transaction_block.timestamp,
block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
else:
added, _ = [], []
await self.block_store.set_in_chain([(block_record.header_hash,)])
await self.block_store.set_peak(block_record.header_hash)
return uint32(0), uint32(0), [block_record], (added, {})
return None, None, [], ([], {})
assert peak is not None
if block_record.weight > peak.weight:
            # Find the fork. If the block is just being appended, it will return the peak
# If no blocks in common, returns -1, and reverts all blocks
if block_record.prev_hash == peak.header_hash:
fork_height: int = peak.height
elif fork_point_with_peak is not None:
fork_height = fork_point_with_peak
else:
fork_height = find_fork_point_in_chain(self, block_record, peak)
if block_record.prev_hash != peak.header_hash:
roll_changes: List[CoinRecord] = await self.coin_store.rollback_to_block(fork_height)
for coin_record in roll_changes:
lastest_coin_state[coin_record.name] = coin_record
# Rollback sub_epoch_summaries
self.__height_map.rollback(fork_height)
await self.block_store.rollback(fork_height)
# Collect all blocks from fork point to new peak
blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
curr = block_record.header_hash
while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
assert fetched_full_block is not None
assert fetched_block_record is not None
blocks_to_add.append((fetched_full_block, fetched_block_record))
if fetched_full_block.height == 0:
# Doing a full reorg, starting at height 0
break
curr = fetched_block_record.prev_hash
records_to_add = []
for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
records_to_add.append(fetched_block_record)
if fetched_full_block.is_transaction_block():
if fetched_block_record.header_hash == block_record.header_hash:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, npc_result
)
else:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, None
)
assert fetched_full_block.foliage_transaction_block is not None
added_rec = await self.coin_store.new_block(
fetched_full_block.height,
fetched_full_block.foliage_transaction_block.timestamp,
fetched_full_block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
removed_rec: List[Optional[CoinRecord]] = [
await self.coin_store.get_coin_record(name) for name in tx_removals
]
# Set additions first, then removals in order to handle ephemeral coin state
# Add in height order is also required
record: Optional[CoinRecord]
for record in added_rec:
assert record
lastest_coin_state[record.name] = record
for record in removed_rec:
assert record
lastest_coin_state[record.name] = record
if npc_res is not None:
hint_list: List[Tuple[bytes32, bytes]] = self.get_hint_list(npc_res)
await self.hint_store.add_hints(hint_list)
# There can be multiple coins for the same hint
for coin_id, hint in hint_list:
key = hint
if key not in hint_coin_state:
hint_coin_state[key] = {}
hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
await self.block_store.set_in_chain([(br.header_hash,) for br in records_to_add])
# Changes the peak to be the new peak
await self.block_store.set_peak(block_record.header_hash)
return (
uint32(max(fork_height, 0)),
block_record.height,
records_to_add,
(list(lastest_coin_state.values()), hint_coin_state),
)
# This is not a heavier block than the heaviest we have seen, so we don't change the coin set
return None, None, [], ([], {})
async def get_tx_removals_and_additions(
self, block: FullBlock, npc_result: Optional[NPCResult] = None
) -> Tuple[List[bytes32], List[Coin], Optional[NPCResult]]:
if block.is_transaction_block():
if block.transactions_generator is not None:
if npc_result is None:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
assert block_generator is not None
npc_result = get_name_puzzle_conditions(
block_generator,
self.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.constants.COST_PER_BYTE,
mempool_mode=False,
height=block.height,
)
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
return tx_removals, tx_additions, npc_result
else:
return [], [], None
else:
return [], [], None
def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.DIFFICULTY_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]
def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.SUB_SLOT_ITERS_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
async def get_sp_and_ip_sub_slots(
self, header_hash: bytes32
) -> Optional[Tuple[Optional[EndOfSubSlotBundle], Optional[EndOfSubSlotBundle]]]:
block: Optional[FullBlock] = await self.block_store.get_full_block(header_hash)
if block is None:
return None
curr_br: BlockRecord = self.block_record(block.header_hash)
is_overflow = curr_br.overflow
curr: Optional[FullBlock] = block
assert curr is not None
while True:
if curr_br.first_in_sub_slot:
curr = await self.block_store.get_full_block(curr_br.header_hash)
assert curr is not None
break
if curr_br.height == 0:
break
curr_br = self.block_record(curr_br.prev_hash)
if len(curr.finished_sub_slots) == 0:
# This means we got to genesis and still no sub-slots
return None, None
ip_sub_slot = curr.finished_sub_slots[-1]
if not is_overflow:
# Pos sub-slot is the same as infusion sub slot
return None, ip_sub_slot
if len(curr.finished_sub_slots) > 1:
# Have both sub-slots
return curr.finished_sub_slots[-2], ip_sub_slot
prev_curr: Optional[FullBlock] = await self.block_store.get_full_block(curr.prev_header_hash)
if prev_curr is None:
assert curr.height == 0
prev_curr = curr
prev_curr_br = self.block_record(curr.header_hash)
else:
prev_curr_br = self.block_record(curr.prev_header_hash)
assert prev_curr_br is not None
while prev_curr_br.height > 0:
if prev_curr_br.first_in_sub_slot:
prev_curr = await self.block_store.get_full_block(prev_curr_br.header_hash)
assert prev_curr is not None
break
prev_curr_br = self.block_record(prev_curr_br.prev_hash)
if len(prev_curr.finished_sub_slots) == 0:
return None, ip_sub_slot
return prev_curr.finished_sub_slots[-1], ip_sub_slot
def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
peak = self.get_peak()
if peak is None:
return []
recent_rc: List[Tuple[bytes32, uint128]] = []
curr: Optional[BlockRecord] = peak
while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
if curr != peak:
recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
if curr.first_in_sub_slot:
assert curr.finished_reward_slot_hashes is not None
sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
# Start from the most recent
for rc in reversed(curr.finished_reward_slot_hashes):
if sub_slot_total_iters < curr.sub_slot_iters:
break
recent_rc.append((rc, sub_slot_total_iters))
sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
curr = self.try_block_record(curr.prev_hash)
return list(reversed(recent_rc))
async def validate_unfinished_block(
self, block: UnfinishedBlock, npc_result: Optional[NPCResult], skip_overflow_ss_validation=True
) -> PreValidationResult:
if (
not self.contains_block(block.prev_header_hash)
and not block.prev_header_hash == self.constants.GENESIS_CHALLENGE
):
return PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None, False)
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
prev_b = self.try_block_record(unfinished_header_block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(unfinished_header_block.finished_sub_slots) > 0, prev_b, self
)
required_iters, error = validate_unfinished_header_block(
self.constants,
self,
unfinished_header_block,
False,
difficulty,
sub_slot_iters,
skip_overflow_ss_validation,
)
if error is not None:
return PreValidationResult(uint16(error.code.value), None, None, False)
prev_height = (
-1
if block.prev_header_hash == self.constants.GENESIS_CHALLENGE
else self.block_record(block.prev_header_hash).height
)
error_code, cost_result = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
uint32(prev_height + 1),
npc_result,
None,
self.get_block_generator,
validate_signature=False, # Signature was already validated before calling this method, no need to validate
)
if error_code is not None:
return PreValidationResult(uint16(error_code.value), None, None, False)
return PreValidationResult(None, required_iters, cost_result, False)
async def pre_validate_blocks_multiprocessing(
self,
blocks: List[FullBlock],
npc_results: Dict[uint32, NPCResult], # A cache of the result of running CLVM, optional (you can use {})
batch_size: int = 4,
wp_summaries: Optional[List[SubEpochSummary]] = None,
*,
validate_signatures: bool,
) -> List[PreValidationResult]:
return await pre_validate_blocks_multiprocessing(
self.constants,
self.constants_json,
self,
blocks,
self.pool,
True,
npc_results,
self.get_block_generator,
batch_size,
wp_summaries,
validate_signatures=validate_signatures,
)
async def run_generator(self, unfinished_block: bytes, generator: BlockGenerator, height: uint32) -> NPCResult:
task = asyncio.get_running_loop().run_in_executor(
self.pool,
_run_generator,
self.constants_json,
unfinished_block,
bytes(generator),
height,
)
npc_result_bytes = await task
if npc_result_bytes is None:
raise ConsensusError(Err.UNKNOWN)
ret = NPCResult.from_bytes(npc_result_bytes)
if ret.error is not None:
raise ConsensusError(ret.error)
return ret
def contains_block(self, header_hash: bytes32) -> bool:
"""
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
"""
return header_hash in self.__block_records
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self.__block_records[header_hash]
def height_to_block_record(self, height: uint32) -> BlockRecord:
header_hash = self.height_to_hash(height)
# TODO: address hint error and remove ignore
# error: Argument 1 to "block_record" of "Blockchain" has incompatible type "Optional[bytes32]"; expected
# "bytes32" [arg-type]
return self.block_record(header_hash) # type: ignore[arg-type]
def get_ses_heights(self) -> List[uint32]:
return self.__height_map.get_ses_heights()
def get_ses(self, height: uint32) -> SubEpochSummary:
return self.__height_map.get_ses(height)
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
if not self.__height_map.contains_height(height):
return None
return self.__height_map.get_hash(height)
def contains_height(self, height: uint32) -> bool:
return self.__height_map.contains_height(height)
def get_peak_height(self) -> Optional[uint32]:
return self._peak_height
async def warmup(self, fork_point: uint32):
"""
Loads blocks into the cache. The blocks loaded include all blocks from
fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
Args:
fork_point: the last block height to load in the cache
"""
if self._peak_height is None:
return None
block_records = await self.block_store.get_block_records_in_range(
max(fork_point - self.constants.BLOCKS_CACHE_SIZE, uint32(0)), fork_point
)
for block_record in block_records.values():
self.add_block_record(block_record)
def clean_block_record(self, height: int):
"""
        Clears all block records in the cache whose height is less than the given height.
Args:
height: Minimum height that we need to keep in the cache
"""
if height < 0:
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while blocks_to_remove is not None and height >= 0:
for header_hash in blocks_to_remove:
del self.__block_records[header_hash] # remove from blocks
del self.__heights_in_cache[uint32(height)] # remove height from heights in cache
if height == 0:
break
height = height - 1
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self):
"""
Cleans the cache so that we only maintain relevant blocks. This removes
block records that have height < peak - BLOCKS_CACHE_SIZE.
        The retained blocks are necessary for calculating future difficulty adjustments.
"""
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
assert self._peak_height is not None
if self._peak_height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(self._peak_height - self.constants.BLOCKS_CACHE_SIZE)
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
hashes = []
for height in range(start, stop + 1):
if self.contains_height(uint32(height)):
# TODO: address hint error and remove ignore
# error: Incompatible types in assignment (expression has type "Optional[bytes32]", variable has
# type "bytes32") [assignment]
header_hash: bytes32 = self.height_to_hash(uint32(height)) # type: ignore[assignment]
hashes.append(header_hash)
blocks: List[FullBlock] = []
for hash in hashes.copy():
block = self.block_store.block_cache.get(hash)
if block is not None:
blocks.append(block)
hashes.remove(hash)
blocks_on_disk: List[FullBlock] = await self.block_store.get_blocks_by_hash(hashes)
blocks.extend(blocks_on_disk)
header_blocks: Dict[bytes32, HeaderBlock] = {}
for block in blocks:
if self.height_to_hash(block.height) != block.header_hash:
raise ValueError(f"Block at {block.header_hash} is no longer in the blockchain (it's in a fork)")
if tx_filter is False:
header = get_block_header(block, [], [])
else:
tx_additions: List[CoinRecord] = [
c for c in (await self.coin_store.get_coins_added_at_height(block.height)) if not c.coinbase
]
removed: List[CoinRecord] = await self.coin_store.get_coins_removed_at_height(block.height)
header = get_block_header(
block, [record.coin for record in tx_additions], [record.coin.name() for record in removed]
)
header_blocks[header.header_hash] = header
return header_blocks
async def get_header_block_by_height(
self, height: int, header_hash: bytes32, tx_filter: bool = True
) -> Optional[HeaderBlock]:
header_dict: Dict[bytes32, HeaderBlock] = await self.get_header_blocks_in_range(height, height, tx_filter)
if len(header_dict) == 0:
return None
if header_hash not in header_dict:
return None
return header_dict[header_hash]
async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
"""
gets block records by height (only blocks that are part of the chain)
"""
records: List[BlockRecord] = []
hashes = []
assert batch_size < 999 # sqlite in python 3.7 has a limit on 999 variables in queries
for height in heights:
hashes.append(self.height_to_hash(height))
if len(hashes) > batch_size:
# TODO: address hint error and remove ignore
# error: Argument 1 to "get_block_records_by_hash" of "BlockStore" has incompatible type
# "List[Optional[bytes32]]"; expected "List[bytes32]" [arg-type]
res = await self.block_store.get_block_records_by_hash(hashes) # type: ignore[arg-type]
records.extend(res)
hashes = []
if len(hashes) > 0:
# TODO: address hint error and remove ignore
# error: Argument 1 to "get_block_records_by_hash" of "BlockStore" has incompatible type
# "List[Optional[bytes32]]"; expected "List[bytes32]" [arg-type]
res = await self.block_store.get_block_records_by_hash(hashes) # type: ignore[arg-type]
records.extend(res)
return records
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
if header_hash in self.__block_records:
return self.__block_records[header_hash]
return await self.block_store.get_block_record(header_hash)
def remove_block_record(self, header_hash: bytes32):
sbr = self.block_record(header_hash)
del self.__block_records[header_hash]
self.__heights_in_cache[sbr.height].remove(header_hash)
def add_block_record(self, block_record: BlockRecord):
"""
Adds a block record to the cache.
"""
self.__block_records[block_record.header_hash] = block_record
if block_record.height not in self.__heights_in_cache.keys():
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
async def persist_sub_epoch_challenge_segments(
self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
):
return await self.block_store.persist_sub_epoch_challenge_segments(ses_block_hash, segments)
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments: Optional[List[SubEpochChallengeSegment]] = await self.block_store.get_sub_epoch_challenge_segments(
ses_block_hash
)
if segments is None:
return None
return segments
# Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it.
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
pot_tuple = (vdf_info, height)
if pot_tuple in self._seen_compact_proofs:
return True
        # Periodically clean up to keep the size small. TODO: make this smarter, like FIFO.
if len(self._seen_compact_proofs) > 10000:
self._seen_compact_proofs.clear()
self._seen_compact_proofs.add(pot_tuple)
return False
async def get_block_generator(
self, block: BlockInfo, additional_blocks: Dict[bytes32, FullBlock] = None
) -> Optional[BlockGenerator]:
if additional_blocks is None:
additional_blocks = {}
ref_list = block.transactions_generator_ref_list
if block.transactions_generator is None:
assert len(ref_list) == 0
return None
if len(ref_list) == 0:
return BlockGenerator(block.transactions_generator, [], [])
result: List[SerializedProgram] = []
previous_block_hash = block.prev_header_hash
if (
self.try_block_record(previous_block_hash)
and self.height_to_hash(self.block_record(previous_block_hash).height) == previous_block_hash
):
# We are not in a reorg, no need to look up alternate header hashes
# (we can get them from height_to_hash)
if self.block_store.db_wrapper.db_version == 2:
# in the v2 database, we can look up blocks by height directly
# (as long as we're in the main chain)
result = await self.block_store.get_generators_at(block.transactions_generator_ref_list)
else:
for ref_height in block.transactions_generator_ref_list:
header_hash = self.height_to_hash(ref_height)
# if ref_height is invalid, this block should have failed with
# FUTURE_GENERATOR_REFS before getting here
assert header_hash is not None
ref_gen = await self.block_store.get_generator(header_hash)
if ref_gen is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(ref_gen)
else:
# First tries to find the blocks in additional_blocks
reorg_chain: Dict[uint32, FullBlock] = {}
curr = block
additional_height_dict = {}
while curr.prev_header_hash in additional_blocks:
prev: FullBlock = additional_blocks[curr.prev_header_hash]
additional_height_dict[prev.height] = prev
if isinstance(curr, FullBlock):
assert curr.height == prev.height + 1
reorg_chain[prev.height] = prev
curr = prev
peak: Optional[BlockRecord] = self.get_peak()
if self.contains_block(curr.prev_header_hash) and peak is not None:
# Then we look up blocks up to fork point one at a time, backtracking
previous_block_hash = curr.prev_header_hash
prev_block_record = await self.block_store.get_block_record(previous_block_hash)
prev_block = await self.block_store.get_full_block(previous_block_hash)
assert prev_block is not None
assert prev_block_record is not None
fork = find_fork_point_in_chain(self, peak, prev_block_record)
curr_2: Optional[FullBlock] = prev_block
assert curr_2 is not None and isinstance(curr_2, FullBlock)
reorg_chain[curr_2.height] = curr_2
while curr_2.height > fork and curr_2.height > 0:
curr_2 = await self.block_store.get_full_block(curr_2.prev_header_hash)
assert curr_2 is not None
reorg_chain[curr_2.height] = curr_2
for ref_height in block.transactions_generator_ref_list:
if ref_height in reorg_chain:
ref_block = reorg_chain[ref_height]
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(ref_block.transactions_generator)
else:
if ref_height in additional_height_dict:
ref_block = additional_height_dict[ref_height]
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(ref_block.transactions_generator)
else:
header_hash = self.height_to_hash(ref_height)
if header_hash is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
gen = await self.block_store.get_generator(header_hash)
if gen is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(gen)
assert len(result) == len(ref_list)
return BlockGenerator(block.transactions_generator, result, [])
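# Illustrative sketch: rough wiring of Blockchain.create() as described in its docstring.
# The store and constants objects are assumed to be constructed elsewhere by the full
# node; the helper name and arguments here are placeholders, not actual call sites.
async def _example_create_blockchain(coin_store, block_store, hint_store, constants, blockchain_dir):
    blockchain = await Blockchain.create(
        coin_store=coin_store,
        block_store=block_store,
        consensus_constants=constants,
        hint_store=hint_store,
        blockchain_dir=blockchain_dir,
        reserved_cores=0,
    )
    peak = blockchain.get_peak()
    log.info("Loaded chain, peak height: %s", None if peak is None else peak.height)
    return blockchain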
| 45.525934
| 120
| 0.635245
|
1b7d7f23ed51892d5d963e44d8a40d42147743a0
| 4,388
|
py
|
Python
|
django/contrib/gis/geos/point.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 3
|
2016-07-08T23:49:32.000Z
|
2018-04-15T22:55:01.000Z
|
django/contrib/gis/geos/point.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 27
|
2017-02-05T15:57:04.000Z
|
2018-04-15T22:57:26.000Z
|
django/contrib/gis/geos/point.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | null | null | null |
from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, float, long)) and isinstance(y, (int, float, long)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, (int, float, long)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, i.next())
capi.cs_sety(cs, 0, i.next())
if ndim == 3: capi.cs_setz(cs, 0, i.next())
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
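# Illustrative sketch: typical use of the coordinate accessors defined above. This is
# hypothetical usage, and it requires the GEOS library at runtime like the rest of the
# module (which targets Python 2, e.g. `long` and `xrange`).
def _example_point_usage():
    p = Point(5, 23, 8)   # 3D point
    p.x = 10              # set the X ordinate via the property
    # p.coords is now (10.0, 23.0, 8.0); len(p) is 3 because the point has a Z value
    return p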
| 32.264706
| 88
| 0.563355
|
bca9932734a77534ec6ab840c658fb894b65a76d
| 1,501
|
py
|
Python
|
setup.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | null | null | null |
setup.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | 1
|
2022-01-31T22:22:55.000Z
|
2022-01-31T22:22:55.000Z
|
setup.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | null | null | null |
# *modified "Tue Feb 15 09:02:23 2022" *by "Paul E. Black"
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
"""Utility function to read the README file.
Used for the long_description. It's nice, because now 1) we have a top level
README file and 2) it's easier to type in the README file than to put a raw string in below ..."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="vulnerability test suite generator",
version="3",
packages=['vuln_test_suite_gen'],
scripts=['vtsg.py'],
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires=['jinja2'],
package_data={
# If any package contains *.txt or *.xml files, include them:
'vuln_test_suite_gen': ['*.txt', '*.xml'],
},
# metadata for upload to PyPI
author="Bertrand Stivalet",
author_email="bertrand.stivalet@gmail.com",
description="Collection of vulnerable and fixed synthetic test cases expressing specific flaws.",
license="MIT",
keywords="flaws vulnerability generator",
long_description=read('README.md'),
# could also include long_description, download_url, classifiers, etc.
)
# end of setup.py
| 34.906977
| 102
| 0.696869
|
67fc679bb8b7b3ad3459eea1031754fc096d73e5
| 138,443
|
py
|
Python
|
tests/helpers/test_event.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2021-11-22T22:37:43.000Z
|
2022-03-17T00:55:28.000Z
|
tests/helpers/test_event.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 14
|
2022-01-13T04:27:21.000Z
|
2022-03-06T20:30:43.000Z
|
tests/helpers/test_event.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2022-01-02T18:49:54.000Z
|
2022-01-25T02:03:54.000Z
|
"""Test event helpers."""
# pylint: disable=protected-access
import asyncio
from datetime import date, datetime, timedelta
from unittest.mock import patch
from astral import LocationInfo
import astral.sun
import jinja2
import pytest
from homeassistant.components import sun
from homeassistant.const import MATCH_ALL
import homeassistant.core as ha
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity_registry import EVENT_ENTITY_REGISTRY_UPDATED
from homeassistant.helpers.event import (
TrackStates,
TrackTemplate,
TrackTemplateResult,
async_call_later,
async_track_point_in_time,
async_track_point_in_utc_time,
async_track_same_state,
async_track_state_added_domain,
async_track_state_change,
async_track_state_change_event,
async_track_state_change_filtered,
async_track_state_removed_domain,
async_track_sunrise,
async_track_sunset,
async_track_template,
async_track_template_result,
async_track_time_change,
async_track_time_interval,
async_track_utc_time_change,
track_point_in_utc_time,
)
from homeassistant.helpers.template import Template, result_as_boolean
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
DEFAULT_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
async def test_track_point_in_time(hass):
"""Test track point in time."""
before_birthday = datetime(1985, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
birthday_paulus = datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
after_birthday = datetime(1987, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
runs = []
async_track_point_in_utc_time(
hass, callback(lambda x: runs.append(x)), birthday_paulus
)
async_fire_time_changed(hass, before_birthday)
await hass.async_block_till_done()
assert len(runs) == 0
async_fire_time_changed(hass, birthday_paulus)
await hass.async_block_till_done()
assert len(runs) == 1
    # A point-in-time tracker will only fire once; this should do nothing
async_fire_time_changed(hass, birthday_paulus)
await hass.async_block_till_done()
assert len(runs) == 1
async_track_point_in_utc_time(
hass, callback(lambda x: runs.append(x)), birthday_paulus
)
async_fire_time_changed(hass, after_birthday)
await hass.async_block_till_done()
assert len(runs) == 2
unsub = async_track_point_in_time(
hass, callback(lambda x: runs.append(x)), birthday_paulus
)
unsub()
async_fire_time_changed(hass, after_birthday)
await hass.async_block_till_done()
assert len(runs) == 2
async def test_track_point_in_time_drift_rearm(hass):
"""Test tasks with the time rolling backwards."""
specific_runs = []
now = dt_util.utcnow()
time_that_will_not_match_right_away = datetime(
now.year + 1, 5, 24, 21, 59, 55, tzinfo=dt_util.UTC
)
async_track_point_in_utc_time(
hass,
callback(lambda x: specific_runs.append(x)),
time_that_will_not_match_right_away,
)
async_fire_time_changed(
hass,
datetime(now.year + 1, 5, 24, 21, 59, 00, tzinfo=dt_util.UTC),
fire_all=True,
)
await hass.async_block_till_done()
assert len(specific_runs) == 0
async_fire_time_changed(
hass,
datetime(now.year + 1, 5, 24, 21, 59, 55, tzinfo=dt_util.UTC),
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async def test_track_state_change_from_to_state_match(hass):
"""Test track_state_change with from and to state matchers."""
from_and_to_state_runs = []
only_from_runs = []
only_to_runs = []
match_all_runs = []
no_to_from_specified_runs = []
def from_and_to_state_callback(entity_id, old_state, new_state):
from_and_to_state_runs.append(1)
def only_from_state_callback(entity_id, old_state, new_state):
only_from_runs.append(1)
def only_to_state_callback(entity_id, old_state, new_state):
only_to_runs.append(1)
def match_all_callback(entity_id, old_state, new_state):
match_all_runs.append(1)
def no_to_from_specified_callback(entity_id, old_state, new_state):
no_to_from_specified_runs.append(1)
async_track_state_change(
hass, "light.Bowl", from_and_to_state_callback, "on", "off"
)
async_track_state_change(hass, "light.Bowl", only_from_state_callback, "on", None)
async_track_state_change(
hass, "light.Bowl", only_to_state_callback, None, ["off", "standby"]
)
async_track_state_change(
hass, "light.Bowl", match_all_callback, MATCH_ALL, MATCH_ALL
)
async_track_state_change(hass, "light.Bowl", no_to_from_specified_callback)
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(from_and_to_state_runs) == 0
assert len(only_from_runs) == 0
assert len(only_to_runs) == 0
assert len(match_all_runs) == 1
assert len(no_to_from_specified_runs) == 1
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(from_and_to_state_runs) == 1
assert len(only_from_runs) == 1
assert len(only_to_runs) == 1
assert len(match_all_runs) == 2
assert len(no_to_from_specified_runs) == 2
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(from_and_to_state_runs) == 1
assert len(only_from_runs) == 1
assert len(only_to_runs) == 1
assert len(match_all_runs) == 3
assert len(no_to_from_specified_runs) == 3
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(from_and_to_state_runs) == 1
assert len(only_from_runs) == 1
assert len(only_to_runs) == 1
assert len(match_all_runs) == 3
assert len(no_to_from_specified_runs) == 3
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(from_and_to_state_runs) == 2
assert len(only_from_runs) == 2
assert len(only_to_runs) == 2
assert len(match_all_runs) == 4
assert len(no_to_from_specified_runs) == 4
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(from_and_to_state_runs) == 2
assert len(only_from_runs) == 2
assert len(only_to_runs) == 2
assert len(match_all_runs) == 4
assert len(no_to_from_specified_runs) == 4
async def test_track_state_change(hass):
"""Test track_state_change."""
    # 3 lists to track how often our callbacks get called
specific_runs = []
wildcard_runs = []
wildercard_runs = []
def specific_run_callback(entity_id, old_state, new_state):
specific_runs.append(1)
# This is the rare use case
async_track_state_change(hass, "light.Bowl", specific_run_callback, "on", "off")
@ha.callback
def wildcard_run_callback(entity_id, old_state, new_state):
wildcard_runs.append((old_state, new_state))
# This is the most common use case
async_track_state_change(hass, "light.Bowl", wildcard_run_callback)
async def wildercard_run_callback(entity_id, old_state, new_state):
wildercard_runs.append((old_state, new_state))
async_track_state_change(hass, MATCH_ALL, wildercard_run_callback)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 0
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
assert wildcard_runs[-1][0] is None
assert wildcard_runs[-1][1] is not None
# Set same state should not trigger a state change/listener
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 0
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
    # State change on -> off
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
# State change off -> off
hass.states.async_set("light.Bowl", "off", {"some_attr": 1})
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 3
assert len(wildercard_runs) == 3
# State change off -> on
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 4
assert len(wildercard_runs) == 4
hass.states.async_remove("light.bowl")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 5
assert len(wildercard_runs) == 5
assert wildcard_runs[-1][0] is not None
assert wildcard_runs[-1][1] is None
assert wildercard_runs[-1][0] is not None
assert wildercard_runs[-1][1] is None
# Set state for different entity id
hass.states.async_set("switch.kitchen", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 5
assert len(wildercard_runs) == 6
async def test_async_track_state_change_filtered(hass):
"""Test async_track_state_change_filtered."""
single_entity_id_tracker = []
multiple_entity_id_tracker = []
@ha.callback
def single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
single_entity_id_tracker.append((old_state, new_state))
@ha.callback
def multiple_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
multiple_entity_id_tracker.append((old_state, new_state))
@ha.callback
def callback_that_throws(event):
raise ValueError
track_single = async_track_state_change_filtered(
hass, TrackStates(False, {"light.bowl"}, None), single_run_callback
)
assert track_single.listeners == {
"all": False,
"domains": None,
"entities": {"light.bowl"},
}
track_multi = async_track_state_change_filtered(
hass, TrackStates(False, {"light.bowl"}, {"switch"}), multiple_run_callback
)
assert track_multi.listeners == {
"all": False,
"domains": {"switch"},
"entities": {"light.bowl"},
}
track_throws = async_track_state_change_filtered(
hass, TrackStates(False, {"light.bowl"}, {"switch"}), callback_that_throws
)
assert track_throws.listeners == {
"all": False,
"domains": {"switch"},
"entities": {"light.bowl"},
}
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert single_entity_id_tracker[-1][0] is None
assert single_entity_id_tracker[-1][1] is not None
assert len(multiple_entity_id_tracker) == 1
assert multiple_entity_id_tracker[-1][0] is None
assert multiple_entity_id_tracker[-1][1] is not None
# Set same state should not trigger a state change/listener
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 1
    # State change on -> off
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 2
assert len(multiple_entity_id_tracker) == 2
# State change off -> off
hass.states.async_set("light.Bowl", "off", {"some_attr": 1})
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 3
assert len(multiple_entity_id_tracker) == 3
# State change off -> on
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 4
assert len(multiple_entity_id_tracker) == 4
hass.states.async_remove("light.bowl")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 5
assert single_entity_id_tracker[-1][0] is not None
assert single_entity_id_tracker[-1][1] is None
assert len(multiple_entity_id_tracker) == 5
assert multiple_entity_id_tracker[-1][0] is not None
assert multiple_entity_id_tracker[-1][1] is None
# Set state for different entity id
hass.states.async_set("switch.kitchen", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 5
assert len(multiple_entity_id_tracker) == 6
track_single.async_remove()
    # Ensure unsubbing the listener works
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 5
assert len(multiple_entity_id_tracker) == 7
assert track_multi.listeners == {
"all": False,
"domains": {"switch"},
"entities": {"light.bowl"},
}
track_multi.async_update_listeners(TrackStates(False, {"light.bowl"}, None))
assert track_multi.listeners == {
"all": False,
"domains": None,
"entities": {"light.bowl"},
}
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(multiple_entity_id_tracker) == 8
hass.states.async_set("switch.kitchen", "off")
await hass.async_block_till_done()
assert len(multiple_entity_id_tracker) == 8
track_multi.async_update_listeners(TrackStates(True, None, None))
hass.states.async_set("switch.kitchen", "off")
await hass.async_block_till_done()
assert len(multiple_entity_id_tracker) == 8
hass.states.async_set("switch.any", "off")
await hass.async_block_till_done()
assert len(multiple_entity_id_tracker) == 9
track_multi.async_remove()
track_throws.async_remove()
async def test_async_track_state_change_event(hass):
"""Test async_track_state_change_event."""
single_entity_id_tracker = []
multiple_entity_id_tracker = []
@ha.callback
def single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
single_entity_id_tracker.append((old_state, new_state))
@ha.callback
def multiple_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
multiple_entity_id_tracker.append((old_state, new_state))
@ha.callback
def callback_that_throws(event):
raise ValueError
unsub_single = async_track_state_change_event(
hass, ["light.Bowl"], single_run_callback
)
unsub_multi = async_track_state_change_event(
hass, ["light.Bowl", "switch.kitchen"], multiple_run_callback
)
unsub_throws = async_track_state_change_event(
hass, ["light.Bowl", "switch.kitchen"], callback_that_throws
)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert single_entity_id_tracker[-1][0] is None
assert single_entity_id_tracker[-1][1] is not None
assert len(multiple_entity_id_tracker) == 1
assert multiple_entity_id_tracker[-1][0] is None
assert multiple_entity_id_tracker[-1][1] is not None
# Set same state should not trigger a state change/listener
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 1
    # State change on -> off
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 2
assert len(multiple_entity_id_tracker) == 2
# State change off -> off
hass.states.async_set("light.Bowl", "off", {"some_attr": 1})
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 3
assert len(multiple_entity_id_tracker) == 3
# State change off -> on
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 4
assert len(multiple_entity_id_tracker) == 4
hass.states.async_remove("light.bowl")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 5
assert single_entity_id_tracker[-1][0] is not None
assert single_entity_id_tracker[-1][1] is None
assert len(multiple_entity_id_tracker) == 5
assert multiple_entity_id_tracker[-1][0] is not None
assert multiple_entity_id_tracker[-1][1] is None
# Set state for different entity id
hass.states.async_set("switch.kitchen", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 5
assert len(multiple_entity_id_tracker) == 6
unsub_single()
    # Ensure unsubscribing the listener works
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 5
assert len(multiple_entity_id_tracker) == 7
unsub_multi()
unsub_throws()
async def test_async_track_state_change_event_with_empty_list(hass):
"""Test async_track_state_change_event passing an empty list of entities."""
unsub_single = async_track_state_change_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2 = async_track_state_change_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2()
unsub_single()
async def test_async_track_state_added_domain(hass):
"""Test async_track_state_added_domain."""
single_entity_id_tracker = []
multiple_entity_id_tracker = []
@ha.callback
def single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
single_entity_id_tracker.append((old_state, new_state))
@ha.callback
def multiple_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
multiple_entity_id_tracker.append((old_state, new_state))
@ha.callback
def callback_that_throws(event):
raise ValueError
unsub_single = async_track_state_added_domain(hass, "light", single_run_callback)
unsub_multi = async_track_state_added_domain(
hass, ["light", "switch"], multiple_run_callback
)
unsub_throws = async_track_state_added_domain(
hass, ["light", "switch"], callback_that_throws
)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert single_entity_id_tracker[-1][0] is None
assert single_entity_id_tracker[-1][1] is not None
assert len(multiple_entity_id_tracker) == 1
assert multiple_entity_id_tracker[-1][0] is None
assert multiple_entity_id_tracker[-1][1] is not None
    # Setting the same state should not trigger a state change/listener
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 1
    # State change on -> off - nothing added so no trigger
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 1
# State change off -> off - nothing added so no trigger
hass.states.async_set("light.Bowl", "off", {"some_attr": 1})
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 1
# Removing state does not trigger
hass.states.async_remove("light.bowl")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 1
# Set state for different entity id
hass.states.async_set("switch.kitchen", "on")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 2
unsub_single()
    # Ensure unsubscribing the listener works
hass.states.async_set("light.new", "off")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(multiple_entity_id_tracker) == 3
unsub_multi()
unsub_throws()
async def test_async_track_state_added_domain_with_empty_list(hass):
"""Test async_track_state_added_domain passing an empty list of domains."""
unsub_single = async_track_state_added_domain(
hass, [], ha.callback(lambda event: None)
)
unsub_single2 = async_track_state_added_domain(
hass, [], ha.callback(lambda event: None)
)
unsub_single2()
unsub_single()
async def test_async_track_state_removed_domain_with_empty_list(hass):
"""Test async_track_state_removed_domain passing an empty list of domains."""
unsub_single = async_track_state_removed_domain(
hass, [], ha.callback(lambda event: None)
)
unsub_single2 = async_track_state_removed_domain(
hass, [], ha.callback(lambda event: None)
)
unsub_single2()
unsub_single()
async def test_async_track_state_removed_domain(hass):
"""Test async_track_state_removed_domain."""
single_entity_id_tracker = []
multiple_entity_id_tracker = []
@ha.callback
def single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
single_entity_id_tracker.append((old_state, new_state))
@ha.callback
def multiple_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
multiple_entity_id_tracker.append((old_state, new_state))
@ha.callback
def callback_that_throws(event):
raise ValueError
unsub_single = async_track_state_removed_domain(hass, "light", single_run_callback)
unsub_multi = async_track_state_removed_domain(
hass, ["light", "switch"], multiple_run_callback
)
unsub_throws = async_track_state_removed_domain(
hass, ["light", "switch"], callback_that_throws
)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
hass.states.async_remove("light.Bowl")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert single_entity_id_tracker[-1][1] is None
assert single_entity_id_tracker[-1][0] is not None
assert len(multiple_entity_id_tracker) == 1
assert multiple_entity_id_tracker[-1][1] is None
assert multiple_entity_id_tracker[-1][0] is not None
    # Added and then removed (light)
hass.states.async_set("light.Bowl", "on")
hass.states.async_remove("light.Bowl")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 2
assert len(multiple_entity_id_tracker) == 2
    # Added and then removed (light)
hass.states.async_set("light.Bowl", "off")
hass.states.async_remove("light.Bowl")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 3
assert len(multiple_entity_id_tracker) == 3
    # Added and then removed (light)
hass.states.async_set("light.Bowl", "off", {"some_attr": 1})
hass.states.async_remove("light.Bowl")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 4
assert len(multiple_entity_id_tracker) == 4
    # Added and then removed (switch)
hass.states.async_set("switch.kitchen", "on")
hass.states.async_remove("switch.kitchen")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 4
assert len(multiple_entity_id_tracker) == 5
unsub_single()
    # Ensure unsubscribing the listener works
hass.states.async_set("light.new", "off")
hass.states.async_remove("light.new")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 4
assert len(multiple_entity_id_tracker) == 6
unsub_multi()
unsub_throws()
async def test_async_track_state_removed_domain_match_all(hass):
"""Test async_track_state_removed_domain with a match_all."""
single_entity_id_tracker = []
match_all_entity_id_tracker = []
@ha.callback
def single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
single_entity_id_tracker.append((old_state, new_state))
@ha.callback
def match_all_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
match_all_entity_id_tracker.append((old_state, new_state))
unsub_single = async_track_state_removed_domain(hass, "light", single_run_callback)
unsub_match_all = async_track_state_removed_domain(
hass, MATCH_ALL, match_all_run_callback
)
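    # The MATCH_ALL tracker sees removals from every domain, the single tracker only from light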
hass.states.async_set("light.new", "off")
hass.states.async_remove("light.new")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(match_all_entity_id_tracker) == 1
hass.states.async_set("switch.new", "off")
hass.states.async_remove("switch.new")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(match_all_entity_id_tracker) == 2
unsub_match_all()
unsub_single()
hass.states.async_set("switch.new", "off")
hass.states.async_remove("switch.new")
await hass.async_block_till_done()
assert len(single_entity_id_tracker) == 1
assert len(match_all_entity_id_tracker) == 2
async def test_track_template(hass):
"""Test tracking template."""
specific_runs = []
wildcard_runs = []
wildercard_runs = []
template_condition = Template("{{states.switch.test.state == 'on'}}", hass)
template_condition_var = Template(
"{{states.switch.test.state == 'on' and test == 5}}", hass
)
hass.states.async_set("switch.test", "off")
def specific_run_callback(entity_id, old_state, new_state):
specific_runs.append(1)
async_track_template(hass, template_condition, specific_run_callback)
@ha.callback
def wildcard_run_callback(entity_id, old_state, new_state):
wildcard_runs.append((old_state, new_state))
async_track_template(hass, template_condition, wildcard_run_callback)
async def wildercard_run_callback(entity_id, old_state, new_state):
wildercard_runs.append((old_state, new_state))
async_track_template(
hass, template_condition_var, wildercard_run_callback, {"test": 5}
)
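    # Per the asserts below, the callbacks only fire when the template result flips from false to true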
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
template_iterate = Template("{{ (states.switch | length) > 0 }}", hass)
iterate_calls = []
@ha.callback
def iterate_callback(entity_id, old_state, new_state):
iterate_calls.append((entity_id, old_state, new_state))
async_track_template(hass, template_iterate, iterate_callback)
await hass.async_block_till_done()
hass.states.async_set("switch.new", "on")
await hass.async_block_till_done()
assert len(iterate_calls) == 1
assert iterate_calls[0][0] == "switch.new"
assert iterate_calls[0][1] is None
assert iterate_calls[0][2].state == "on"
async def test_track_template_error(hass, caplog):
"""Test tracking template with error."""
template_error = Template("{{ (states.switch | lunch) > 0 }}", hass)
error_calls = []
@ha.callback
def error_callback(entity_id, old_state, new_state):
error_calls.append((entity_id, old_state, new_state))
async_track_template(hass, template_error, error_callback)
await hass.async_block_till_done()
hass.states.async_set("switch.new", "on")
await hass.async_block_till_done()
assert not error_calls
assert "lunch" in caplog.text
assert "TemplateAssertionError" in caplog.text
caplog.clear()
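    # With async_render patched to return "ok", a change to an unrelated entity should not log the template error again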
with patch.object(Template, "async_render") as render:
render.return_value = "ok"
hass.states.async_set("switch.not_exist", "off")
await hass.async_block_till_done()
assert "no filter named 'lunch'" not in caplog.text
assert "TemplateAssertionError" not in caplog.text
async def test_track_template_error_can_recover(hass, caplog):
"""Test tracking template with error."""
hass.states.async_set("switch.data_system", "cow", {"opmode": 0})
template_error = Template(
"{{ states.sensor.data_system.attributes['opmode'] == '0' }}", hass
)
error_calls = []
@ha.callback
def error_callback(entity_id, old_state, new_state):
error_calls.append((entity_id, old_state, new_state))
async_track_template(hass, template_error, error_callback)
await hass.async_block_till_done()
assert not error_calls
hass.states.async_remove("switch.data_system")
assert "UndefinedError" in caplog.text
hass.states.async_set("switch.data_system", "cow", {"opmode": 0})
caplog.clear()
assert "UndefinedError" not in caplog.text
async def test_track_template_time_change(hass, caplog):
"""Test tracking template with time change."""
template_error = Template("{{ utcnow().minute % 2 == 0 }}", hass)
calls = []
@ha.callback
def error_callback(entity_id, old_state, new_state):
calls.append((entity_id, old_state, new_state))
start_time = dt_util.utcnow() + timedelta(hours=24)
time_that_will_not_match_right_away = start_time.replace(minute=1, second=0)
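    # Start at minute 1 so the template initially renders false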
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
async_track_template(hass, template_error, error_callback)
await hass.async_block_till_done()
assert not calls
first_time = start_time.replace(minute=2, second=0)
with patch("homeassistant.util.dt.utcnow", return_value=first_time):
async_fire_time_changed(hass, first_time)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0] == (None, None, None)
async def test_track_template_result(hass):
"""Test tracking template."""
specific_runs = []
wildcard_runs = []
wildercard_runs = []
template_condition = Template("{{states.sensor.test.state}}", hass)
template_condition_var = Template(
"{{(states.sensor.test.state|int) + test }}", hass
)
def specific_run_callback(event, updates):
track_result = updates.pop()
specific_runs.append(int(track_result.result))
async_track_template_result(
hass, [TrackTemplate(template_condition, None)], specific_run_callback
)
@ha.callback
def wildcard_run_callback(event, updates):
track_result = updates.pop()
wildcard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
async_track_template_result(
hass, [TrackTemplate(template_condition, None)], wildcard_run_callback
)
async def wildercard_run_callback(event, updates):
track_result = updates.pop()
wildercard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
async_track_template_result(
hass,
[TrackTemplate(template_condition_var, {"test": 5})],
wildercard_run_callback,
)
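    # The wildercard tracker renders with the extra variable test=5, so its results are offset by 5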
await hass.async_block_till_done()
hass.states.async_set("sensor.test", 5)
await hass.async_block_till_done()
assert specific_runs == [5]
assert wildcard_runs == [(0, 5)]
assert wildercard_runs == [(0, 10)]
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert specific_runs == [5, 30]
assert wildcard_runs == [(0, 5), (5, 30)]
assert wildercard_runs == [(0, 10), (10, 35)]
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
hass.states.async_set("sensor.test", 5)
await hass.async_block_till_done()
assert len(specific_runs) == 3
assert len(wildcard_runs) == 3
assert len(wildercard_runs) == 3
hass.states.async_set("sensor.test", 5)
await hass.async_block_till_done()
assert len(specific_runs) == 3
assert len(wildcard_runs) == 3
assert len(wildercard_runs) == 3
hass.states.async_set("sensor.test", 20)
await hass.async_block_till_done()
assert len(specific_runs) == 4
assert len(wildcard_runs) == 4
assert len(wildercard_runs) == 4
async def test_track_template_result_none(hass):
"""Test tracking template."""
specific_runs = []
wildcard_runs = []
wildercard_runs = []
template_condition = Template("{{state_attr('sensor.test', 'battery')}}", hass)
template_condition_var = Template(
"{{(state_attr('sensor.test', 'battery')|int) + test }}", hass
)
def specific_run_callback(event, updates):
track_result = updates.pop()
result = int(track_result.result) if track_result.result is not None else None
specific_runs.append(result)
async_track_template_result(
hass, [TrackTemplate(template_condition, None)], specific_run_callback
)
@ha.callback
def wildcard_run_callback(event, updates):
track_result = updates.pop()
last_result = (
int(track_result.last_result)
if track_result.last_result is not None
else None
)
result = int(track_result.result) if track_result.result is not None else None
wildcard_runs.append((last_result, result))
async_track_template_result(
hass, [TrackTemplate(template_condition, None)], wildcard_run_callback
)
async def wildercard_run_callback(event, updates):
track_result = updates.pop()
last_result = (
int(track_result.last_result)
if track_result.last_result is not None
else None
)
result = int(track_result.result) if track_result.result is not None else None
wildercard_runs.append((last_result, result))
async_track_template_result(
hass,
[TrackTemplate(template_condition_var, {"test": 5})],
wildercard_run_callback,
)
await hass.async_block_till_done()
hass.states.async_set("sensor.test", "-")
await hass.async_block_till_done()
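    # With no battery attribute the plain template renders None, while the var template yields 5 (missing value coerced to 0 plus test=5)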
assert specific_runs == [None]
assert wildcard_runs == [(None, None)]
assert wildercard_runs == [(None, 5)]
hass.states.async_set("sensor.test", "-", {"battery": 5})
await hass.async_block_till_done()
assert specific_runs == [None, 5]
assert wildcard_runs == [(None, None), (None, 5)]
assert wildercard_runs == [(None, 5), (5, 10)]
async def test_track_template_result_super_template(hass):
"""Test tracking template with super template listening to same entity."""
specific_runs = []
specific_runs_availability = []
wildcard_runs = []
wildcard_runs_availability = []
wildercard_runs = []
wildercard_runs_availability = []
template_availability = Template("{{ is_number(states('sensor.test')) }}", hass)
template_condition = Template("{{states.sensor.test.state}}", hass)
template_condition_var = Template(
"{{(states.sensor.test.state|int) + test }}", hass
)
def specific_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
specific_runs.append(int(track_result.result))
elif track_result.template is template_availability:
specific_runs_availability.append(track_result.result)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
specific_run_callback,
has_super_template=True,
)
@ha.callback
def wildcard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
wildcard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildcard_runs_availability.append(track_result.result)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
wildcard_run_callback,
has_super_template=True,
)
async def wildercard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition_var:
wildercard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildercard_runs_availability.append(track_result.result)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition_var, {"test": 5}),
],
wildercard_run_callback,
has_super_template=True,
)
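    # While the availability (super) template renders false, the condition templates are not re-rendered and their trackers stay empty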
await hass.async_block_till_done()
hass.states.async_set("sensor.test", "unavailable")
await hass.async_block_till_done()
assert specific_runs_availability == [False]
assert wildcard_runs_availability == [False]
assert wildercard_runs_availability == [False]
assert specific_runs == []
assert wildcard_runs == []
assert wildercard_runs == []
hass.states.async_set("sensor.test", 5)
await hass.async_block_till_done()
assert specific_runs_availability == [False, True]
assert wildcard_runs_availability == [False, True]
assert wildercard_runs_availability == [False, True]
assert specific_runs == [5]
assert wildcard_runs == [(0, 5)]
assert wildercard_runs == [(0, 10)]
hass.states.async_set("sensor.test", "unknown")
await hass.async_block_till_done()
assert specific_runs_availability == [False, True, False]
assert wildcard_runs_availability == [False, True, False]
assert wildercard_runs_availability == [False, True, False]
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert specific_runs_availability == [False, True, False, True]
assert wildcard_runs_availability == [False, True, False, True]
assert wildercard_runs_availability == [False, True, False, True]
assert specific_runs == [5, 30]
assert wildcard_runs == [(0, 5), (5, 30)]
assert wildercard_runs == [(0, 10), (10, 35)]
hass.states.async_set("sensor.test", "other")
await hass.async_block_till_done()
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
assert len(specific_runs_availability) == 6
assert len(wildcard_runs_availability) == 6
assert len(wildercard_runs_availability) == 6
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
assert len(specific_runs_availability) == 6
assert len(wildcard_runs_availability) == 6
assert len(wildercard_runs_availability) == 6
hass.states.async_set("sensor.test", 31)
await hass.async_block_till_done()
assert len(specific_runs) == 3
assert len(wildcard_runs) == 3
assert len(wildercard_runs) == 3
assert len(specific_runs_availability) == 6
assert len(wildcard_runs_availability) == 6
assert len(wildercard_runs_availability) == 6
async def test_track_template_result_super_template_initially_false(hass):
"""Test tracking template with super template listening to same entity."""
specific_runs = []
specific_runs_availability = []
wildcard_runs = []
wildcard_runs_availability = []
wildercard_runs = []
wildercard_runs_availability = []
template_availability = Template("{{ is_number(states('sensor.test')) }}", hass)
template_condition = Template("{{states.sensor.test.state}}", hass)
template_condition_var = Template(
"{{(states.sensor.test.state|int) + test }}", hass
)
# Make the super template initially false
hass.states.async_set("sensor.test", "unavailable")
await hass.async_block_till_done()
def specific_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
specific_runs.append(int(track_result.result))
elif track_result.template is template_availability:
specific_runs_availability.append(track_result.result)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
specific_run_callback,
has_super_template=True,
)
@ha.callback
def wildcard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
wildcard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildcard_runs_availability.append(track_result.result)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
wildcard_run_callback,
has_super_template=True,
)
async def wildercard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition_var:
wildercard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildercard_runs_availability.append(track_result.result)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition_var, {"test": 5}),
],
wildercard_run_callback,
has_super_template=True,
)
await hass.async_block_till_done()
assert specific_runs_availability == []
assert wildcard_runs_availability == []
assert wildercard_runs_availability == []
assert specific_runs == []
assert wildcard_runs == []
assert wildercard_runs == []
hass.states.async_set("sensor.test", 5)
await hass.async_block_till_done()
assert specific_runs_availability == [True]
assert wildcard_runs_availability == [True]
assert wildercard_runs_availability == [True]
assert specific_runs == [5]
assert wildcard_runs == [(0, 5)]
assert wildercard_runs == [(0, 10)]
hass.states.async_set("sensor.test", "unknown")
await hass.async_block_till_done()
assert specific_runs_availability == [True, False]
assert wildcard_runs_availability == [True, False]
assert wildercard_runs_availability == [True, False]
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert specific_runs_availability == [True, False, True]
assert wildcard_runs_availability == [True, False, True]
assert wildercard_runs_availability == [True, False, True]
assert specific_runs == [5, 30]
assert wildcard_runs == [(0, 5), (5, 30)]
assert wildercard_runs == [(0, 10), (10, 35)]
hass.states.async_set("sensor.test", "other")
await hass.async_block_till_done()
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
assert len(specific_runs_availability) == 5
assert len(wildcard_runs_availability) == 5
assert len(wildercard_runs_availability) == 5
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
assert len(specific_runs_availability) == 5
assert len(wildcard_runs_availability) == 5
assert len(wildercard_runs_availability) == 5
hass.states.async_set("sensor.test", 31)
await hass.async_block_till_done()
assert len(specific_runs) == 3
assert len(wildcard_runs) == 3
assert len(wildercard_runs) == 3
assert len(specific_runs_availability) == 5
assert len(wildcard_runs_availability) == 5
assert len(wildercard_runs_availability) == 5
@pytest.mark.parametrize(
"availability_template",
[
"{{ states('sensor.test2') != 'unavailable' }}",
"{% if states('sensor.test2') != 'unavailable' -%} true {%- else -%} false {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} 1 {%- else -%} 0 {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} yes {%- else -%} no {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} on {%- else -%} off {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} enable {%- else -%} disable {%- endif %}",
# This will throw when sensor.test2 is not "unavailable"
"{% if states('sensor.test2') != 'unavailable' -%} {{'a' + 5}} {%- else -%} false {%- endif %}",
],
)
async def test_track_template_result_super_template_2(hass, availability_template):
"""Test tracking template with super template listening to different entities."""
specific_runs = []
specific_runs_availability = []
wildcard_runs = []
wildcard_runs_availability = []
wildercard_runs = []
wildercard_runs_availability = []
template_availability = Template(availability_template)
template_condition = Template("{{states.sensor.test.state}}", hass)
template_condition_var = Template(
"{{(states.sensor.test.state|int) + test }}", hass
)
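    # Normalize the super template result for the assertions: a TemplateError counts as available (True)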
def _super_template_as_boolean(result):
if isinstance(result, TemplateError):
return True
return result_as_boolean(result)
def specific_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
specific_runs.append(int(track_result.result))
elif track_result.template is template_availability:
specific_runs_availability.append(
_super_template_as_boolean(track_result.result)
)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
specific_run_callback,
has_super_template=True,
)
@ha.callback
def wildcard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
wildcard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildcard_runs_availability.append(
_super_template_as_boolean(track_result.result)
)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
wildcard_run_callback,
has_super_template=True,
)
async def wildercard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition_var:
wildercard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildercard_runs_availability.append(
_super_template_as_boolean(track_result.result)
)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition_var, {"test": 5}),
],
wildercard_run_callback,
has_super_template=True,
)
await hass.async_block_till_done()
hass.states.async_set("sensor.test2", "unavailable")
await hass.async_block_till_done()
assert specific_runs_availability == [False]
assert wildcard_runs_availability == [False]
assert wildercard_runs_availability == [False]
assert specific_runs == []
assert wildcard_runs == []
assert wildercard_runs == []
hass.states.async_set("sensor.test", 5)
hass.states.async_set("sensor.test2", "available")
await hass.async_block_till_done()
assert specific_runs_availability == [False, True]
assert wildcard_runs_availability == [False, True]
assert wildercard_runs_availability == [False, True]
assert specific_runs == [5]
assert wildcard_runs == [(0, 5)]
assert wildercard_runs == [(0, 10)]
hass.states.async_set("sensor.test2", "unknown")
await hass.async_block_till_done()
assert specific_runs_availability == [False, True]
assert wildcard_runs_availability == [False, True]
assert wildercard_runs_availability == [False, True]
hass.states.async_set("sensor.test2", "available")
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert specific_runs_availability == [False, True]
assert wildcard_runs_availability == [False, True]
assert wildercard_runs_availability == [False, True]
assert specific_runs == [5, 30]
assert wildcard_runs == [(0, 5), (5, 30)]
assert wildercard_runs == [(0, 10), (10, 35)]
@pytest.mark.parametrize(
"availability_template",
[
"{{ states('sensor.test2') != 'unavailable' }}",
"{% if states('sensor.test2') != 'unavailable' -%} true {%- else -%} false {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} 1 {%- else -%} 0 {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} yes {%- else -%} no {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} on {%- else -%} off {%- endif %}",
"{% if states('sensor.test2') != 'unavailable' -%} enable {%- else -%} disable {%- endif %}",
# This will throw when sensor.test2 is not "unavailable"
"{% if states('sensor.test2') != 'unavailable' -%} {{'a' + 5}} {%- else -%} false {%- endif %}",
],
)
async def test_track_template_result_super_template_2_initially_false(
hass, availability_template
):
"""Test tracking template with super template listening to different entities."""
specific_runs = []
specific_runs_availability = []
wildcard_runs = []
wildcard_runs_availability = []
wildercard_runs = []
wildercard_runs_availability = []
template_availability = Template(availability_template)
template_condition = Template("{{states.sensor.test.state}}", hass)
template_condition_var = Template(
"{{(states.sensor.test.state|int) + test }}", hass
)
hass.states.async_set("sensor.test2", "unavailable")
await hass.async_block_till_done()
def _super_template_as_boolean(result):
if isinstance(result, TemplateError):
return True
return result_as_boolean(result)
def specific_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
specific_runs.append(int(track_result.result))
elif track_result.template is template_availability:
specific_runs_availability.append(
_super_template_as_boolean(track_result.result)
)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
specific_run_callback,
has_super_template=True,
)
@ha.callback
def wildcard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition:
wildcard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildcard_runs_availability.append(
_super_template_as_boolean(track_result.result)
)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition, None),
],
wildcard_run_callback,
has_super_template=True,
)
async def wildercard_run_callback(event, updates):
for track_result in updates:
if track_result.template is template_condition_var:
wildercard_runs.append(
(int(track_result.last_result or 0), int(track_result.result))
)
elif track_result.template is template_availability:
wildercard_runs_availability.append(
_super_template_as_boolean(track_result.result)
)
async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_condition_var, {"test": 5}),
],
wildercard_run_callback,
has_super_template=True,
)
await hass.async_block_till_done()
assert specific_runs_availability == []
assert wildcard_runs_availability == []
assert wildercard_runs_availability == []
assert specific_runs == []
assert wildcard_runs == []
assert wildercard_runs == []
hass.states.async_set("sensor.test", 5)
hass.states.async_set("sensor.test2", "available")
await hass.async_block_till_done()
assert specific_runs_availability == [True]
assert wildcard_runs_availability == [True]
assert wildercard_runs_availability == [True]
assert specific_runs == [5]
assert wildcard_runs == [(0, 5)]
assert wildercard_runs == [(0, 10)]
hass.states.async_set("sensor.test2", "unknown")
await hass.async_block_till_done()
assert specific_runs_availability == [True]
assert wildcard_runs_availability == [True]
assert wildercard_runs_availability == [True]
hass.states.async_set("sensor.test2", "available")
hass.states.async_set("sensor.test", 30)
await hass.async_block_till_done()
assert specific_runs_availability == [True]
assert wildcard_runs_availability == [True]
assert wildercard_runs_availability == [True]
assert specific_runs == [5, 30]
assert wildcard_runs == [(0, 5), (5, 30)]
assert wildercard_runs == [(0, 10), (10, 35)]
async def test_track_template_result_complex(hass):
"""Test tracking template."""
specific_runs = []
template_complex_str = """
{% if states("sensor.domain") == "light" %}
{{ states.light | map(attribute='entity_id') | list }}
{% elif states("sensor.domain") == "lock" %}
{{ states.lock | map(attribute='entity_id') | list }}
{% elif states("sensor.domain") == "single_binary_sensor" %}
{{ states("binary_sensor.single") }}
{% else %}
{{ states | map(attribute='entity_id') | list }}
{% endif %}
"""
template_complex = Template(template_complex_str, hass)
def specific_run_callback(event, updates):
specific_runs.append(updates.pop().result)
hass.states.async_set("light.one", "on")
hass.states.async_set("lock.one", "locked")
info = async_track_template_result(
hass,
[TrackTemplate(template_complex, None, timedelta(seconds=0))],
specific_run_callback,
)
await hass.async_block_till_done()
assert info.listeners == {
"all": True,
"domains": set(),
"entities": set(),
"time": False,
}
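    # sensor.domain has no state yet, so the template takes the else branch over all states and needs an all-states listener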
hass.states.async_set("sensor.domain", "light")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert specific_runs[0] == ["light.one"]
assert info.listeners == {
"all": False,
"domains": {"light"},
"entities": {"sensor.domain"},
"time": False,
}
hass.states.async_set("sensor.domain", "lock")
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert specific_runs[1] == ["lock.one"]
assert info.listeners == {
"all": False,
"domains": {"lock"},
"entities": {"sensor.domain"},
"time": False,
}
hass.states.async_set("sensor.domain", "all")
await hass.async_block_till_done()
assert len(specific_runs) == 3
assert "light.one" in specific_runs[2]
assert "lock.one" in specific_runs[2]
assert "sensor.domain" in specific_runs[2]
assert info.listeners == {
"all": True,
"domains": set(),
"entities": set(),
"time": False,
}
hass.states.async_set("sensor.domain", "light")
await hass.async_block_till_done()
assert len(specific_runs) == 4
assert specific_runs[3] == ["light.one"]
assert info.listeners == {
"all": False,
"domains": {"light"},
"entities": {"sensor.domain"},
"time": False,
}
hass.states.async_set("light.two", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 5
assert "light.one" in specific_runs[4]
assert "light.two" in specific_runs[4]
assert "sensor.domain" not in specific_runs[4]
assert info.listeners == {
"all": False,
"domains": {"light"},
"entities": {"sensor.domain"},
"time": False,
}
hass.states.async_set("light.three", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 6
assert "light.one" in specific_runs[5]
assert "light.two" in specific_runs[5]
assert "light.three" in specific_runs[5]
assert "sensor.domain" not in specific_runs[5]
assert info.listeners == {
"all": False,
"domains": {"light"},
"entities": {"sensor.domain"},
"time": False,
}
hass.states.async_set("sensor.domain", "lock")
await hass.async_block_till_done()
assert len(specific_runs) == 7
assert specific_runs[6] == ["lock.one"]
assert info.listeners == {
"all": False,
"domains": {"lock"},
"entities": {"sensor.domain"},
"time": False,
}
hass.states.async_set("sensor.domain", "single_binary_sensor")
await hass.async_block_till_done()
assert len(specific_runs) == 8
assert specific_runs[7] == "unknown"
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"binary_sensor.single", "sensor.domain"},
"time": False,
}
hass.states.async_set("binary_sensor.single", "binary_sensor_on")
await hass.async_block_till_done()
assert len(specific_runs) == 9
assert specific_runs[8] == "binary_sensor_on"
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"binary_sensor.single", "sensor.domain"},
"time": False,
}
hass.states.async_set("sensor.domain", "lock")
await hass.async_block_till_done()
assert len(specific_runs) == 10
assert specific_runs[9] == ["lock.one"]
assert info.listeners == {
"all": False,
"domains": {"lock"},
"entities": {"sensor.domain"},
"time": False,
}
async def test_track_template_result_with_wildcard(hass):
"""Test tracking template with a wildcard."""
specific_runs = []
template_complex_str = r"""
{% for state in states %}
{% if state.entity_id | regex_match('.*\.office_') %}
{{ state.entity_id }}={{ state.state }}
{% endif %}
{% endfor %}
"""
template_complex = Template(template_complex_str, hass)
def specific_run_callback(event, updates):
specific_runs.append(updates.pop().result)
hass.states.async_set("cover.office_drapes", "closed")
hass.states.async_set("cover.office_window", "closed")
hass.states.async_set("cover.office_skylight", "open")
info = async_track_template_result(
hass, [TrackTemplate(template_complex, None)], specific_run_callback
)
await hass.async_block_till_done()
hass.states.async_set("cover.office_window", "open")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert info.listeners == {
"all": True,
"domains": set(),
"entities": set(),
"time": False,
}
assert "cover.office_drapes=closed" in specific_runs[0]
assert "cover.office_window=open" in specific_runs[0]
assert "cover.office_skylight=open" in specific_runs[0]
async def test_track_template_result_with_group(hass):
"""Test tracking template with a group."""
hass.states.async_set("sensor.power_1", 0)
hass.states.async_set("sensor.power_2", 200.2)
hass.states.async_set("sensor.power_3", 400.4)
hass.states.async_set("sensor.power_4", 800.8)
assert await async_setup_component(
hass,
"group",
{"group": {"power_sensors": "sensor.power_1,sensor.power_2,sensor.power_3"}},
)
await hass.async_block_till_done()
assert hass.states.get("group.power_sensors")
assert hass.states.get("group.power_sensors").state
specific_runs = []
template_complex_str = r"""
{{ states.group.power_sensors.attributes.entity_id | expand | map(attribute='state')|map('float')|sum }}
"""
template_complex = Template(template_complex_str, hass)
def specific_run_callback(event, updates):
specific_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template_complex, None)], specific_run_callback
)
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {
"group.power_sensors",
"sensor.power_1",
"sensor.power_2",
"sensor.power_3",
},
"time": False,
}
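    # The rendered sum depends on the group entity plus each member sensor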
hass.states.async_set("sensor.power_1", 100.1)
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert specific_runs[0] == 100.1 + 200.2 + 400.4
hass.states.async_set("sensor.power_3", 0)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert specific_runs[1] == 100.1 + 200.2 + 0
with patch(
"homeassistant.config.load_yaml_config_file",
return_value={
"group": {
"power_sensors": "sensor.power_1,sensor.power_2,sensor.power_3,sensor.power_4",
}
},
):
await hass.services.async_call("group", "reload")
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert specific_runs[-1] == 100.1 + 200.2 + 0 + 800.8
async def test_track_template_result_and_conditional(hass):
"""Test tracking template with an and conditional."""
specific_runs = []
hass.states.async_set("light.a", "off")
hass.states.async_set("light.b", "off")
template_str = '{% if states.light.a.state == "on" and states.light.b.state == "on" %}on{% else %}off{% endif %}'
template = Template(template_str, hass)
def specific_run_callback(event, updates):
specific_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template, None)], specific_run_callback
)
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"light.a"},
"time": False,
}
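    # Only light.a is tracked at first: with light.a off the and-condition short-circuits before light.b is rendered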
hass.states.async_set("light.b", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 0
hass.states.async_set("light.a", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert specific_runs[0] == "on"
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"light.a", "light.b"},
"time": False,
}
hass.states.async_set("light.b", "off")
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert specific_runs[1] == "off"
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"light.a", "light.b"},
"time": False,
}
hass.states.async_set("light.a", "off")
await hass.async_block_till_done()
assert len(specific_runs) == 2
hass.states.async_set("light.b", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 2
hass.states.async_set("light.a", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 3
assert specific_runs[2] == "on"
async def test_track_template_result_iterator(hass):
"""Test tracking template."""
iterator_runs = []
@ha.callback
def iterator_callback(event, updates):
iterator_runs.append(updates.pop().result)
async_track_template_result(
hass,
[
TrackTemplate(
Template(
"""
{% for state in states.sensor %}
{% if state.state == 'on' %}
{{ state.entity_id }},
{% endif %}
{% endfor %}
""",
hass,
),
None,
timedelta(seconds=0),
)
],
iterator_callback,
)
await hass.async_block_till_done()
hass.states.async_set("sensor.test", 5)
await hass.async_block_till_done()
assert iterator_runs == [""]
filter_runs = []
@ha.callback
def filter_callback(event, updates):
filter_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[
TrackTemplate(
Template(
"""{{ states.sensor|selectattr("state","equalto","on")
|join(",", attribute="entity_id") }}""",
hass,
),
None,
timedelta(seconds=0),
)
],
filter_callback,
)
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": {"sensor"},
"entities": set(),
"time": False,
}
hass.states.async_set("sensor.test", 6)
await hass.async_block_till_done()
assert filter_runs == [""]
assert iterator_runs == [""]
hass.states.async_set("sensor.new", "on")
await hass.async_block_till_done()
assert iterator_runs == ["", "sensor.new,"]
assert filter_runs == ["", "sensor.new"]
async def test_track_template_result_errors(hass, caplog):
"""Test tracking template with errors in the template."""
template_syntax_error = Template("{{states.switch", hass)
template_not_exist = Template("{{states.switch.not_exist.state }}", hass)
syntax_error_runs = []
not_exist_runs = []
@ha.callback
def syntax_error_listener(event, updates):
track_result = updates.pop()
syntax_error_runs.append(
(
event,
track_result.template,
track_result.last_result,
track_result.result,
)
)
async_track_template_result(
hass, [TrackTemplate(template_syntax_error, None)], syntax_error_listener
)
await hass.async_block_till_done()
assert len(syntax_error_runs) == 0
assert "TemplateSyntaxError" in caplog.text
@ha.callback
def not_exist_runs_error_listener(event, updates):
template_track = updates.pop()
not_exist_runs.append(
(
event,
template_track.template,
template_track.last_result,
template_track.result,
)
)
async_track_template_result(
hass,
[TrackTemplate(template_not_exist, None)],
not_exist_runs_error_listener,
)
await hass.async_block_till_done()
assert len(syntax_error_runs) == 0
assert len(not_exist_runs) == 0
hass.states.async_set("switch.not_exist", "off")
await hass.async_block_till_done()
assert len(not_exist_runs) == 1
assert not_exist_runs[0][0].data.get("entity_id") == "switch.not_exist"
assert not_exist_runs[0][1] == template_not_exist
assert not_exist_runs[0][2] is None
assert not_exist_runs[0][3] == "off"
hass.states.async_set("switch.not_exist", "on")
await hass.async_block_till_done()
assert len(syntax_error_runs) == 1
assert len(not_exist_runs) == 2
assert not_exist_runs[1][0].data.get("entity_id") == "switch.not_exist"
assert not_exist_runs[1][1] == template_not_exist
assert not_exist_runs[1][2] == "off"
assert not_exist_runs[1][3] == "on"
with patch.object(Template, "async_render") as render:
render.side_effect = TemplateError(jinja2.TemplateError())
hass.states.async_set("switch.not_exist", "off")
await hass.async_block_till_done()
assert len(not_exist_runs) == 3
assert not_exist_runs[2][0].data.get("entity_id") == "switch.not_exist"
assert not_exist_runs[2][1] == template_not_exist
assert not_exist_runs[2][2] == "on"
assert isinstance(not_exist_runs[2][3], TemplateError)
async def test_static_string(hass):
"""Test a static string."""
template_refresh = Template("{{ 'static' }}", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template_refresh, None)], refresh_listener
)
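    # A static template has no state dependencies; trigger the listener with an explicit refresh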
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == ["static"]
async def test_track_template_rate_limit(hass):
"""Test template rate limit."""
template_refresh = Template("{{ states | count }}", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, None, timedelta(seconds=0.1))],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == [0]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == [0]
info.async_refresh()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
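    # sensor.two changed inside the rate limit window, so the re-render is deferred until the window expires below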
next_time = dt_util.utcnow() + timedelta(seconds=0.125)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
hass.states.async_set("sensor.four", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
next_time = dt_util.utcnow() + timedelta(seconds=0.125 * 2)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2, 4]
hass.states.async_set("sensor.five", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2, 4]
async def test_track_template_rate_limit_super(hass):
"""Test template rate limit with super template."""
template_availability = Template(
"{{ states('sensor.one') != 'unavailable' }}", hass
)
template_refresh = Template("{{ states | count }}", hass)
availability_runs = []
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
for track_result in updates:
if track_result.template is template_refresh:
refresh_runs.append(track_result.result)
elif track_result.template is template_availability:
availability_runs.append(track_result.result)
info = async_track_template_result(
hass,
[
TrackTemplate(template_availability, None),
TrackTemplate(template_refresh, None, timedelta(seconds=0.1)),
],
refresh_listener,
has_super_template=True,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == [0]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == [0]
info.async_refresh()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.one", "unavailable")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
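    # With sensor.one unavailable the super template is false, so the expiring rate limit below does not trigger a re-render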
next_time = dt_util.utcnow() + timedelta(seconds=0.125)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.four", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
    # The super template renders as true -> triggers re-rendering of all templates
hass.states.async_set("sensor.one", "available")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 4]
next_time = dt_util.utcnow() + timedelta(seconds=0.125 * 2)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 4]
hass.states.async_set("sensor.five", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 4]
async def test_track_template_rate_limit_super_2(hass):
"""Test template rate limit with rate limited super template."""
# Somewhat forced example of a rate limited template
template_availability = Template("{{ states | count % 2 == 1 }}", hass)
template_refresh = Template("{{ states | count }}", hass)
availability_runs = []
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
for track_result in updates:
if track_result.template is template_refresh:
refresh_runs.append(track_result.result)
elif track_result.template is template_availability:
availability_runs.append(track_result.result)
info = async_track_template_result(
hass,
[
TrackTemplate(template_availability, None, timedelta(seconds=0.1)),
TrackTemplate(template_refresh, None, timedelta(seconds=0.1)),
],
refresh_listener,
has_super_template=True,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == []
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == []
info.async_refresh()
assert refresh_runs == [1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == [1]
next_time = dt_util.utcnow() + timedelta(seconds=0.125)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [1]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == [1]
hass.states.async_set("sensor.four", "any")
await hass.async_block_till_done()
assert refresh_runs == [1]
hass.states.async_set("sensor.five", "any")
await hass.async_block_till_done()
assert refresh_runs == [1]
next_time = dt_util.utcnow() + timedelta(seconds=0.125 * 2)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [1, 5]
hass.states.async_set("sensor.six", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 5]
async def test_track_template_rate_limit_super_3(hass):
"""Test template with rate limited super template."""
# Somewhat forced example of a rate limited template
template_availability = Template("{{ states | count % 2 == 1 }}", hass)
template_refresh = Template("{{ states | count }}", hass)
availability_runs = []
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
for track_result in updates:
if track_result.template is template_refresh:
refresh_runs.append(track_result.result)
elif track_result.template is template_availability:
availability_runs.append(track_result.result)
info = async_track_template_result(
hass,
[
TrackTemplate(template_availability, None, timedelta(seconds=0.1)),
TrackTemplate(template_refresh, None),
],
refresh_listener,
has_super_template=True,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == []
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == []
info.async_refresh()
assert refresh_runs == [1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
    # The super template is rate limited, so it is stuck at `True`
assert refresh_runs == [1, 2]
next_time = dt_util.utcnow() + timedelta(seconds=0.125)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [1, 2]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
    # The super template is rate limited, so it is stuck at `False`
assert refresh_runs == [1, 2]
hass.states.async_set("sensor.four", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 2]
hass.states.async_set("sensor.five", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 2]
next_time = dt_util.utcnow() + timedelta(seconds=0.125 * 2)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [1, 2, 5]
hass.states.async_set("sensor.six", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 2, 5, 6]
hass.states.async_set("sensor.seven", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 2, 5, 6, 7]
async def test_track_template_rate_limit_suppress_listener(hass):
"""Test template rate limit will suppress the listener during the rate limit."""
template_refresh = Template("{{ states | count }}", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, None, timedelta(seconds=0.1))],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
assert info.listeners == {
"all": True,
"domains": set(),
"entities": set(),
"time": False,
}
await hass.async_block_till_done()
assert refresh_runs == [0]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == [0]
info.async_refresh()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
# Should be suppressed during the rate limit
assert info.listeners == {
"all": False,
"domains": set(),
"entities": set(),
"time": False,
}
assert refresh_runs == [0, 1]
next_time = dt_util.utcnow() + timedelta(seconds=0.125)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
# Rate limit released and the all listener returns
assert info.listeners == {
"all": True,
"domains": set(),
"entities": set(),
"time": False,
}
assert refresh_runs == [0, 1, 2]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
hass.states.async_set("sensor.four", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
# Rate limit hit and the all listener is shut off
assert info.listeners == {
"all": False,
"domains": set(),
"entities": set(),
"time": False,
}
next_time = dt_util.utcnow() + timedelta(seconds=0.125 * 2)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
# Rate limit released and the all listener returns
assert info.listeners == {
"all": True,
"domains": set(),
"entities": set(),
"time": False,
}
assert refresh_runs == [0, 1, 2, 4]
hass.states.async_set("sensor.five", "any")
await hass.async_block_till_done()
# Rate limit hit and the all listener is shut off
assert info.listeners == {
"all": False,
"domains": set(),
"entities": set(),
"time": False,
}
assert refresh_runs == [0, 1, 2, 4]
async def test_track_template_rate_limit_five(hass):
"""Test template rate limit of 5 seconds."""
template_refresh = Template("{{ states | count }}", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, None, timedelta(seconds=5))],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == [0]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == [0]
info.async_refresh()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
async def test_track_template_has_default_rate_limit(hass):
"""Test template has a rate limit by default."""
hass.states.async_set("sensor.zero", "any")
template_refresh = Template("{{ states | list | count }}", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, None)],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == [1]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == [1]
info.async_refresh()
assert refresh_runs == [1, 2]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 2]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 2]
async def test_track_template_unavailable_states_has_default_rate_limit(hass):
"""Test template watching for unavailable states has a rate limit by default."""
hass.states.async_set("sensor.zero", "unknown")
template_refresh = Template(
"{{ states | selectattr('state', 'in', ['unavailable', 'unknown', 'none']) | list | count }}",
hass,
)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, None)],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == [1]
hass.states.async_set("sensor.one", "unknown")
await hass.async_block_till_done()
assert refresh_runs == [1]
info.async_refresh()
assert refresh_runs == [1, 2]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == [1, 2]
hass.states.async_set("sensor.three", "unknown")
await hass.async_block_till_done()
assert refresh_runs == [1, 2]
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == [1, 2, 3]
info.async_remove()
async def test_specifically_referenced_entity_is_not_rate_limited(hass):
"""Test template rate limit of 5 seconds."""
hass.states.async_set("sensor.one", "none")
template_refresh = Template('{{ states | count }}_{{ states("sensor.one") }}', hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, None, timedelta(seconds=5))],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == ["1_none"]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == ["1_none", "1_any"]
info.async_refresh()
assert refresh_runs == ["1_none", "1_any"]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == ["1_none", "1_any"]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == ["1_none", "1_any"]
hass.states.async_set("sensor.one", "none")
await hass.async_block_till_done()
assert refresh_runs == ["1_none", "1_any", "3_none"]
info.async_remove()
async def test_track_two_templates_with_different_rate_limits(hass):
"""Test two templates with different rate limits."""
template_one = Template("{{ (states | count) + 0 }}", hass)
template_five = Template("{{ states | count }}", hass)
refresh_runs = {
template_one: [],
template_five: [],
}
@ha.callback
def refresh_listener(event, updates):
for update in updates:
refresh_runs[update.template].append(update.result)
info = async_track_template_result(
hass,
[
TrackTemplate(template_one, None, timedelta(seconds=0.1)),
TrackTemplate(template_five, None, timedelta(seconds=5)),
],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs[template_one] == [0]
assert refresh_runs[template_five] == [0]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs[template_one] == [0]
assert refresh_runs[template_five] == [0]
info.async_refresh()
assert refresh_runs[template_one] == [0, 1]
assert refresh_runs[template_five] == [0, 1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs[template_one] == [0, 1]
assert refresh_runs[template_five] == [0, 1]
next_time = dt_util.utcnow() + timedelta(seconds=0.125 * 1)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert refresh_runs[template_one] == [0, 1, 2]
assert refresh_runs[template_five] == [0, 1]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs[template_one] == [0, 1, 2]
assert refresh_runs[template_five] == [0, 1]
hass.states.async_set("sensor.four", "any")
await hass.async_block_till_done()
assert refresh_runs[template_one] == [0, 1, 2]
assert refresh_runs[template_five] == [0, 1]
hass.states.async_set("sensor.five", "any")
await hass.async_block_till_done()
assert refresh_runs[template_one] == [0, 1, 2]
assert refresh_runs[template_five] == [0, 1]
info.async_remove()
async def test_string(hass):
"""Test a string."""
template_refresh = Template("no_template", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template_refresh, None)], refresh_listener
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == ["no_template"]
async def test_track_template_result_refresh_cancel(hass):
"""Test cancelling and refreshing result."""
template_refresh = Template("{{states.switch.test.state == 'on' and now() }}", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template_refresh, None)], refresh_listener
)
await hass.async_block_till_done()
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert refresh_runs == [False]
assert len(refresh_runs) == 1
info.async_refresh()
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert len(refresh_runs) == 2
assert refresh_runs[0] != refresh_runs[1]
info.async_remove()
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert len(refresh_runs) == 2
template_refresh = Template("{{ value }}", hass)
refresh_runs = []
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, {"value": "duck"})],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == ["duck"]
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == ["duck"]
async def test_async_track_template_result_multiple_templates(hass):
"""Test tracking multiple templates."""
template_1 = Template("{{ states.switch.test.state == 'on' }}")
template_2 = Template("{{ states.switch.test.state == 'on' }}")
template_3 = Template("{{ states.switch.test.state == 'off' }}")
template_4 = Template(
"{{ states.binary_sensor | map(attribute='entity_id') | list }}"
)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates)
async_track_template_result(
hass,
[
TrackTemplate(template_1, None),
TrackTemplate(template_2, None),
TrackTemplate(template_3, None),
TrackTemplate(template_4, None),
],
refresh_listener,
)
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(template_1, None, True),
TrackTemplateResult(template_2, None, True),
TrackTemplateResult(template_3, None, False),
]
]
refresh_runs = []
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(template_1, True, False),
TrackTemplateResult(template_2, True, False),
TrackTemplateResult(template_3, False, True),
]
]
refresh_runs = []
hass.states.async_set("binary_sensor.test", "off")
await hass.async_block_till_done()
assert refresh_runs == [
[TrackTemplateResult(template_4, None, ["binary_sensor.test"])]
]
async def test_async_track_template_result_multiple_templates_mixing_domain(hass):
"""Test tracking multiple templates when tracking entities and an entire domain."""
template_1 = Template("{{ states.switch.test.state == 'on' }}")
template_2 = Template("{{ states.switch.test.state == 'on' }}")
template_3 = Template("{{ states.switch.test.state == 'off' }}")
template_4 = Template("{{ states.switch | map(attribute='entity_id') | list }}")
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates)
async_track_template_result(
hass,
[
TrackTemplate(template_1, None),
TrackTemplate(template_2, None),
TrackTemplate(template_3, None),
TrackTemplate(template_4, None, timedelta(seconds=0)),
],
refresh_listener,
)
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(template_1, None, True),
TrackTemplateResult(template_2, None, True),
TrackTemplateResult(template_3, None, False),
TrackTemplateResult(template_4, None, ["switch.test"]),
]
]
refresh_runs = []
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(template_1, True, False),
TrackTemplateResult(template_2, True, False),
TrackTemplateResult(template_3, False, True),
]
]
refresh_runs = []
hass.states.async_set("binary_sensor.test", "off")
await hass.async_block_till_done()
assert refresh_runs == []
refresh_runs = []
hass.states.async_set("switch.new", "off")
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(
template_4, ["switch.test"], ["switch.new", "switch.test"]
)
]
]
async def test_async_track_template_result_raise_on_template_error(hass):
"""Test that we raise as soon as we encounter a failed template."""
with pytest.raises(TemplateError):
async_track_template_result(
hass,
[
TrackTemplate(
Template(
"{{ states.switch | function_that_does_not_exist | list }}"
),
None,
),
],
ha.callback(lambda event, updates: None),
raise_on_template_error=True,
)
async def test_track_template_with_time(hass):
"""Test tracking template with time."""
hass.states.async_set("switch.test", "on")
specific_runs = []
template_complex = Template("{{ states.switch.test.state and now() }}", hass)
def specific_run_callback(event, updates):
specific_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template_complex, None)], specific_run_callback
)
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"switch.test"},
"time": True,
}
await hass.async_block_till_done()
now = dt_util.utcnow()
async_fire_time_changed(hass, now + timedelta(seconds=61))
async_fire_time_changed(hass, now + timedelta(seconds=61 * 2))
await hass.async_block_till_done()
assert specific_runs[-1] != specific_runs[0]
info.async_remove()
async def test_track_template_with_time_default(hass):
"""Test tracking template with time."""
specific_runs = []
template_complex = Template("{{ now() }}", hass)
def specific_run_callback(event, updates):
specific_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template_complex, None)], specific_run_callback
)
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": set(),
"entities": set(),
"time": True,
}
await hass.async_block_till_done()
now = dt_util.utcnow()
async_fire_time_changed(hass, now + timedelta(seconds=2))
async_fire_time_changed(hass, now + timedelta(seconds=4))
await hass.async_block_till_done()
assert len(specific_runs) < 2
async_fire_time_changed(hass, now + timedelta(minutes=2))
await hass.async_block_till_done()
async_fire_time_changed(hass, now + timedelta(minutes=4))
await hass.async_block_till_done()
assert len(specific_runs) >= 2
assert specific_runs[-1] != specific_runs[0]
info.async_remove()
async def test_track_template_with_time_that_leaves_scope(hass):
"""Test tracking template with time."""
now = dt_util.utcnow()
test_time = datetime(now.year + 1, 5, 24, 11, 59, 1, 500000, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=test_time):
hass.states.async_set("binary_sensor.washing_machine", "on")
specific_runs = []
template_complex = Template(
"""
{% if states.binary_sensor.washing_machine.state == "on" %}
{{ now() }}
{% else %}
{{ states.binary_sensor.washing_machine.last_updated }}
{% endif %}
""",
hass,
)
def specific_run_callback(event, updates):
specific_runs.append(updates.pop().result)
info = async_track_template_result(
hass, [TrackTemplate(template_complex, None)], specific_run_callback
)
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"binary_sensor.washing_machine"},
"time": True,
}
hass.states.async_set("binary_sensor.washing_machine", "off")
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"binary_sensor.washing_machine"},
"time": False,
}
hass.states.async_set("binary_sensor.washing_machine", "on")
await hass.async_block_till_done()
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"binary_sensor.washing_machine"},
"time": True,
}
# Verify we do not update before the minute rolls over
callback_count_before_time_change = len(specific_runs)
async_fire_time_changed(hass, test_time)
await hass.async_block_till_done()
assert len(specific_runs) == callback_count_before_time_change
async_fire_time_changed(hass, test_time + timedelta(seconds=58))
await hass.async_block_till_done()
assert len(specific_runs) == callback_count_before_time_change
# Verify we do update on the next change of minute
async_fire_time_changed(hass, test_time + timedelta(seconds=59))
await hass.async_block_till_done()
assert len(specific_runs) == callback_count_before_time_change + 1
info.async_remove()
async def test_async_track_template_result_multiple_templates_mixing_listeners(hass):
"""Test tracking multiple templates with mixing listener types."""
template_1 = Template("{{ states.switch.test.state == 'on' }}")
template_2 = Template("{{ now() and True }}")
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates)
now = dt_util.utcnow()
time_that_will_not_match_right_away = datetime(
now.year + 1, 5, 24, 11, 59, 55, tzinfo=dt_util.UTC
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
info = async_track_template_result(
hass,
[
TrackTemplate(template_1, None),
TrackTemplate(template_2, None),
],
refresh_listener,
)
assert info.listeners == {
"all": False,
"domains": set(),
"entities": {"switch.test"},
"time": True,
}
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(template_1, None, True),
]
]
refresh_runs = []
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(template_1, True, False),
]
]
refresh_runs = []
next_time = time_that_will_not_match_right_away + timedelta(hours=25)
with patch("homeassistant.util.dt.utcnow", return_value=next_time):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [
[
TrackTemplateResult(template_2, None, True),
]
]
async def test_track_same_state_simple_no_trigger(hass):
"""Test track_same_change with no trigger."""
callback_runs = []
period = timedelta(minutes=1)
@ha.callback
def callback_run_callback():
callback_runs.append(1)
async_track_same_state(
hass,
period,
callback_run_callback,
callback(lambda _, _2, to_s: to_s.state == "on"),
entity_ids="light.Bowl",
)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(callback_runs) == 0
# Change state on state machine
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(callback_runs) == 0
# change time to track and see if they trigger
future = dt_util.utcnow() + period
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert len(callback_runs) == 0
async def test_track_same_state_simple_trigger_check_funct(hass):
"""Test track_same_change with trigger and check funct."""
callback_runs = []
check_func = []
period = timedelta(minutes=1)
@ha.callback
def callback_run_callback():
callback_runs.append(1)
@ha.callback
def async_check_func(entity, from_s, to_s):
check_func.append((entity, from_s, to_s))
return True
async_track_same_state(
hass,
period,
callback_run_callback,
entity_ids="light.Bowl",
async_check_same_func=async_check_func,
)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(callback_runs) == 0
assert check_func[-1][2].state == "on"
assert check_func[-1][0] == "light.bowl"
# change time to track and see if they trigger
future = dt_util.utcnow() + period
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert len(callback_runs) == 1
async def test_track_time_interval(hass):
"""Test tracking time interval."""
specific_runs = []
utc_now = dt_util.utcnow()
unsub = async_track_time_interval(
hass, callback(lambda x: specific_runs.append(x)), timedelta(seconds=10)
)
async_fire_time_changed(hass, utc_now + timedelta(seconds=5))
await hass.async_block_till_done()
assert len(specific_runs) == 0
async_fire_time_changed(hass, utc_now + timedelta(seconds=13))
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(hass, utc_now + timedelta(minutes=20))
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
async_fire_time_changed(hass, utc_now + timedelta(seconds=30))
await hass.async_block_till_done()
assert len(specific_runs) == 2
async def test_track_sunrise(hass, legacy_patchable_time):
"""Test track the sunrise."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
hass.config.latitude = latitude
hass.config.longitude = longitude
assert await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
location = LocationInfo(
latitude=hass.config.latitude, longitude=hass.config.longitude
)
# Get next sunrise/sunset
utc_now = datetime(2014, 5, 24, 12, 0, 0, tzinfo=dt_util.UTC)
utc_today = utc_now.date()
mod = -1
while True:
next_rising = astral.sun.sunrise(
location.observer, date=utc_today + timedelta(days=mod)
)
if next_rising > utc_now:
break
mod += 1
# Track sunrise
runs = []
with patch("homeassistant.util.dt.utcnow", return_value=utc_now):
unsub = async_track_sunrise(hass, callback(lambda: runs.append(1)))
offset_runs = []
offset = timedelta(minutes=30)
with patch("homeassistant.util.dt.utcnow", return_value=utc_now):
unsub2 = async_track_sunrise(
hass, callback(lambda: offset_runs.append(1)), offset
)
# run tests
async_fire_time_changed(hass, next_rising - offset)
await hass.async_block_till_done()
assert len(runs) == 0
assert len(offset_runs) == 0
async_fire_time_changed(hass, next_rising)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 0
async_fire_time_changed(hass, next_rising + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
unsub()
unsub2()
async_fire_time_changed(hass, next_rising + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
async def test_track_sunrise_update_location(hass, legacy_patchable_time):
"""Test track the sunrise."""
# Setup sun component
hass.config.latitude = 32.87336
hass.config.longitude = 117.22743
assert await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
location = LocationInfo(
latitude=hass.config.latitude, longitude=hass.config.longitude
)
# Get next sunrise
utc_now = datetime(2014, 5, 24, 12, 0, 0, tzinfo=dt_util.UTC)
utc_today = utc_now.date()
mod = -1
while True:
next_rising = astral.sun.sunrise(
location.observer, date=utc_today + timedelta(days=mod)
)
if next_rising > utc_now:
break
mod += 1
# Track sunrise
runs = []
with patch("homeassistant.util.dt.utcnow", return_value=utc_now):
async_track_sunrise(hass, callback(lambda: runs.append(1)))
# Mimic sunrise
async_fire_time_changed(hass, next_rising)
await hass.async_block_till_done()
assert len(runs) == 1
# Move!
with patch("homeassistant.util.dt.utcnow", return_value=utc_now):
await hass.config.async_update(latitude=40.755931, longitude=-73.984606)
await hass.async_block_till_done()
# update location for astral
location = LocationInfo(
latitude=hass.config.latitude, longitude=hass.config.longitude
)
# Mimic sunrise
async_fire_time_changed(hass, next_rising)
await hass.async_block_till_done()
# Did not increase
assert len(runs) == 1
# Get next sunrise
mod = -1
while True:
next_rising = astral.sun.sunrise(
location.observer, date=utc_today + timedelta(days=mod)
)
if next_rising > utc_now:
break
mod += 1
# Mimic sunrise at new location
async_fire_time_changed(hass, next_rising)
await hass.async_block_till_done()
assert len(runs) == 2
async def test_track_sunset(hass, legacy_patchable_time):
"""Test track the sunset."""
latitude = 32.87336
longitude = 117.22743
location = LocationInfo(latitude=latitude, longitude=longitude)
# Setup sun component
hass.config.latitude = latitude
hass.config.longitude = longitude
assert await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
# Get next sunrise/sunset
utc_now = datetime(2014, 5, 24, 12, 0, 0, tzinfo=dt_util.UTC)
utc_today = utc_now.date()
mod = -1
while True:
next_setting = astral.sun.sunset(
location.observer, date=utc_today + timedelta(days=mod)
)
if next_setting > utc_now:
break
mod += 1
# Track sunset
runs = []
with patch("homeassistant.util.dt.utcnow", return_value=utc_now):
unsub = async_track_sunset(hass, callback(lambda: runs.append(1)))
offset_runs = []
offset = timedelta(minutes=30)
with patch("homeassistant.util.dt.utcnow", return_value=utc_now):
unsub2 = async_track_sunset(
hass, callback(lambda: offset_runs.append(1)), offset
)
# Run tests
async_fire_time_changed(hass, next_setting - offset)
await hass.async_block_till_done()
assert len(runs) == 0
assert len(offset_runs) == 0
async_fire_time_changed(hass, next_setting)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 0
async_fire_time_changed(hass, next_setting + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
unsub()
unsub2()
async_fire_time_changed(hass, next_setting + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
async def test_async_track_time_change(hass):
"""Test tracking time change."""
wildcard_runs = []
specific_runs = []
now = dt_util.utcnow()
time_that_will_not_match_right_away = datetime(
now.year + 1, 5, 24, 11, 59, 55, tzinfo=dt_util.UTC
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
unsub = async_track_time_change(
hass, callback(lambda x: wildcard_runs.append(x))
)
unsub_utc = async_track_utc_time_change(
hass, callback(lambda x: specific_runs.append(x)), second=[0, 30]
)
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 0, 15, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 2
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 0, 30, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 3
unsub()
unsub_utc()
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 0, 30, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 3
async def test_periodic_task_minute(hass):
"""Test periodic tasks per minute."""
specific_runs = []
now = dt_util.utcnow()
time_that_will_not_match_right_away = datetime(
now.year + 1, 5, 24, 11, 59, 55, tzinfo=dt_util.UTC
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
unsub = async_track_utc_time_change(
hass, callback(lambda x: specific_runs.append(x)), minute="/5", second=0
)
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 3, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 5, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 12, 5, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
async def test_periodic_task_hour(hass):
"""Test periodic tasks per hour."""
specific_runs = []
now = dt_util.utcnow()
time_that_will_not_match_right_away = datetime(
now.year + 1, 5, 24, 21, 59, 55, tzinfo=dt_util.UTC
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
unsub = async_track_utc_time_change(
hass,
callback(lambda x: specific_runs.append(x)),
hour="/2",
minute=0,
second=0,
)
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 22, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 23, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 25, 0, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 25, 1, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 25, 2, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 3
unsub()
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 25, 2, 0, 0, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 3
async def test_periodic_task_wrong_input(hass):
"""Test periodic tasks with wrong input."""
specific_runs = []
now = dt_util.utcnow()
with pytest.raises(ValueError):
async_track_utc_time_change(
hass, callback(lambda x: specific_runs.append(x)), hour="/two"
)
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 2, 0, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 0
async def test_periodic_task_clock_rollback(hass):
"""Test periodic tasks with the time rolling backwards."""
specific_runs = []
now = dt_util.utcnow()
time_that_will_not_match_right_away = datetime(
now.year + 1, 5, 24, 21, 59, 55, tzinfo=dt_util.UTC
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
unsub = async_track_utc_time_change(
hass,
callback(lambda x: specific_runs.append(x)),
hour="/2",
minute=0,
second=0,
)
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 22, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 23, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
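    # The clock now rolls backwards; the periodic task must not fire again for a
    # time slot that has already been handled.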
async_fire_time_changed(
hass,
datetime(now.year + 1, 5, 24, 22, 0, 0, 999999, tzinfo=dt_util.UTC),
fire_all=True,
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass,
datetime(now.year + 1, 5, 24, 0, 0, 0, 999999, tzinfo=dt_util.UTC),
fire_all=True,
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 25, 2, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 25, 2, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
async def test_periodic_task_duplicate_time(hass):
"""Test periodic tasks not triggering on duplicate time."""
specific_runs = []
now = dt_util.utcnow()
time_that_will_not_match_right_away = datetime(
now.year + 1, 5, 24, 21, 59, 55, tzinfo=dt_util.UTC
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
unsub = async_track_utc_time_change(
hass,
callback(lambda x: specific_runs.append(x)),
hour="/2",
minute=0,
second=0,
)
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 22, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 24, 22, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 1
async_fire_time_changed(
hass, datetime(now.year + 1, 5, 25, 0, 0, 0, 999999, tzinfo=dt_util.UTC)
)
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
# DST starts early morning March 28th 2021
@pytest.mark.freeze_time("2021-03-28 01:28:00+01:00")
async def test_periodic_task_entering_dst(hass, freezer):
"""Test periodic task behavior when entering dst."""
hass.config.set_time_zone("Europe/Vienna")
specific_runs = []
today = date.today().isoformat()
tomorrow = (date.today() + timedelta(days=1)).isoformat()
# Make sure we enter DST during the test
now_local = dt_util.now()
assert now_local.utcoffset() != (now_local + timedelta(hours=2)).utcoffset()
unsub = async_track_time_change(
hass,
callback(lambda x: specific_runs.append(x)),
hour=2,
minute=30,
second=0,
)
freezer.move_to(f"{today} 01:50:00.999999+01:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 0
# There was no 02:30 today, the event should not fire until tomorrow
freezer.move_to(f"{today} 03:50:00.999999+02:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 0
freezer.move_to(f"{tomorrow} 01:50:00.999999+02:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 0
freezer.move_to(f"{tomorrow} 02:50:00.999999+02:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 1
unsub()
# DST starts early morning March 28th 2021
@pytest.mark.freeze_time("2021-03-28 01:59:59+01:00")
async def test_periodic_task_entering_dst_2(hass, freezer):
"""Test periodic task behavior when entering dst.
This tests a task firing every second in the range 0..58 (not *:*:59)
"""
hass.config.set_time_zone("Europe/Vienna")
specific_runs = []
today = date.today().isoformat()
tomorrow = (date.today() + timedelta(days=1)).isoformat()
# Make sure we enter DST during the test
now_local = dt_util.now()
assert now_local.utcoffset() != (now_local + timedelta(hours=2)).utcoffset()
unsub = async_track_time_change(
hass,
callback(lambda x: specific_runs.append(x)),
second=list(range(59)),
)
freezer.move_to(f"{today} 01:59:59.999999+01:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 0
freezer.move_to(f"{today} 03:00:00.999999+02:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 1
freezer.move_to(f"{today} 03:00:01.999999+02:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 2
freezer.move_to(f"{tomorrow} 01:59:59.999999+02:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 3
freezer.move_to(f"{tomorrow} 02:00:00.999999+02:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert len(specific_runs) == 4
unsub()
# DST ends early morning October 31st 2021
@pytest.mark.freeze_time("2021-10-31 02:28:00+02:00")
async def test_periodic_task_leaving_dst(hass, freezer):
"""Test periodic task behavior when leaving dst."""
hass.config.set_time_zone("Europe/Vienna")
specific_runs = []
today = date.today().isoformat()
tomorrow = (date.today() + timedelta(days=1)).isoformat()
# Make sure we leave DST during the test
now_local = dt_util.now()
assert now_local.utcoffset() != (now_local + timedelta(hours=1)).utcoffset()
unsub = async_track_time_change(
hass,
callback(lambda x: specific_runs.append(x)),
hour=2,
minute=30,
second=0,
)
# The task should not fire yet
freezer.move_to(f"{today} 02:28:00.999999+02:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 0
await hass.async_block_till_done()
assert len(specific_runs) == 0
# The task should fire
freezer.move_to(f"{today} 02:30:00.999999+02:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 0
await hass.async_block_till_done()
assert len(specific_runs) == 1
# The task should not fire again
freezer.move_to(f"{today} 02:55:00.999999+02:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 0
await hass.async_block_till_done()
assert len(specific_runs) == 1
# DST has ended, the task should not fire yet
freezer.move_to(f"{today} 02:15:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 1 # DST has ended
await hass.async_block_till_done()
assert len(specific_runs) == 1
# The task should fire
freezer.move_to(f"{today} 02:45:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 1
await hass.async_block_till_done()
assert len(specific_runs) == 2
# The task should not fire again
freezer.move_to(f"{today} 02:55:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 1
await hass.async_block_till_done()
assert len(specific_runs) == 2
# The task should fire again the next day
freezer.move_to(f"{tomorrow} 02:55:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 0
await hass.async_block_till_done()
assert len(specific_runs) == 3
unsub()
# DST ends early morning October 31st 2021
@pytest.mark.freeze_time("2021-10-31 02:28:00+02:00")
async def test_periodic_task_leaving_dst_2(hass, freezer):
"""Test periodic task behavior when leaving dst."""
hass.config.set_time_zone("Europe/Vienna")
specific_runs = []
today = date.today().isoformat()
# Make sure we leave DST during the test
now_local = dt_util.now()
assert now_local.utcoffset() != (now_local + timedelta(hours=1)).utcoffset()
unsub = async_track_time_change(
hass,
callback(lambda x: specific_runs.append(x)),
minute=30,
second=0,
)
# The task should not fire yet
freezer.move_to(f"{today} 02:28:00.999999+02:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 0
await hass.async_block_till_done()
assert len(specific_runs) == 0
# The task should fire
freezer.move_to(f"{today} 02:55:00.999999+02:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 0
await hass.async_block_till_done()
assert len(specific_runs) == 1
# DST has ended, the task should not fire yet
freezer.move_to(f"{today} 02:15:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 1
await hass.async_block_till_done()
assert len(specific_runs) == 1
# The task should fire
freezer.move_to(f"{today} 02:45:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 1
await hass.async_block_till_done()
assert len(specific_runs) == 2
# The task should not fire again
freezer.move_to(f"{today} 02:55:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 1
await hass.async_block_till_done()
assert len(specific_runs) == 2
# The task should fire again the next hour
freezer.move_to(f"{today} 03:55:00.999999+01:00")
async_fire_time_changed(hass)
assert dt_util.now().fold == 0
await hass.async_block_till_done()
assert len(specific_runs) == 3
unsub()
async def test_call_later(hass):
"""Test calling an action later."""
def action():
pass
now = datetime(2017, 12, 19, 15, 40, 0, tzinfo=dt_util.UTC)
with patch(
"homeassistant.helpers.event.async_track_point_in_utc_time"
) as mock, patch("homeassistant.util.dt.utcnow", return_value=now):
async_call_later(hass, 3, action)
assert len(mock.mock_calls) == 1
p_hass, p_action, p_point = mock.mock_calls[0][1]
assert p_hass is hass
assert p_action is action
assert p_point == now + timedelta(seconds=3)
async def test_async_call_later(hass):
"""Test calling an action later."""
def action():
pass
now = datetime(2017, 12, 19, 15, 40, 0, tzinfo=dt_util.UTC)
with patch(
"homeassistant.helpers.event.async_track_point_in_utc_time"
) as mock, patch("homeassistant.util.dt.utcnow", return_value=now):
remove = async_call_later(hass, 3, action)
assert len(mock.mock_calls) == 1
p_hass, p_action, p_point = mock.mock_calls[0][1]
assert p_hass is hass
assert p_action is action
assert p_point == now + timedelta(seconds=3)
assert remove is mock()
async def test_async_call_later_timedelta(hass):
"""Test calling an action later with a timedelta."""
def action():
pass
now = datetime(2017, 12, 19, 15, 40, 0, tzinfo=dt_util.UTC)
with patch(
"homeassistant.helpers.event.async_track_point_in_utc_time"
) as mock, patch("homeassistant.util.dt.utcnow", return_value=now):
remove = async_call_later(hass, timedelta(seconds=3), action)
assert len(mock.mock_calls) == 1
p_hass, p_action, p_point = mock.mock_calls[0][1]
assert p_hass is hass
assert p_action is action
assert p_point == now + timedelta(seconds=3)
assert remove is mock()
async def test_track_state_change_event_chain_multiple_entity(hass):
"""Test that adding a new state tracker inside a tracker does not fire right away."""
tracker_called = []
chained_tracker_called = []
chained_tracker_unsub = []
tracker_unsub = []
@ha.callback
def chained_single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
chained_tracker_called.append((old_state, new_state))
@ha.callback
def single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
tracker_called.append((old_state, new_state))
chained_tracker_unsub.append(
async_track_state_change_event(
hass, ["light.bowl", "light.top"], chained_single_run_callback
)
)
tracker_unsub.append(
async_track_state_change_event(
hass, ["light.bowl", "light.top"], single_run_callback
)
)
hass.states.async_set("light.bowl", "on")
hass.states.async_set("light.top", "on")
await hass.async_block_till_done()
assert len(tracker_called) == 2
assert len(chained_tracker_called) == 1
assert len(tracker_unsub) == 1
assert len(chained_tracker_unsub) == 2
hass.states.async_set("light.bowl", "off")
await hass.async_block_till_done()
assert len(tracker_called) == 3
assert len(chained_tracker_called) == 3
assert len(tracker_unsub) == 1
assert len(chained_tracker_unsub) == 3
async def test_track_state_change_event_chain_single_entity(hass):
"""Test that adding a new state tracker inside a tracker does not fire right away."""
tracker_called = []
chained_tracker_called = []
chained_tracker_unsub = []
tracker_unsub = []
@ha.callback
def chained_single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
chained_tracker_called.append((old_state, new_state))
@ha.callback
def single_run_callback(event):
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
tracker_called.append((old_state, new_state))
chained_tracker_unsub.append(
async_track_state_change_event(
hass, "light.bowl", chained_single_run_callback
)
)
tracker_unsub.append(
async_track_state_change_event(hass, "light.bowl", single_run_callback)
)
hass.states.async_set("light.bowl", "on")
await hass.async_block_till_done()
assert len(tracker_called) == 1
assert len(chained_tracker_called) == 0
assert len(tracker_unsub) == 1
assert len(chained_tracker_unsub) == 1
hass.states.async_set("light.bowl", "off")
await hass.async_block_till_done()
assert len(tracker_called) == 2
assert len(chained_tracker_called) == 1
assert len(tracker_unsub) == 1
assert len(chained_tracker_unsub) == 2
async def test_track_point_in_utc_time_cancel(hass):
"""Test cancel of async track point in time."""
times = []
@ha.callback
def run_callback(utc_time):
nonlocal times
times.append(utc_time)
def _setup_listeners():
"""Ensure we test the non-async version."""
utc_now = dt_util.utcnow()
with pytest.raises(TypeError):
track_point_in_utc_time("nothass", run_callback, utc_now)
unsub1 = hass.helpers.event.track_point_in_utc_time(
run_callback, utc_now + timedelta(seconds=0.1)
)
hass.helpers.event.track_point_in_utc_time(
run_callback, utc_now + timedelta(seconds=0.1)
)
unsub1()
await hass.async_add_executor_job(_setup_listeners)
await asyncio.sleep(0.2)
assert len(times) == 1
assert times[0].tzinfo == dt_util.UTC
async def test_async_track_point_in_time_cancel(hass):
"""Test cancel of async track point in time."""
times = []
hass.config.set_time_zone("US/Hawaii")
hst_tz = dt_util.get_time_zone("US/Hawaii")
@ha.callback
def run_callback(local_time):
nonlocal times
times.append(local_time)
utc_now = dt_util.utcnow()
hst_now = utc_now.astimezone(hst_tz)
unsub1 = hass.helpers.event.async_track_point_in_time(
run_callback, hst_now + timedelta(seconds=0.1)
)
hass.helpers.event.async_track_point_in_time(
run_callback, hst_now + timedelta(seconds=0.1)
)
unsub1()
await asyncio.sleep(0.2)
assert len(times) == 1
assert "US/Hawaii" in str(times[0].tzinfo)
async def test_async_track_entity_registry_updated_event(hass):
"""Test tracking entity registry updates for an entity_id."""
entity_id = "switch.puppy_feeder"
new_entity_id = "switch.dog_feeder"
untracked_entity_id = "switch.kitty_feeder"
hass.states.async_set(entity_id, "on")
await hass.async_block_till_done()
event_data = []
@ha.callback
def run_callback(event):
event_data.append(event.data)
unsub1 = hass.helpers.event.async_track_entity_registry_updated_event(
entity_id, run_callback
)
unsub2 = hass.helpers.event.async_track_entity_registry_updated_event(
new_entity_id, run_callback
)
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED, {"action": "create", "entity_id": entity_id}
)
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "create", "entity_id": untracked_entity_id},
)
await hass.async_block_till_done()
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{
"action": "update",
"entity_id": new_entity_id,
"old_entity_id": entity_id,
"changes": {},
},
)
await hass.async_block_till_done()
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED, {"action": "remove", "entity_id": new_entity_id}
)
await hass.async_block_till_done()
unsub1()
unsub2()
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED, {"action": "create", "entity_id": entity_id}
)
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED, {"action": "create", "entity_id": new_entity_id}
)
await hass.async_block_till_done()
assert event_data[0] == {"action": "create", "entity_id": "switch.puppy_feeder"}
assert event_data[1] == {
"action": "update",
"changes": {},
"entity_id": "switch.dog_feeder",
"old_entity_id": "switch.puppy_feeder",
}
assert event_data[2] == {"action": "remove", "entity_id": "switch.dog_feeder"}
async def test_async_track_entity_registry_updated_event_with_a_callback_that_throws(
hass,
):
"""Test tracking entity registry updates for an entity_id when one callback throws."""
entity_id = "switch.puppy_feeder"
hass.states.async_set(entity_id, "on")
await hass.async_block_till_done()
event_data = []
@ha.callback
def run_callback(event):
event_data.append(event.data)
@ha.callback
def failing_callback(event):
raise ValueError
unsub1 = hass.helpers.event.async_track_entity_registry_updated_event(
entity_id, failing_callback
)
unsub2 = hass.helpers.event.async_track_entity_registry_updated_event(
entity_id, run_callback
)
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED, {"action": "create", "entity_id": entity_id}
)
await hass.async_block_till_done()
unsub1()
unsub2()
assert event_data[0] == {"action": "create", "entity_id": "switch.puppy_feeder"}
async def test_async_track_entity_registry_updated_event_with_empty_list(hass):
"""Test async_track_entity_registry_updated_event passing an empty list of entities."""
unsub_single = hass.helpers.event.async_track_entity_registry_updated_event(
[], ha.callback(lambda event: None)
)
unsub_single2 = hass.helpers.event.async_track_entity_registry_updated_event(
[], ha.callback(lambda event: None)
)
unsub_single2()
unsub_single()
| 32.069261
| 117
| 0.664143
|
d2ccbcc3c9f87b60bd7874b7d9a7ed955545dcac
| 6,677
|
py
|
Python
|
main.py
|
valord577/auto_add_wechat_friends_py
|
e612e277a7cf0951dd779adbb58bea48f1397c21
|
[
"Apache-2.0"
] | 124
|
2018-11-06T07:29:01.000Z
|
2021-12-22T06:51:45.000Z
|
main.py
|
AllureLo/auto_add_wechat_friends_py
|
e612e277a7cf0951dd779adbb58bea48f1397c21
|
[
"Apache-2.0"
] | 8
|
2018-11-10T08:02:15.000Z
|
2022-03-14T12:41:00.000Z
|
main.py
|
AllureLo/auto_add_wechat_friends_py
|
e612e277a7cf0951dd779adbb58bea48f1397c21
|
[
"Apache-2.0"
] | 42
|
2018-11-06T07:57:20.000Z
|
2022-03-07T07:39:43.000Z
|
#!/usr/local/bin/python
# -*- coding:utf-8 -*-
"""
@author: valor
@file: main.py
@time: 2018/11/5 15:59
"""
import time
from adb import By
from adb import Adb
import file
class Main:
def __init__(self, port=None, device=None):
self._adb = Adb(port, device)
        # Used to pause the program for half an hour when lookups fail three times
self._flag = 0
self._success = []
self._failed = []
self._dict = {'success': self._success, 'failed': self._failed}
self._file = file.File()
self._json = self._file.json()
        # Configuration from config.json
        # Contact lookup mode: file | loop
self._mode = self._json['mode']
        # Loop bounds: start inclusive, end exclusive
self._loop = self._json['loop']
        # File path: one phone number per line
        self._path = self._json['file']
        # Accounts used for automatic switching (WeChat login / reserved WeChat accounts)
self._account = self._json['account']
        # Once accumulated lookup results reach this count, they are written from memory to file
self._dump = self._json['dump']
        # Sleep duration in minutes, applied after a number of account switches
self._sleep = self._json['sleep']
        # Number of account switches that triggers the sleep
self._sleep_flag = self._json['sleep-flag']
    # Record an add-friend result in memory, or flush it to file
def push(self, key: str, value):
_list = self._dict[key]
_list.append(value)
        # When the list reaches the configured length, dump it to file
if int(self._dump) == len(_list):
self._file.dump(_list, key)
def init(self):
self._adb.click_by_text_after_refresh('通讯录')
self._adb.click_by_text_after_refresh('外部联系人')
self._adb.click_by_text_after_refresh('添加')
self._adb.click_by_text_after_refresh('微信号/手机号')
def add_friends(self, phone: str):
print('===== 开始查找 ===== ' + phone + ' =====')
self._adb.click_by_text_after_refresh('微信号/手机号')
        # Type the phone number
self._adb.adb_input(phone)
        # Tap search
self._adb.click_by_text_after_refresh('搜索:' + phone)
print(' ==> 点击搜索 ==> ')
self._adb.refresh_nodes()
if self._adb.find_nodes_by_text('查找失败'):
print(' <== 查找失败 <== ')
self.push('failed', phone + '查找失败')
self._adb.adb_put_back()
print(' ---- 计算切换账号次数 ----')
self._flag += 1
if int(self._sleep_flag) == self._flag:
print(' ---- 休眠半小时 ----')
time.sleep(int(self._sleep) * 60)
self._flag = 0
else:
print(' ---- 开始切换账号 ----')
                # Back WeChat Work (企业微信) out to its main page
self._adb.adb_put_back()
self._adb.adb_put_back()
self._adb.adb_put_back()
self._adb.click_by_text_after_refresh('我')
                # Return to the home screen
self._adb.adb_back_to_desktop()
                # Switch the WeChat account
# todo --notice
self._adb.click_by_text_after_refresh('微信')
self._adb.click_by_text_after_refresh('我')
self._adb.click_by_text_after_refresh('设置')
self._adb.click_by_text_after_refresh('切换帐号')
                # Work out which account is currently in use
self._adb.refresh_nodes()
self._adb.find_nodes_by_text(self._account[0])
left = float(self._adb.get_bounds()[0])
self._adb.find_nodes_by_text(self._account[1])
right = float(self._adb.get_bounds()[0])
self._adb.find_nodes_by_text('当前使用')
cursor = float(self._adb.get_bounds()[0])
self._adb.find_nodes('true', By.naf)
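                # The x-coordinate of the "当前使用" (currently in use) marker is
                # compared with both account labels; the closer label is the active
                # account, so the other clickable (NAF) node is tapped to switch.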
                # The left-hand account is the one in use
if abs(cursor - left) < abs(cursor - right):
self._adb.click(1)
else:
self._adb.click(0)
                # Wait until the login succeeds
while True:
self._adb.refresh_nodes()
if self._adb.find_nodes_by_text('通讯录'):
break
time.sleep(2)
                # Return to the home screen and open WeChat Work
self._adb.adb_back_to_desktop()
# todo --notice
self._adb.click_by_text_after_refresh('企业微信')
self._adb.click_by_text_after_refresh('设置')
self._adb.click_by_text_after_refresh('退出登录')
self._adb.click_by_text_after_refresh('退出当前帐号')
self._adb.click_by_text_after_refresh('确定')
self._adb.click_by_text_after_refresh('微信登录')
                # Wait until the login succeeds
while True:
self._adb.refresh_nodes()
if self._adb.find_nodes_by_text('进入企业 '):
break
time.sleep(2)
self._adb.click(0)
while True:
self._adb.refresh_nodes()
if self._adb.find_nodes_by_text('通讯录'):
break
time.sleep(2)
self.init()
        # Lookup succeeded
elif self._adb.find_nodes_by_text('添加为联系人'):
self._adb.click(0)
self._adb.click_by_text_after_refresh('发送添加邀请')
self._adb.refresh_nodes()
if self._adb.find_nodes_by_text('发送添加邀请'):
print(' <== 发送失败 <== ')
self.push('failed', phone + '发送失败')
self._adb.adb_put_back()
self._adb.adb_put_back()
else:
print(' !! <== 发送成功 <== ')
self.push('success', phone + '发送成功')
self._adb.adb_put_back()
elif self._adb.find_nodes_by_text('发消息'):
print(' <== 已经是好友 无需再次添加 <== ')
self.push('failed', phone + '已经是好友')
self._adb.adb_put_back()
elif self._adb.find_nodes_by_text('同时拥有微信和企业微信'):
print(' <== 同时拥有微信和企业微信 <== ')
self.push('failed', phone + '同时拥有微信和企业微信')
self._adb.adb_put_back()
elif self._adb.find_nodes_by_text('该用户不存在') or self._adb.find_nodes_by_text('被搜帐号状态异常,无法显示'):
print(' <== 该用户不存在 或 帐号异常 <== ')
self.push('failed', phone + '该用户不存在 或 帐号异常')
self._adb.adb_put_back()
        # Clear the characters that were entered
self._adb.refresh_nodes()
if self._adb.find_nodes('true', By.naf):
self._adb.click(1)
def main(self):
self.init()
if 'file' == self._mode:
            with self._file.open(self._path) as f:
for line in f:
line = file.delete_line_breaks(line)
self.add_friends(line)
f.close()
elif 'loop' == self._mode:
for line in range(int(self._loop[0]), int(self._loop[1])):
self.add_friends(str(line))
        # Write out the final add results
self._file.dump(self._success, 'success')
self._file.dump(self._failed, 'failed')
| 32.100962
| 101
| 0.50996
|
f2862b5b035b8721de31d8aeca4f7e36c390c7db
| 75,539
|
py
|
Python
|
nuitka/tree/ComplexCallHelperFunctions.py
|
sthagen/Nuitka-Nuitka
|
023dc76eeafd9c53ee2a51931474ddd98a3ba083
|
[
"Apache-2.0"
] | null | null | null |
nuitka/tree/ComplexCallHelperFunctions.py
|
sthagen/Nuitka-Nuitka
|
023dc76eeafd9c53ee2a51931474ddd98a3ba083
|
[
"Apache-2.0"
] | null | null | null |
nuitka/tree/ComplexCallHelperFunctions.py
|
sthagen/Nuitka-Nuitka
|
023dc76eeafd9c53ee2a51931474ddd98a3ba083
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" This module is providing helper functions for complex call re-formulations.
One for each type of call. """
from nuitka.nodes.AttributeNodes import (
ExpressionAttributeCheck,
makeExpressionAttributeLookup,
)
from nuitka.nodes.BuiltinDictNodes import ExpressionBuiltinDict
from nuitka.nodes.BuiltinIteratorNodes import ExpressionBuiltinIter1
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.BuiltinRefNodes import (
ExpressionBuiltinAnonymousRef,
makeExpressionBuiltinTypeRef,
)
from nuitka.nodes.BuiltinTypeNodes import ExpressionBuiltinTuple
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.ComparisonNodes import (
ExpressionComparisonIn,
ExpressionComparisonIsNot,
)
from nuitka.nodes.ConditionalNodes import (
ExpressionConditionalOr,
makeStatementConditional,
)
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
ExpressionDictOperationIteritems,
StatementDictOperationSet,
StatementDictOperationSetKeyValue,
)
from nuitka.nodes.ExceptionNodes import (
ExpressionBuiltinMakeException,
StatementRaiseException,
)
from nuitka.nodes.FunctionAttributeNodes import ExpressionFunctionErrorStr
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.SubscriptNodes import (
ExpressionSubscriptLookup,
StatementAssignmentSubscript,
)
from nuitka.nodes.TypeNodes import (
ExpressionBuiltinIsinstance,
ExpressionBuiltinType1,
)
from nuitka.nodes.VariableAssignNodes import makeStatementAssignmentVariable
from nuitka.nodes.VariableDelNodes import StatementReleaseVariable
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import (
getComplexCallSequenceErrorTemplate,
python_version,
)
from nuitka.specs.ParameterSpecs import ParameterSpec
from .InternalModule import (
internal_source_ref,
makeInternalHelperFunctionBody,
once_decorator,
)
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
makeCallNode,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
)
# TODO: Consider using ExpressionOutlineNodes for at least some of these
# or their own helpers.
def orderArgs(*args):
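    # For Python 3.5+ the given argument names are re-ordered by weight:
    # "star_arg_list" is pulled one slot earlier and "kw" pushed one slot later
    # than their original position; on older versions the order is kept as-is.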
if python_version >= 0x350:
def weight(arg):
result = args.index(arg)
if arg == "kw":
result += 1.5
elif arg == "star_arg_list":
result -= 1.5
return result
return tuple(sorted(args, key=weight))
return args
def _makeNameAttributeLookup(node, attribute_name="__name__"):
return makeExpressionAttributeLookup(
expression=node, attribute_name=attribute_name, source_ref=internal_source_ref
)
@once_decorator
def getCallableNameDescBody():
helper_name = "get_callable_name_desc"
# Equivalent of:
#
# Note: The "called_type" is a temporary variable.
#
    #     called_type = type(called)
#
# if isinstance(called, (FunctionType, MethodType, BuiltinFunctionType)):
# return called.__name__
# elif python_version < 0x3 and isinstance(called, ClassType):
# return called_type.__name__ + " constructor"
# elif python_version < 0x3 and isinstance(called, InstanceType):
# return called_type.__name__ + " instance"
# else:
# return called_type.__name__ + " object"
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called",),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
if python_version < 0x390:
function_name = makeBinaryOperationNode(
operator="Add",
left=_makeNameAttributeLookup(
node=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
attribute_name="__name__",
),
right=makeConstantRefNode(
constant="()",
source_ref=internal_source_ref,
user_provided=True,
),
source_ref=internal_source_ref,
)
else:
# TODO: Make it usable for pre-Python 3.9 too.
function_name = ExpressionFunctionErrorStr(
value=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
)
functions_case = makeStatementsSequenceFromStatement(
statement=(
StatementReturn(
expression=function_name,
source_ref=internal_source_ref,
)
)
)
if python_version >= 0x390:
result.setChild("body", functions_case)
return result
no_branch = StatementReturn(
expression=makeBinaryOperationNode(
operator="Add",
right=makeConstantRefNode(
constant=" object", source_ref=internal_source_ref, user_provided=True
),
left=_makeNameAttributeLookup(
ExpressionBuiltinType1(
value=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
)
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
if python_version < 0x300:
instance_case = StatementReturn(
expression=makeBinaryOperationNode(
operator="Add",
right=makeConstantRefNode(
constant=" instance",
source_ref=internal_source_ref,
user_provided=True,
),
left=_makeNameAttributeLookup(
_makeNameAttributeLookup(
ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
attribute_name="__class__",
)
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
no_branch = makeStatementConditional(
condition=ExpressionBuiltinIsinstance(
instance=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
classes=ExpressionBuiltinAnonymousRef(
builtin_name="instance", source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=instance_case,
no_branch=no_branch,
source_ref=internal_source_ref,
)
class_case = StatementReturn(
expression=makeBinaryOperationNode(
operator="Add",
right=makeConstantRefNode(
constant=" constructor",
source_ref=internal_source_ref,
user_provided=True,
),
left=_makeNameAttributeLookup(
ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
)
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
no_branch = makeStatementConditional(
condition=ExpressionBuiltinIsinstance(
instance=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
classes=ExpressionBuiltinAnonymousRef(
builtin_name="classobj", source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=class_case,
no_branch=no_branch,
source_ref=internal_source_ref,
)
if python_version < 0x300:
normal_cases = ("function", "builtin_function_or_method", "instancemethod")
else:
normal_cases = ("function", "builtin_function_or_method")
result.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeStatementConditional(
condition=ExpressionBuiltinIsinstance(
instance=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
classes=makeExpressionMakeTuple(
elements=tuple(
ExpressionBuiltinAnonymousRef(
builtin_name=builtin_name,
source_ref=internal_source_ref,
)
for builtin_name in normal_cases
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
yes_branch=functions_case,
no_branch=no_branch,
source_ref=internal_source_ref,
)
),
)
return result
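# Illustration (added): the helper built above yields the callable description
# used in error messages. Before Python 3.9 a plain function "f" is described
# as "f()", an old-style class "C" as "C constructor", one of its instances as
# "C instance", and anything else as e.g. "int object". On 3.9+ the text comes
# from ExpressionFunctionErrorStr, which is assumed to mirror CPython's own
# formatting.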
def makeStarListArgumentErrorRaise(called_variable, star_list_variable):
return StatementRaiseException(
exception_type=ExpressionBuiltinMakeException(
exception_name="TypeError",
args=(
makeBinaryOperationNode(
operator="Mod",
left=makeConstantRefNode(
constant=getComplexCallSequenceErrorTemplate(),
source_ref=internal_source_ref,
user_provided=True,
),
right=makeExpressionMakeTuple(
elements=(
ExpressionFunctionCall(
function=ExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getCallableNameDescBody(),
source_ref=internal_source_ref,
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=internal_source_ref,
),
values=(
ExpressionVariableRef(
variable=called_variable,
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
_makeNameAttributeLookup(
ExpressionBuiltinType1(
value=ExpressionVariableRef(
variable=star_list_variable,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
),
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
exception_value=None,
exception_trace=None,
exception_cause=None,
source_ref=internal_source_ref,
)
def _makeStarListArgumentToTupleStatement(called_variable, star_list_variable):
if python_version >= 0x350:
non_tuple_code = makeStatementConditional(
condition=ExpressionConditionalOr(
left=ExpressionAttributeCheck(
expression=ExpressionVariableRef(
variable=star_list_variable, source_ref=internal_source_ref
),
attribute_name="__iter__",
source_ref=internal_source_ref,
),
right=ExpressionAttributeCheck(
expression=ExpressionVariableRef(
variable=star_list_variable, source_ref=internal_source_ref
),
attribute_name="__getitem__",
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
yes_branch=makeStatementAssignmentVariable(
variable=star_list_variable,
source=ExpressionBuiltinTuple(
value=ExpressionVariableRef(
variable=star_list_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
no_branch=makeStarListArgumentErrorRaise(
called_variable=called_variable, star_list_variable=star_list_variable
),
source_ref=internal_source_ref,
)
else:
non_tuple_code = makeTryExceptSingleHandlerNode(
tried=makeStatementAssignmentVariable(
variable=star_list_variable,
source=ExpressionBuiltinTuple(
value=ExpressionVariableRef(
variable=star_list_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
exception_name="TypeError",
handler_body=makeStarListArgumentErrorRaise(
called_variable=called_variable, star_list_variable=star_list_variable
),
source_ref=internal_source_ref,
)
return makeStatementConditional(
condition=ExpressionComparisonIsNot(
left=ExpressionBuiltinType1(
value=ExpressionVariableRef(
variable=star_list_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
right=makeExpressionBuiltinTypeRef(
builtin_name="tuple", source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=non_tuple_code,
no_branch=None,
source_ref=internal_source_ref,
)
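# Illustration (added): this reformulates the handling of "f(*x)". A tuple is
# passed through unchanged, any other iterable (list, generator, object with
# "__getitem__") is converted via tuple(x), and a non-iterable value raises a
# TypeError naming the callable and the offending type, roughly
# "f() argument after * must be an iterable, not int"; the exact wording comes
# from getComplexCallSequenceErrorTemplate and differs between Python versions.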
def _makeRaiseExceptionMustBeMapping(called_variable, star_dict_variable):
return StatementRaiseException(
exception_type=ExpressionBuiltinMakeException(
exception_name="TypeError",
args=(
makeBinaryOperationNode(
operator="Mod",
left=makeConstantRefNode(
constant="""\
%s argument after ** must be a mapping, not %s""",
source_ref=internal_source_ref,
user_provided=True,
),
right=makeExpressionMakeTuple(
elements=(
ExpressionFunctionCall(
function=ExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getCallableNameDescBody(),
source_ref=internal_source_ref,
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=internal_source_ref,
),
values=(
ExpressionVariableRef(
variable=called_variable,
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
_makeNameAttributeLookup(
ExpressionBuiltinType1(
value=ExpressionVariableRef(
variable=star_dict_variable,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
),
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
exception_value=None,
exception_trace=None,
exception_cause=None,
source_ref=internal_source_ref,
)
def _makeIteratingLoopStatement(tmp_iter_variable, tmp_item_variable, statements):
loop_body = makeStatementsSequenceFromStatements(
makeTryExceptSingleHandlerNode(
tried=makeStatementAssignmentVariable(
variable=tmp_item_variable,
source=ExpressionBuiltinNext1(
value=ExpressionTempVariableRef(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
exception_name="StopIteration",
handler_body=StatementLoopBreak(source_ref=internal_source_ref),
source_ref=internal_source_ref,
),
*statements
)
return StatementLoop(loop_body=loop_body, source_ref=internal_source_ref)
def _makeStarDictArgumentToDictStatement(result, called_variable, star_dict_variable):
temp_scope = result.allocateTempScope("mapping")
tmp_dict_variable = result.allocateTempVariable(temp_scope, "dict")
tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
tmp_keys_variable = result.allocateTempVariable(temp_scope, "keys")
tmp_key_variable = result.allocateTempVariable(temp_scope, "key")
loop_body = []
if python_version >= 0x380:
loop_body.append(
makeStatementConditional(
condition=ExpressionComparisonIn(
left=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
right=ExpressionVariableRef(
variable=tmp_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=_makeRaiseDuplicationItem(
called_variable=called_variable, tmp_key_variable=tmp_key_variable
),
no_branch=None,
source_ref=internal_source_ref,
)
)
loop_body.append(
StatementDictOperationSet(
dict_arg=ExpressionTempVariableRef(
variable=tmp_dict_variable, source_ref=internal_source_ref
),
key=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
value=ExpressionSubscriptLookup(
expression=ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
subscript=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
)
mapping_case = makeStatementsSequenceFromStatements(
makeTryExceptSingleHandlerNode(
tried=makeStatementAssignmentVariable(
variable=tmp_keys_variable,
source=makeCallNode(
_makeNameAttributeLookup(
ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
attribute_name="keys",
),
internal_source_ref,
),
source_ref=internal_source_ref,
),
exception_name="AttributeError",
handler_body=_makeRaiseExceptionMustBeMapping(
called_variable=called_variable, star_dict_variable=star_dict_variable
),
source_ref=internal_source_ref,
),
makeStatementAssignmentVariable(
variable=tmp_iter_variable,
source=ExpressionBuiltinIter1(
value=ExpressionTempVariableRef(
variable=tmp_keys_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
makeStatementAssignmentVariable(
variable=tmp_dict_variable,
source=makeConstantRefNode(
constant={}, source_ref=internal_source_ref, user_provided=True
),
source_ref=internal_source_ref,
),
_makeIteratingLoopStatement(
tmp_iter_variable=tmp_iter_variable,
tmp_item_variable=tmp_key_variable,
statements=loop_body,
),
makeStatementAssignmentVariable(
variable=star_dict_variable,
source=ExpressionTempVariableRef(
variable=tmp_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
)
tried = makeStatementConditional(
condition=ExpressionComparisonIsNot(
left=ExpressionBuiltinType1(
value=ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
right=makeExpressionBuiltinTypeRef(
builtin_name="dict", source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=mapping_case,
no_branch=None,
source_ref=internal_source_ref,
)
final = (
StatementReleaseVariable(
variable=tmp_dict_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_keys_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_key_variable, source_ref=internal_source_ref
),
)
return makeTryFinallyStatement(
provider=result, tried=tried, final=final, source_ref=internal_source_ref
)
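# Illustration (added): this reformulates "f(**m)" for an "m" that is not a
# plain dict. Anything exposing a working ".keys()" is copied key by key into
# a fresh dict, while an object without ".keys()", e.g. a list, raises the
# "argument after ** must be a mapping, not list" TypeError. On Python 3.8+ a
# key yielded twice by ".keys()" additionally triggers the "got multiple
# values for keyword argument" TypeError.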
def _makeRaiseNoStringItem(called_variable):
if python_version < 0x390:
raise_arg = makeBinaryOperationNode(
operator="Mod",
left=makeConstantRefNode(
constant="%s keywords must be strings",
source_ref=internal_source_ref,
user_provided=True,
),
right=ExpressionFunctionCall(
function=ExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getCallableNameDescBody(),
source_ref=internal_source_ref,
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=internal_source_ref,
),
values=(
ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
else:
raise_arg = makeConstantRefNode(
constant="keywords must be strings",
source_ref=internal_source_ref,
user_provided=True,
)
return StatementRaiseException(
exception_type=ExpressionBuiltinMakeException(
exception_name="TypeError",
args=(raise_arg,),
source_ref=internal_source_ref,
),
exception_value=None,
exception_trace=None,
exception_cause=None,
source_ref=internal_source_ref,
)
def _makeRaiseDuplicationItem(called_variable, tmp_key_variable):
return StatementRaiseException(
exception_type=ExpressionBuiltinMakeException(
exception_name="TypeError",
args=(
makeBinaryOperationNode(
operator="Mod",
left=makeConstantRefNode(
constant="""\
%s got multiple values for keyword argument '%s'""",
source_ref=internal_source_ref,
user_provided=True,
),
right=makeExpressionMakeTuple(
elements=(
ExpressionFunctionCall(
function=ExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getCallableNameDescBody(),
source_ref=internal_source_ref,
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=internal_source_ref,
),
values=(
ExpressionVariableRef(
variable=called_variable,
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
ExpressionTempVariableRef(
variable=tmp_key_variable,
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
exception_value=None,
exception_trace=None,
exception_cause=None,
source_ref=internal_source_ref,
)
def _makeStarDictArgumentMergeToKwStatement(
result, called_variable, kw_variable, star_dict_variable
):
# This is plain terribly complex
temp_scope = result.allocateTempScope("dict")
tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
tmp_keys_variable = result.allocateTempVariable(temp_scope, "keys")
tmp_key_variable = result.allocateTempVariable(temp_scope, "key_xxx")
final = [
StatementReleaseVariable(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_keys_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_key_variable, source_ref=internal_source_ref
),
]
mapping_loop_body = (
makeStatementConditional(
condition=ExpressionComparisonIn(
left=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
right=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=_makeRaiseDuplicationItem(
called_variable=called_variable, tmp_key_variable=tmp_key_variable
),
no_branch=None,
source_ref=internal_source_ref,
),
StatementAssignmentSubscript(
subscribed=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
subscript=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
source=ExpressionSubscriptLookup(
expression=ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
subscript=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
mapping_case = makeStatementsSequenceFromStatements(
makeTryExceptSingleHandlerNode(
tried=makeStatementAssignmentVariable(
variable=tmp_keys_variable,
source=makeCallNode(
_makeNameAttributeLookup(
ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
attribute_name="keys",
),
internal_source_ref,
),
source_ref=internal_source_ref,
),
exception_name="AttributeError",
handler_body=_makeRaiseExceptionMustBeMapping(
called_variable=called_variable, star_dict_variable=star_dict_variable
),
source_ref=internal_source_ref,
),
makeStatementAssignmentVariable(
variable=tmp_iter_variable,
source=ExpressionBuiltinIter1(
value=ExpressionTempVariableRef(
variable=tmp_keys_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
_makeIteratingLoopStatement(
tmp_iter_variable=tmp_iter_variable,
tmp_item_variable=tmp_key_variable,
statements=mapping_loop_body,
),
)
temp_scope = result.allocateTempScope("dict")
tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
tmp_item_variable = result.allocateTempVariable(temp_scope, "item")
tmp_key_variable = result.allocateTempVariable(temp_scope, "key")
final += (
StatementReleaseVariable(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_item_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_key_variable, source_ref=internal_source_ref
),
)
dict_loop_body = (
makeStatementAssignmentVariable(
variable=tmp_key_variable,
source=ExpressionSubscriptLookup(
expression=ExpressionTempVariableRef(
variable=tmp_item_variable, source_ref=internal_source_ref
),
subscript=makeConstantRefNode(
constant=0, source_ref=internal_source_ref, user_provided=True
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
makeStatementConditional(
condition=ExpressionComparisonIn(
left=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
right=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=_makeRaiseDuplicationItem(
called_variable=called_variable, tmp_key_variable=tmp_key_variable
),
no_branch=None,
source_ref=internal_source_ref,
),
StatementAssignmentSubscript(
subscribed=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
subscript=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
source=ExpressionSubscriptLookup(
expression=ExpressionTempVariableRef(
variable=tmp_item_variable, source_ref=internal_source_ref
),
subscript=makeConstantRefNode(
constant=1, source_ref=internal_source_ref, user_provided=True
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
dict_case = makeStatementsSequenceFromStatements(
makeStatementAssignmentVariable(
variable=kw_variable,
source=ExpressionBuiltinDict(
pos_arg=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
pairs=(),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
makeStatementAssignmentVariable(
variable=tmp_iter_variable,
source=ExpressionBuiltinIter1(
value=ExpressionDictOperationIteritems(
dict_arg=ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
_makeIteratingLoopStatement(
tmp_iter_variable=tmp_iter_variable,
tmp_item_variable=tmp_item_variable,
statements=dict_loop_body,
),
)
dict_case = makeStatementConditional(
condition=ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
yes_branch=dict_case,
no_branch=None,
source_ref=internal_source_ref,
)
tried = makeStatementConditional(
condition=ExpressionComparisonIsNot(
left=ExpressionBuiltinType1(
value=ExpressionVariableRef(
variable=star_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
right=makeExpressionBuiltinTypeRef(
builtin_name="dict", source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=mapping_case,
no_branch=dict_case,
source_ref=internal_source_ref,
)
return makeTryFinallyStatement(
provider=result, tried=tried, final=final, source_ref=internal_source_ref
)
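# Illustration (added): this reformulates merging "**star_arg_dict" into an
# explicit keyword dict, e.g. "f(a=1, **{'a': 2})" must raise roughly
# "f() got multiple values for keyword argument 'a'". For a real dict the
# incoming "kw" is first copied with dict(kw) so it is not mutated in place;
# for other mappings the ".keys()" protocol is used as in the helper above.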
@once_decorator
def getFunctionCallHelperStarList():
helper_name = "complex_call_helper_star_list"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call.
#
# if not isinstance(star_arg_list, tuple):
# try:
# star_arg_list = tuple(star_arg_list)
# except TypeError:
# raise TypeError, "%s argument after * must be a sequence, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_list).__name__
# )
#
# return called(*star_arg_list)
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "star_arg_list"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
body = makeStatementsSequenceFromStatements(
_makeStarListArgumentToTupleStatement(
called_variable=called_variable, star_list_variable=star_arg_list_variable
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
kw=None,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperKeywordsStarList():
helper_name = "complex_call_helper_keywords_star_list"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call.
#
# if not isinstance(star_arg_list, tuple):
# try:
# star_arg_list = tuple(star_arg_list)
# except TypeError:
# raise TypeError, "%s argument after * must be a sequence, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_list).__name__
# )
#
    # return called(*star_arg_list, **kw)
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=orderArgs("called", "kw", "star_arg_list"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
kw_variable = result.getVariableForAssignment(variable_name="kw")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
body = makeStatementsSequenceFromStatements(
_makeStarListArgumentToTupleStatement(
called_variable=called_variable, star_list_variable=star_arg_list_variable
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
kw=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperPosStarList():
helper_name = "complex_call_helper_pos_star_list"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call.
#
# if not isinstance(star_arg_list, tuple):
# try:
# star_arg_list = tuple(star_arg_list)
# except TypeError:
# raise TypeError, "%s argument after * must be a sequence, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_list).__name__
# )
#
    # return called(*(args + star_arg_list))
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "args", "star_arg_list"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
args_variable = result.getVariableForAssignment(variable_name="args")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
body = makeStatementsSequenceFromStatements(
_makeStarListArgumentToTupleStatement(
called_variable=called_variable, star_list_variable=star_arg_list_variable
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=makeBinaryOperationNode(
operator="Add",
left=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
right=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
kw=None,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperPosKeywordsStarList():
helper_name = "complex_call_helper_pos_keywords_star_list"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call.
#
# if not isinstance(star_arg_list, tuple):
# try:
# star_arg_list = tuple(star_arg_list)
# except TypeError:
# raise TypeError, "%s argument after * must be a sequence, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_list).__name__
# )
#
    # return called(*(args + star_arg_list), **kw)
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=orderArgs("called", "args", "kw", "star_arg_list"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
args_variable = result.getVariableForAssignment(variable_name="args")
kw_variable = result.getVariableForAssignment(variable_name="kw")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
body = makeStatementsSequenceFromStatements(
_makeStarListArgumentToTupleStatement(
called_variable=called_variable, star_list_variable=star_arg_list_variable
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=makeBinaryOperationNode(
operator="Add",
left=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
right=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
kw=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperStarDict():
helper_name = "complex_call_helper_star_dict"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call.
#
# if not isinstance(star_arg_dict, dict):
# try:
# tmp_keys = star_arg_dict.keys()
# except AttributeError:
    #         raise TypeError, "%s argument after ** must be a mapping, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_dict).__name__
# )
#
# tmp_iter = iter(keys)
# tmp_dict = {}
#
# while 1:
# try:
# tmp_key = tmp_iter.next()
# except StopIteration:
# break
#
# tmp_dict[tmp_key] = star_dict_arg[tmp_key]
#
    #     star_arg_dict = tmp_dict
#
# return called(**star_arg_dict)
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "star_arg_dict"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
body = makeStatementsSequenceFromStatements(
_makeStarDictArgumentToDictStatement(
result=result,
called_variable=called_variable,
star_dict_variable=star_arg_dict_variable,
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=None,
kw=ExpressionVariableRef(
variable=star_arg_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperPosStarDict():
helper_name = "complex_call_helper_pos_star_dict"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call.
#
# if not isinstance(star_arg_dict, dict):
# try:
# tmp_keys = star_arg_dict.keys()
# except AttributeError:
    #         raise TypeError, "%s argument after ** must be a mapping, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_dict).__name__
# )
#
# tmp_iter = iter(keys)
# tmp_dict = {}
#
# while 1:
# try:
# tmp_key = tmp_iter.next()
# except StopIteration:
# break
#
# tmp_dict[tmp_key] = star_dict_arg[tmp_key]
#
    #     star_arg_dict = tmp_dict
#
    # return called(*args, **star_arg_dict)
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "args", "star_arg_dict"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
args_variable = result.getVariableForAssignment(variable_name="args")
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
body = makeStatementsSequenceFromStatements(
_makeStarDictArgumentToDictStatement(
result=result,
called_variable=called_variable,
star_dict_variable=star_arg_dict_variable,
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
kw=ExpressionVariableRef(
variable=star_arg_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperKeywordsStarDict():
helper_name = "complex_call_helper_keywords_star_dict"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call. One goal is to avoid copying "kw" unless really
# necessary, and to take the slow route only for non-dictionaries.
#
# if not isinstance(star_arg_dict, dict):
# try:
# tmp_keys = star_arg_dict.keys()
# except AttributeError:
    #         raise TypeError, "%s argument after ** must be a mapping, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_dict).__name__
# )
#
# if keys:
# kw = dict(kw)
#
# tmp_iter = iter(keys)
# tmp_dict = {}
#
# while 1:
# try:
# tmp_key = tmp_iter.next()
# except StopIteration:
# break
#
# if tmp_key in kw:
# raise TypeError, "%s got multiple values for keyword argument '%s'" % (
# get_callable_name_desc(function),
# tmp_key
# )
#
    #             kw[tmp_key] = star_dict_arg[tmp_key]
#
# elif star_arg_dict:
# tmp_iter = star_arg_dict.iteritems()
#
# kw = dict(kw)
# while 1:
# try:
# tmp_key, tmp_value = tmp_iter.next()
# except StopIteration:
# break
#
# if tmp_key in kw:
# raise TypeError, "%s got multiple values for keyword argument '%s'" % (
# get_callable_name_desc(function),
# tmp_key
# )
#
# kw[tmp_key] = tmp_value
#
# return called(**kw)
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "kw", "star_arg_dict"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
kw_variable = result.getVariableForAssignment(variable_name="kw")
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
body = makeStatementsSequenceFromStatements(
_makeStarDictArgumentMergeToKwStatement(
result=result,
called_variable=called_variable,
kw_variable=kw_variable,
star_dict_variable=star_arg_dict_variable,
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=None,
kw=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperPosKeywordsStarDict():
helper_name = "complex_call_helper_pos_keywords_star_dict"
# Equivalent of:
#
# Note: Call in here is not the same, as it can go without checks directly
# to PyObject_Call. One goal is to avoid copying "kw" unless really
# necessary, and to take the slow route only for non-dictionaries.
#
# if not isinstance(star_arg_dict, dict):
# try:
# tmp_keys = star_arg_dict.keys()
# except AttributeError:
    #         raise TypeError, "%s argument after ** must be a mapping, not %s" % (
# get_callable_name_desc(function),
# type(star_arg_dict).__name__
# )
#
# if keys:
# kw = dict(kw)
#
# tmp_iter = iter(keys)
# tmp_dict = {}
#
# while 1:
# try:
# tmp_key = tmp_iter.next()
# except StopIteration:
# break
#
# if tmp_key in kw:
# raise TypeError, "%s got multiple values for keyword argument '%s'" % (
# get_callable_name_desc(function),
# tmp_key
# )
#
# kw[tmp_key] = star_dict_arg[tmp_key]
#
# elif star_arg_dict:
# tmp_iter = star_arg_dict.iteritems()
#
# kw = dict(kw)
# while 1:
# try:
# tmp_key, tmp_value = tmp_iter.next()
# except StopIteration:
# break
#
# if tmp_key in kw:
# raise TypeError, "%s got multiple values for keyword argument '%s'" % (
# get_callable_name_desc(function),
# tmp_key
# )
#
# kw[tmp_key] = tmp_value
#
    # return called(*args, **kw)
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "args", "kw", "star_arg_dict"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
args_variable = result.getVariableForAssignment(variable_name="args")
kw_variable = result.getVariableForAssignment(variable_name="kw")
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
body = makeStatementsSequenceFromStatements(
_makeStarDictArgumentMergeToKwStatement(
result=result,
called_variable=called_variable,
kw_variable=kw_variable,
star_dict_variable=star_arg_dict_variable,
),
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
kw=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
result.setChild("body", body)
return result
def getDoubleStarArgsConversion(
result, called_variable, kw_variable, star_arg_list_variable, star_arg_dict_variable
):
statements = []
if kw_variable is not None:
statements.append(
_makeStarDictArgumentMergeToKwStatement(
result=result,
called_variable=called_variable,
kw_variable=kw_variable,
star_dict_variable=star_arg_dict_variable,
)
)
else:
statements.append(
_makeStarDictArgumentToDictStatement(
result=result,
called_variable=called_variable,
star_dict_variable=star_arg_dict_variable,
)
)
statements.append(
_makeStarListArgumentToTupleStatement(
called_variable=called_variable, star_list_variable=star_arg_list_variable
)
)
return statements
@once_decorator
def getFunctionCallHelperStarListStarDict():
helper_name = "complex_call_helper_star_list_star_dict"
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "star_arg_list", "star_arg_dict"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
statements = getDoubleStarArgsConversion(
result=result,
called_variable=called_variable,
star_arg_list_variable=star_arg_list_variable,
kw_variable=None,
star_arg_dict_variable=star_arg_dict_variable,
)
statements.append(
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
kw=ExpressionVariableRef(
variable=star_arg_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
)
body = makeStatementsSequenceFromStatements(*statements)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperPosStarListStarDict():
helper_name = "complex_call_helper_pos_star_list_star_dict"
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called", "args", "star_arg_list", "star_arg_dict"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
args_variable = result.getVariableForAssignment(variable_name="args")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
statements = getDoubleStarArgsConversion(
result=result,
called_variable=called_variable,
star_arg_list_variable=star_arg_list_variable,
kw_variable=None,
star_arg_dict_variable=star_arg_dict_variable,
)
if python_version >= 0x360:
statements.reverse()
statements.append(
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=makeBinaryOperationNode(
operator="Add",
left=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
right=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
kw=ExpressionVariableRef(
variable=star_arg_dict_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
)
body = makeStatementsSequenceFromStatements(*statements)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperKeywordsStarListStarDict():
helper_name = "complex_call_helper_keywords_star_list_star_dict"
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=orderArgs("called", "kw", "star_arg_list", "star_arg_dict"),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
kw_variable = result.getVariableForAssignment(variable_name="kw")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
statements = getDoubleStarArgsConversion(
result=result,
called_variable=called_variable,
star_arg_list_variable=star_arg_list_variable,
kw_variable=kw_variable,
star_arg_dict_variable=star_arg_dict_variable,
)
statements.append(
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
kw=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
)
body = makeStatementsSequenceFromStatements(*statements)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperPosKeywordsStarListStarDict():
helper_name = "complex_call_helper_pos_keywords_star_list_star_dict"
# Only need to check if the star argument value is a sequence and then
# convert to tuple.
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=orderArgs(
"called", "args", "kw", "star_arg_list", "star_arg_dict"
),
ps_list_star_arg=None,
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
called_variable = result.getVariableForAssignment(variable_name="called")
args_variable = result.getVariableForAssignment(variable_name="args")
kw_variable = result.getVariableForAssignment(variable_name="kw")
star_arg_list_variable = result.getVariableForAssignment(
variable_name="star_arg_list"
)
star_arg_dict_variable = result.getVariableForAssignment(
variable_name="star_arg_dict"
)
statements = getDoubleStarArgsConversion(
result=result,
called_variable=called_variable,
star_arg_list_variable=star_arg_list_variable,
kw_variable=kw_variable,
star_arg_dict_variable=star_arg_dict_variable,
)
if python_version >= 0x360:
statements.reverse()
statements.append(
StatementReturn(
expression=makeExpressionCall(
called=ExpressionVariableRef(
variable=called_variable, source_ref=internal_source_ref
),
args=makeBinaryOperationNode(
operator="Add",
left=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
right=ExpressionVariableRef(
variable=star_arg_list_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
kw=ExpressionVariableRef(
variable=kw_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
)
body = makeStatementsSequenceFromStatements(*statements)
result.setChild("body", body)
return result
@once_decorator
def getFunctionCallHelperDictionaryUnpacking():
helper_name = "complex_call_helper_dict_unpacking_checks"
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=("called",),
ps_list_star_arg="args",
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
args_variable = result.getVariableForAssignment(variable_name="args")
called_variable = result.getVariableForAssignment(variable_name="called")
temp_scope = None
tmp_result_variable = result.allocateTempVariable(temp_scope, "dict")
tmp_iter_variable = result.allocateTempVariable(temp_scope, "dicts_iter")
tmp_item_variable = result.allocateTempVariable(temp_scope, "args_item")
tmp_iter2_variable = result.allocateTempVariable(temp_scope, "dict_iter")
tmp_key_variable = result.allocateTempVariable(temp_scope, "dict_key")
update_body = (
makeStatementConditional(
condition=ExpressionComparisonIsNot(
left=ExpressionBuiltinType1(
value=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
right=makeExpressionBuiltinTypeRef(
builtin_name="str", source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=_makeRaiseNoStringItem(called_variable=called_variable),
no_branch=None,
source_ref=internal_source_ref,
),
makeStatementConditional(
condition=ExpressionComparisonIn(
left=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
right=ExpressionTempVariableRef(
variable=tmp_result_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
yes_branch=_makeRaiseDuplicationItem(
called_variable=called_variable, tmp_key_variable=tmp_key_variable
),
no_branch=None,
source_ref=internal_source_ref,
),
StatementDictOperationSetKeyValue(
dict_arg=ExpressionTempVariableRef(
variable=tmp_result_variable, source_ref=internal_source_ref
),
key=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
value=ExpressionSubscriptLookup(
expression=ExpressionTempVariableRef(
variable=tmp_item_variable, source_ref=internal_source_ref
),
subscript=ExpressionTempVariableRef(
variable=tmp_key_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
loop_body = (
makeTryExceptSingleHandlerNode(
tried=makeStatementsSequenceFromStatements(
makeStatementAssignmentVariable(
variable=tmp_iter2_variable,
source=ExpressionBuiltinIter1(
value=makeCallNode(
_makeNameAttributeLookup(
ExpressionTempVariableRef(
variable=tmp_item_variable,
source_ref=internal_source_ref,
),
attribute_name="keys",
),
internal_source_ref,
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
_makeIteratingLoopStatement(
tmp_iter_variable=tmp_iter2_variable,
tmp_item_variable=tmp_key_variable,
statements=update_body,
),
),
exception_name="AttributeError",
handler_body=_makeRaiseExceptionMustBeMapping(
called_variable=called_variable, star_dict_variable=tmp_item_variable
),
source_ref=internal_source_ref,
),
)
final = (
StatementReleaseVariable(
variable=tmp_result_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_item_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_iter2_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_key_variable, source_ref=internal_source_ref
),
)
tried = makeStatementsSequenceFromStatements(
makeStatementAssignmentVariable(
variable=tmp_iter_variable,
source=ExpressionBuiltinIter1(
value=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
makeStatementAssignmentVariable(
variable=tmp_result_variable,
source=makeConstantRefNode(constant={}, source_ref=internal_source_ref),
source_ref=internal_source_ref,
),
_makeIteratingLoopStatement(
tmp_iter_variable=tmp_iter_variable,
tmp_item_variable=tmp_item_variable,
statements=loop_body,
),
StatementReturn(
expression=ExpressionTempVariableRef(
variable=tmp_result_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
)
body = makeStatementsSequenceFromStatement(
makeTryFinallyStatement(
provider=result,
tried=tried,
final=final,
source_ref=internal_source_ref,
)
)
result.setChild("body", body)
return result
| 35.036642
| 93
| 0.583672
|
807ac0916bf1a651541dc440dff3ad6d472d1cbb
| 8,760
|
py
|
Python
|
vk/session.py
|
ashvardanian/PyScrapeVk
|
f47757ba9eec08dfb3ad44708eeb3e5b5aa35e24
|
[
"MIT"
] | 1
|
2020-04-15T19:38:17.000Z
|
2020-04-15T19:38:17.000Z
|
vk/session.py
|
ashvardanian/PyScrapeVk
|
f47757ba9eec08dfb3ad44708eeb3e5b5aa35e24
|
[
"MIT"
] | null | null | null |
vk/session.py
|
ashvardanian/PyScrapeVk
|
f47757ba9eec08dfb3ad44708eeb3e5b5aa35e24
|
[
"MIT"
] | null | null | null |
import re
import urllib
import logging
from abc import abstractmethod
import requests
from .exceptions import VkAuthError, VkAPIError
from .exceptions import CAPTCHA_IS_NEEDED as CAPTCHA_IS_NEEDED
from .exceptions import ACCESS_DENIED as ACCESS_DENIED
from .api import APINamespace
from .utils import json_iter_parse, stringify
logger = logging.getLogger('vk')
class SilentAPI:
METHOD_COMMON_PARAMS = {'v', 'lang', 'https', 'test_mode'}
API_URL = 'https://api.vk.com/method/'
CAPTCHA_URL = 'https://m.vk.com/captcha.php'
def __new__(cls, *args, **kwargs):
method_common_params = {key: kwargs.pop(key) for key in tuple(kwargs) if key in cls.METHOD_COMMON_PARAMS}
api = object.__new__(cls)
api.__init__(*args, **kwargs)
return APINamespace(api, method_common_params)
def __init__(self, timeout=10):
self.access_token = None
self.timeout = timeout
self.session = requests.Session()
self.session.headers['Accept'] = 'application/json'
self.session.headers['Content-Type'] = 'application/x-www-form-urlencoded'
def send(self, request):
logger.debug('Prepare API Method request')
self.prepare_request(request)
method_url = self.API_URL + request.method
response = self.session.post(method_url, request.method_params, timeout=self.timeout)
# Make sure we don't have any generic HTTP errors.
try:
response.raise_for_status()
except Exception as e:
yield e
return
# Split the incoming stream of JSON dictionaries
        # or arrays into concise separate objects.
for resp in json_iter_parse(response.text):
if 'error' in resp:
yield VkAPIError(resp['error'])
elif 'response' in resp:
yield resp['response']
@abstractmethod
def prepare_request(self, request):
if self.access_token is not None:
request.method_params['access_token'] = self.access_token
class LoudAPI(SilentAPI):
def send(self, request):
for resp in SilentAPI.send(self, request):
if isinstance(resp, VkAPIError):
# if resp.code == CAPTCHA_IS_NEEDED:
# request.method_params['captcha_key'] = self.get_captcha_key(request)
# request.method_params['captcha_sid'] = request.api_error.captcha_sid
# yield from self.send(request)
# elif resp.code == ACCESS_DENIED:
# self.access_token = self.get_access_token()
# yield from self.send(request)
# else:
# raise resp
raise resp
elif isinstance(resp, Exception):
raise resp
else:
yield resp
def get_access_token(self):
raise NotImplementedError
def get_captcha_key(self, request):
raise NotImplementedError
class API(LoudAPI):
def __init__(self, access_token, **kwargs):
super().__init__(**kwargs)
self.access_token = access_token
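# Usage sketch (added; the attribute-style method access below is an
# assumption, its exact behaviour lives in APINamespace in vk/api.py):
#
#   api = API(access_token='my-token', v='5.92')
#   # SilentAPI.__new__ wraps the instance in an APINamespace, so calls such
#   # as api.users.get(user_ids=1) are expected to be turned into requests and
#   # dispatched through SilentAPI.send().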
class UserAPI(LoudAPI):
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
def __init__(self, user_login='', user_password='', app_id=None, scope='offline', **kwargs):
super().__init__(**kwargs)
self.user_login = user_login
self.user_password = user_password
self.app_id = app_id
self.scope = scope
self.access_token = self.get_access_token()
@staticmethod
def get_form_action(response):
form_action = re.findall(r'<form(?= ).* action="(.+)"', response.text)
if form_action:
return form_action[0]
else:
raise VkAuthError('No form on page {}'.format(response.url))
def get_response_url_queries(self, response):
if not response.ok:
if response.status_code == 401:
raise VkAuthError(response.json()['error_description'])
else:
response.raise_for_status()
return self.get_url_queries(response.url)
@staticmethod
def get_url_queries(url):
parsed_url = urllib.parse.urlparse(url)
url_queries = urllib.parse.parse_qsl(parsed_url.fragment)
        # Note: values of repeated keys are lost here
return dict(url_queries)
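    # Example (added): the OAuth redirect carries its data in the URL fragment,
    # e.g. get_url_queries('https://oauth.vk.com/blank.html#access_token=abc'
    #                      '&expires_in=86400&user_id=1') returns
    # {'access_token': 'abc', 'expires_in': '86400', 'user_id': '1'}.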
def get_access_token(self):
auth_session = requests.Session()
if self.login(auth_session):
return self.authorize(auth_session)
def get_login_form_data(self):
return {
'email': self.user_login,
'pass': self.user_password,
}
def login(self, auth_session):
# Get login page
login_page_response = auth_session.get(self.LOGIN_URL)
        # Get login form action. It must contain ip_h and lg_h values
login_action = self.get_form_action(login_page_response)
# Login using user credentials
login_response = auth_session.post(login_action, self.get_login_form_data())
if 'remixsid' in auth_session.cookies or 'remixsid6' in auth_session.cookies:
return True
url_queries = self.get_url_queries(login_response.url)
if 'sid' in url_queries:
self.auth_captcha_is_needed(login_response)
elif url_queries.get('act') == 'authcheck':
self.auth_check_is_needed(login_response.text)
elif 'security_check' in url_queries:
self.phone_number_is_needed(login_response.text)
else:
raise VkAuthError('Login error (e.g. incorrect password)')
def get_auth_params(self):
return {
'client_id': self.app_id,
'scope': self.scope,
'display': 'mobile',
'response_type': 'token',
}
def authorize(self, auth_session):
"""
OAuth2
"""
# Ask access
ask_access_response = auth_session.post(self.AUTHORIZE_URL, self.get_auth_params())
url_queries = self.get_response_url_queries(ask_access_response)
if 'access_token' not in url_queries:
# Grant access
grant_access_action = self.get_form_action(ask_access_response)
grant_access_response = auth_session.post(grant_access_action)
url_queries = self.get_response_url_queries(grant_access_response)
return self.process_auth_url_queries(url_queries)
def process_auth_url_queries(self, url_queries):
self.expires_in = url_queries.get('expires_in')
self.user_id = url_queries.get('user_id')
return url_queries.get('access_token')
class CommunityAPI(UserAPI):
def __init__(self, *args, **kwargs):
self.group_ids = kwargs.pop('group_ids', None)
self.default_group_id = None
self.access_tokens = {}
super().__init__(*args, **kwargs)
def get_auth_params(self):
auth_params = super().get_auth_params()
auth_params['group_ids'] = stringify(self.group_ids)
return auth_params
def process_auth_url_queries(self, url_queries):
super().process_auth_url_queries(url_queries)
self.access_tokens = {}
for key, value in url_queries.items():
# access_token_GROUP-ID: ACCESS-TOKEN
if key.startswith('access_token_'):
group_id = int(key[len('access_token_'):])
self.access_tokens[group_id] = value
self.default_group_id = self.group_ids[0]
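    # Example (added): with group_ids=[1, 2] the redirect URL is expected to
    # contain 'access_token_1' and 'access_token_2' entries, which end up as
    # self.access_tokens == {1: '<token>', 2: '<token>'} and
    # self.default_group_id == 1.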
def prepare_request(self, request):
group_id = request.method_params.get('group_id', self.default_group_id)
request.method_params['access_token'] = self.access_tokens[group_id]
class InteractiveMixin:
def get_user_login(self):
user_login = input('VK user login: ')
return user_login.strip()
def get_user_password(self):
import getpass
user_password = getpass.getpass('VK user password: ')
return user_password
def get_access_token(self):
logger.debug('InteractiveMixin.get_access_token()')
access_token = super().get_access_token()
if not access_token:
access_token = input('VK API access token: ')
return access_token
def get_captcha_key(self, captcha_image_url):
"""
Read CAPTCHA key from shell
"""
print('Open CAPTCHA image url: ', captcha_image_url)
captcha_key = input('Enter CAPTCHA key: ')
return captcha_key
def get_auth_check_code(self):
"""
Read Auth code from shell
"""
auth_check_code = input('Auth check code: ')
return auth_check_code.strip()
| 33.692308
| 113
| 0.634589
|
d86358a3cada88d531b0e4c9d3f7ffe12ce7e596
| 7,367
|
py
|
Python
|
ec2rlcore/programversion.py
|
ketanbhut/aws-ec2rescue-linux
|
3a4c096f31005ea3b3c36bd8e6f840d457ccc937
|
[
"Apache-2.0"
] | null | null | null |
ec2rlcore/programversion.py
|
ketanbhut/aws-ec2rescue-linux
|
3a4c096f31005ea3b3c36bd8e6f840d457ccc937
|
[
"Apache-2.0"
] | null | null | null |
ec2rlcore/programversion.py
|
ketanbhut/aws-ec2rescue-linux
|
3a4c096f31005ea3b3c36bd8e6f840d457ccc937
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
This is the programversion module.
Functions:
None
Classes:
ProgramVersion: object representation of a four-piece version number.
Exceptions:
ProgramVersionError: base exception class for this module
VersionParsingError: raised when the given version string doesn't conform to the expected standard
InvalidComparisonError: raised when a ProgramVersion object is compared to a non-like object
"""
import re
class ProgramVersion(object):
"""
Object class that provides version representation with multiple parts.
Implements string representation for printing as well as comparison operators for comparing like objects.
Attributes:
major (int): the major version number
minor (int): the minor version number
maintenance (int): the maintenance version number
pre_release (int): the pre_release version number
release (str): the release type
release_short (str): the single char, shorthand release type
release_numerical (int): integer representation of the release for comparison purposes
"""
def __init__(self, version_string):
"""
Parameters:
version_string (str): version string in the form of:
[major version].[minor version].[maintenance version][release type shorthand][pre-release version]
Example: 1.0.0a1
"""
# regex below is provided in PEP 440
# post releases have been excluded (bump the maintenance number instead)
# dev releases have been excluded (these should already be alpha/beta/rc releases)
if not re.match(r"\A([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?\Z", version_string):
raise ProgramVersionParsingError(version_string)
self.major, self.minor, remaining = version_string.split(".")
self.release_short = ""
if len(re.split(r"(a|b|rc)", remaining)) == 1:
self.maintenance = remaining
else:
self.maintenance, self.release_short, self.pre_release = re.split(r"(a|b|rc)", remaining)
if self.release_short == "a":
self.release = "alpha"
self.release_numerical = 1
elif self.release_short == "b":
self.release = "beta"
self.release_numerical = 2
elif self.release_short == "rc":
self.release = "candidate"
self.release_numerical = 3
else:
self.release_short = "r"
self.pre_release = "0"
self.release = "release"
self.release_numerical = 4
def __repr__(self):
"""
Implementation of __repr__ returns a string suitable for recreation of a ProgramVersion instance.
"""
if self.release_short == "r":
return "ProgramVersion(\"{}.{}.{}\")".format(self.major, self.minor, self.maintenance)
else:
return "ProgramVersion(\"{}.{}.{}{}{}\")".format(self.major, self.minor, self.maintenance,
self.release_short, self.pre_release)
def __str__(self):
"""
Implementation of __str__ enables customization of how an object instance is 'printed' when used as
an argument of print().
"""
if self.release_short == "r":
return "{}.{}.{}".format(self.major, self.minor, self.maintenance)
else:
return "{}.{}.{}{}{}".format(self.major, self.minor, self.maintenance, self.release_short, self.pre_release)
def __format__(self, *args):
return self.__str__()
def __len__(self):
"""Implementation enables the builtin len() function to return the length of the string representation."""
return len(self.__str__())
    def __eq__(self, other):
        """Implementation enables the rich comparison operator '=='."""
if isinstance(other, self.__class__):
return int(self.major) == int(other.major) and \
int(self.minor) == int(other.minor) and \
int(self.maintenance) == int(other.maintenance) and \
int(self.release_numerical) == int(other.release_numerical) and \
int(self.pre_release) == int(other.pre_release)
raise ProgramVersionInvalidComparisonError(type(other))
def __ne__(self, other):
"""Implementation enables the rich comparison operator '!='."""
if isinstance(other, self.__class__):
return not self.__eq__(other)
raise ProgramVersionInvalidComparisonError(type(other))
def __lt__(self, other):
"""
Implementation enables the rich comparison operator '<'.
        Python derives '>' via the reflected '<'; '<=' and '>=' would need __le__/__ge__ or functools.total_ordering.
"""
if isinstance(other, self.__class__):
return int(other.major) > int(self.major) \
or (int(other.major) == int(self.major) and
int(other.minor) > int(self.minor)) \
or (int(other.major) == int(self.major) and
int(other.minor) == int(self.minor) and
int(other.maintenance) > int(self.maintenance)) \
or (int(other.major) == int(self.major) and
int(other.minor) == int(self.minor) and
int(other.maintenance) == int(self.maintenance) and
other.release_numerical > self.release_numerical) \
or (int(other.major) == int(self.major) and
int(other.minor) == int(self.minor) and
int(other.maintenance) == int(self.maintenance) and
other.release_numerical == self.release_numerical and
int(other.pre_release) > int(self.pre_release))
raise ProgramVersionInvalidComparisonError(type(other))
class ProgramVersionError(Exception):
"""Base class for exceptions in this module."""
pass
class ProgramVersionParsingError(ProgramVersionError):
"""An invalid version string was encountered."""
def __init__(self, version_string, *args):
message = "Invalid version string: '{}'. Example correct version string: 1.0.0rc1." \
" For formatting details see PEP 440.".format(version_string)
super(ProgramVersionParsingError, self).__init__(message, *args)
class ProgramVersionInvalidComparisonError(ProgramVersionError):
"""A ProgramVersion was compared with another object of a different type."""
def __init__(self, other_type, *args):
message = "Invalid comparison of 'ProgramVersion' object with object of type '{}'.".format(other_type)
super(ProgramVersionInvalidComparisonError, self).__init__(message, *args)
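if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: shows how
    # ProgramVersion parses and orders the version strings described above.
    release = ProgramVersion("1.3.0")
    candidate = ProgramVersion("1.3.0rc1")
    beta = ProgramVersion("1.3.0b2")
    assert beta < candidate < release            # pre-release ordering: b < rc < final
    assert ProgramVersion("1.2.9") < beta        # numeric, not lexicographic, comparison
    print("{} supersedes {}".format(release, candidate))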
| 44.648485
| 120
| 0.628478
|
41e4871aaed7e8c72fd67d9cb0161ef240bbca69
| 3,081
|
py
|
Python
|
tests/MyTestCase.py
|
mribrgr/StuRa-Mitgliederdatenbank
|
87a261d66c279ff86056e315b05e6966b79df9fa
|
[
"MIT"
] | 8
|
2019-11-26T13:34:46.000Z
|
2021-06-21T13:41:57.000Z
|
src/tests/MyTestCase.py
|
Sumarbrander/Stura-Mitgliederdatenbank
|
691dbd33683b2c2d408efe7a3eb28e083ebcd62a
|
[
"MIT"
] | 93
|
2019-12-16T09:29:10.000Z
|
2021-04-24T12:03:33.000Z
|
src/tests/MyTestCase.py
|
Sumarbrander/Stura-Mitgliederdatenbank
|
691dbd33683b2c2d408efe7a3eb28e083ebcd62a
|
[
"MIT"
] | 2
|
2020-12-03T12:43:19.000Z
|
2020-12-22T21:48:47.000Z
|
import csv
from platform import system
from selenium import webdriver
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from aemter.models import Funktion, Unterbereich, Organisationseinheit
import importscripts.main as imp
class MyTestCase(StaticLiveServerTestCase):
"""
    Setup and teardown functions are specified here.
    The following test cases inherit from this class.
    All test cases inheriting from this class test the user interface.
"""
    # before every test function
def setUp(self):
"""
        This function is called before every testcase.
        It sets up the webdriver and creates one admin and one regular user.
        You can adjust the webdriver by changing the *options* parameter.
        The import scripts from the folder *importscripts* are also called here.
        The webdriver instance is stored in **self.browser**.
:param self:
:type self:
:return: No return Value
"""
        # Comment these options out when running the tests locally
options = webdriver.FirefoxOptions()
options.headless = True
options.add_argument("--no-sandbox") # bypass OS security model
options.add_argument("--disable-dev-shm-usage") # overcome limited resource problems
try:
if system() == 'Windows':
self.browser = webdriver.Firefox(
executable_path='tests/firefoxdriver-win64/geckodriver.exe',
firefox_options=options,
service_log_path='django.log',
keep_alive=True
)
pass
if system() == 'Linux':
self.browser = webdriver.Firefox(
executable_path='tests/firefoxdriver-linux64/geckodriver',
firefox_options=options,
service_log_path='django.log',
keep_alive=True
)
pass
self.browser.implicitly_wait(2)
except BaseException as e:
            print("Could not obtain a webdriver instance")
print(e)
        # Add admin user
user = get_user_model().objects.create_superuser(
username='testlukasadmin', password='0123456789test')
        # Add regular user
user = get_user_model().objects.create_user(
username='testlukas', password='0123456789test')
        # Add offices (Ämter) via the import script
file = open("importscripts/ReferateUnterbereicheAemter.csv", encoding="utf-8")
imp.importAemter(file)
file.close()
pass
    # after every test function
def tearDown(self):
"""
This function is called after every testcase.
        The webdriver instance stored in **self.browser** will be closed.
:param self:
:type self:
:return: No return Value
"""
self.browser.quit()
pass
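# Hedged usage sketch, not part of the original module: a minimal UI testcase
# inheriting the setUp/tearDown defined above. The URL path and the expected
# title fragment are placeholder assumptions, not the application's real routes.
class LoginPageSmokeTest(MyTestCase):
    def test_login_page_loads(self):
        """Open the live-server root page and check that it renders a title."""
        self.browser.get(self.live_server_url + '/')
        self.assertIn('Mitglieder', self.browser.title)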
| 33.857143
| 92
| 0.605972
|
72a632c8adf92ea637f8cf8bea4689960dc36fd2
| 6,019
|
py
|
Python
|
designate/context.py
|
Woody89/designate-private
|
0a6ed5a1d7cdac5cb1e9dec8fd3ddfb9a77c58f5
|
[
"Apache-2.0"
] | null | null | null |
designate/context.py
|
Woody89/designate-private
|
0a6ed5a1d7cdac5cb1e9dec8fd3ddfb9a77c58f5
|
[
"Apache-2.0"
] | null | null | null |
designate/context.py
|
Woody89/designate-private
|
0a6ed5a1d7cdac5cb1e9dec8fd3ddfb9a77c58f5
|
[
"Apache-2.0"
] | 1
|
2019-11-16T10:55:49.000Z
|
2019-11-16T10:55:49.000Z
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import copy
from oslo_context import context
from oslo_log import log as logging
from designate import policy
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
class DesignateContext(context.RequestContext):
_all_tenants = False
_hide_counts = False
_abandon = None
original_tenant = None
_edit_managed_records = False
_client_addr = None
def __init__(self, service_catalog=None, all_tenants=False, abandon=None,
tsigkey_id=None, user_identity=None, original_tenant=None,
edit_managed_records=False, hide_counts=False,
client_addr=None, **kwargs):
# NOTE: user_identity may be passed in, but will be silently dropped as
# it is a generated field based on several others.
super(DesignateContext, self).__init__(**kwargs)
self.service_catalog = service_catalog
self.tsigkey_id = tsigkey_id
self.original_tenant = original_tenant
self.all_tenants = all_tenants
self.abandon = abandon
self.edit_managed_records = edit_managed_records
self.hide_counts = hide_counts
self.client_addr = client_addr
def deepcopy(self):
d = self.to_dict()
return self.from_dict(d)
def to_dict(self):
d = super(DesignateContext, self).to_dict()
# Override the user_identity field to account for TSIG. When a TSIG key
# is used as authentication e.g. via MiniDNS, it will act as a form
        # of "user".
user = self.user or '-'
if self.tsigkey_id and not self.user:
user = 'TSIG:%s' % self.tsigkey_id
user_idt = (
self.user_idt_format.format(user=user,
tenant=self.tenant or '-',
domain=self.domain or '-',
user_domain=self.user_domain or '-',
p_domain=self.project_domain or '-'))
# Update the dict with Designate specific extensions and overrides
d.update({
'user_identity': user_idt,
'original_tenant': self.original_tenant,
'service_catalog': self.service_catalog,
'all_tenants': self.all_tenants,
'abandon': self.abandon,
'edit_managed_records': self.edit_managed_records,
'tsigkey_id': self.tsigkey_id,
'hide_counts': self.hide_counts,
'client_addr': self.client_addr,
})
return copy.deepcopy(d)
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, show_deleted=None, all_tenants=False,
edit_managed_records=False):
"""Return a version of this context with admin flag set.
Optionally set all_tenants and edit_managed_records
"""
context = self.deepcopy()
context.is_admin = True
# NOTE(kiall): Ugly - required to match http://tinyurl.com/o3y8qmw
context.roles.append('admin')
if show_deleted is not None:
context.show_deleted = show_deleted
if all_tenants:
context.all_tenants = True
if edit_managed_records:
context.edit_managed_records = True
return context
def sudo(self, tenant):
policy.check('use_sudo', self)
LOG.info(_LI('Accepted sudo from user %(user)s to tenant %(tenant)s'),
{'user': self.user, 'tenant': tenant})
self.original_tenant = self.tenant
self.tenant = tenant
@classmethod
def get_admin_context(cls, **kwargs):
# TODO(kiall): Remove Me
kwargs['is_admin'] = True
kwargs['roles'] = ['admin']
return cls(None, **kwargs)
@classmethod
def get_context_from_function_and_args(cls, function, args, kwargs):
"""
Find an arg of type DesignateContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, cls):
return arg
return None
@property
def all_tenants(self):
return self._all_tenants
@all_tenants.setter
def all_tenants(self, value):
if value:
policy.check('all_tenants', self)
self._all_tenants = value
@property
def hide_counts(self):
return self._hide_counts
@hide_counts.setter
def hide_counts(self, value):
self._hide_counts = value
@property
def abandon(self):
return self._abandon
@abandon.setter
def abandon(self, value):
if value:
policy.check('abandon_zone', self)
self._abandon = value
@property
def edit_managed_records(self):
return self._edit_managed_records
@edit_managed_records.setter
def edit_managed_records(self, value):
if value:
policy.check('edit_managed_records', self)
self._edit_managed_records = value
@property
def client_addr(self):
return self._client_addr
@client_addr.setter
def client_addr(self, value):
self._client_addr = value
def get_current():
return context.get_current()
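if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. The keyword names
    # passed to the constructor (user, tenant) follow the oslo.context version
    # this module targets and are assumptions here; the policy-guarded setters
    # are left at their defaults so no oslo.policy enforcer is required.
    ctx = DesignateContext(user='demo-user', tenant='demo-project')
    admin_ctx = ctx.elevated()                    # deep copy with admin flag/role
    print(admin_ctx.is_admin, 'admin' in admin_ctx.roles)
    print(ctx.to_dict()['user_identity'])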
| 29.79703
| 79
| 0.629839
|
bc933805857fcf886ad7b3b093c08fbd44d82755
| 60
|
py
|
Python
|
src/practice/20_framework/rl/types/__init__.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | 1
|
2019-10-06T11:45:52.000Z
|
2019-10-06T11:45:52.000Z
|
src/practice/20_framework/rl/types/__init__.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | null | null | null |
src/practice/20_framework/rl/types/__init__.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | null | null | null |
from .episode import Episode
from .episodes import Episodes
| 20
| 30
| 0.833333
|
87a8e6b17ad85194a0e8096d489e0eb6d85f0acb
| 2,325
|
py
|
Python
|
maupassant/tensorflow_helper/layers_helper.py
|
Jwuthri/TextToolKit
|
5feb87f9818932cf2f67b404a9a088b785ec5287
|
[
"MIT"
] | 2
|
2021-01-14T13:56:36.000Z
|
2021-01-14T18:41:43.000Z
|
maupassant/tensorflow_helper/layers_helper.py
|
Jwuthri/TextToolKit
|
5feb87f9818932cf2f67b404a9a088b785ec5287
|
[
"MIT"
] | 4
|
2020-03-25T18:07:29.000Z
|
2022-02-09T23:40:14.000Z
|
maupassant/tensorflow_helper/layers_helper.py
|
Jwuthri/TextToolKit
|
5feb87f9818932cf2f67b404a9a088b785ec5287
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
def text_to_layer(block, unit, return_sequences=False):
"""Build tensorflow layer, easily."""
layer = None
if block == "CNN":
layer = tf.keras.layers.Conv1D(unit, kernel_size=1, strides=1, padding='same', activation='relu')
elif block == "LCNN":
layer = tf.keras.layers.LocallyConnected1D(unit, kernel_size=1, strides=1, padding='valid', activation='relu')
elif block == "BiLSTM":
layer = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(unit, activation="relu", return_sequences=return_sequences))
elif block == "BiGRU":
layer = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(unit, activation="relu", return_sequences=return_sequences))
elif block == "BiRNN":
layer = tf.keras.layers.Bidirectional(tf.keras.layers.SimpleRNN(unit, activation="relu", return_sequences=return_sequences))
elif block == "CudaLSTM":
layer = tf.compat.v1.keras.layers.CuDNNLSTM(unit, return_sequences=return_sequences)
elif block == "LSTM":
layer = tf.keras.layers.LSTM(unit, activation='relu', return_sequences=return_sequences)
elif block == "GRU":
layer = tf.keras.layers.GRU(unit, activation='relu', return_sequences=return_sequences)
elif block == "RNN":
layer = tf.keras.layers.SimpleRNN(unit, activation='relu', return_sequences=return_sequences)
elif block == "DENSE":
layer = tf.keras.layers.Dense(unit, activation="relu")
elif block == "TIME_DISTRIB_DENSE":
layer = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(unit, activation="relu"))
elif block == "FLATTEN":
layer = tf.keras.layers.Flatten()
elif block == "RESHAPE":
layer = tf.keras.layers.Reshape(target_shape=unit)
elif block == "DROPOUT":
layer = tf.keras.layers.Dropout(unit)
elif block == "SPATIAL_DROPOUT":
layer = tf.keras.layers.SpatialDropout1D(unit)
elif block == "GLOBAL_MAX_POOL":
layer = tf.keras.layers.GlobalMaxPooling1D()
elif block == "MAX_POOL":
layer = tf.keras.layers.MaxPool1D(pool_size=unit)
elif block == "GLOBAL_AVERAGE_POOL":
layer = tf.keras.layers.GlobalAveragePooling1D()
elif block == "AVERAGE_POOL":
layer = tf.keras.layers.AveragePooling1D(pool_size=unit)
return layer
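if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: wires a few of the
    # block names handled above into a tiny functional-API model. The input
    # shape and the block/unit choices are arbitrary illustration values.
    inputs = tf.keras.Input(shape=(10, 8))                       # (timesteps, features)
    x = text_to_layer("CNN", 16)(inputs)                         # -> (10, 16)
    x = text_to_layer("BiLSTM", 32, return_sequences=True)(x)    # -> (10, 64)
    x = text_to_layer("GLOBAL_MAX_POOL", None)(x)                # -> (64,)
    outputs = text_to_layer("DENSE", 1)(x)                       # -> (1,)
    model = tf.keras.Model(inputs, outputs)
    model.summary()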
| 48.4375
| 132
| 0.683441
|
f1404eaaeb32786c9b0abc7f9896fe843a9e8d22
| 12,356
|
py
|
Python
|
great_expectations/data_context/types/resource_identifiers.py
|
MajorDaxx/great_expectations
|
5c4aad62292a0f83316960c54250d9ceeb89d5a8
|
[
"Apache-2.0"
] | 1
|
2022-03-16T22:09:49.000Z
|
2022-03-16T22:09:49.000Z
|
great_expectations/data_context/types/resource_identifiers.py
|
draev/great_expectations
|
317e15ee7e50f6e0d537b62154177440f33b795d
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/data_context/types/resource_identifiers.py
|
draev/great_expectations
|
317e15ee7e50f6e0d537b62154177440f33b795d
|
[
"Apache-2.0"
] | null | null | null |
import logging
import warnings
from typing import Optional, Union
from uuid import UUID
from dateutil.parser import parse
from great_expectations.core.data_context_key import DataContextKey
from great_expectations.core.id_dict import BatchKwargs, IDDict
from great_expectations.core.run_identifier import RunIdentifier, RunIdentifierSchema
from great_expectations.exceptions import DataContextError, InvalidDataContextKeyError
from great_expectations.marshmallow__shade import Schema, fields, post_load
logger = logging.getLogger(__name__)
class ExpectationSuiteIdentifier(DataContextKey):
def __init__(self, expectation_suite_name: str):
super().__init__()
if not isinstance(expectation_suite_name, str):
raise InvalidDataContextKeyError(
f"expectation_suite_name must be a string, not {type(expectation_suite_name).__name__}"
)
self._expectation_suite_name = expectation_suite_name
@property
def expectation_suite_name(self):
return self._expectation_suite_name
def to_tuple(self):
return tuple(self.expectation_suite_name.split("."))
def to_fixed_length_tuple(self):
return (self.expectation_suite_name,)
@classmethod
def from_tuple(cls, tuple_):
return cls(".".join(tuple_))
@classmethod
def from_fixed_length_tuple(cls, tuple_):
return cls(expectation_suite_name=tuple_[0])
def __repr__(self):
return f"{self.__class__.__name__}::{self._expectation_suite_name}"
class ExpectationSuiteIdentifierSchema(Schema):
expectation_suite_name = fields.Str()
# noinspection PyUnusedLocal
@post_load
def make_expectation_suite_identifier(self, data, **kwargs):
return ExpectationSuiteIdentifier(**data)
class BatchIdentifier(DataContextKey):
    """A BatchIdentifier tracks a batch by its identifier and, optionally, the data asset it came from."""
def __init__(
self,
batch_identifier: Union[BatchKwargs, dict, str],
data_asset_name: str = None,
):
super().__init__()
# if isinstance(batch_identifier, (BatchKwargs, dict)):
# self._batch_identifier = batch_identifier.batch_fingerprint
self._batch_identifier = batch_identifier
self._data_asset_name = data_asset_name
@property
def batch_identifier(self):
return self._batch_identifier
@property
def data_asset_name(self):
return self._data_asset_name
def to_tuple(self):
return (self.batch_identifier,)
@classmethod
def from_tuple(cls, tuple_):
return cls(batch_identifier=tuple_[0])
class BatchIdentifierSchema(Schema):
batch_identifier = fields.Str()
data_asset_name = fields.Str()
# noinspection PyUnusedLocal
@post_load
def make_batch_identifier(self, data, **kwargs):
return BatchIdentifier(**data)
class ValidationResultIdentifier(DataContextKey):
"""A ValidationResultIdentifier identifies a validation result by the fully-qualified expectation_suite_identifier
and run_id.
"""
def __init__(self, expectation_suite_identifier, run_id, batch_identifier):
"""Constructs a ValidationResultIdentifier
Args:
expectation_suite_identifier (ExpectationSuiteIdentifier, list, tuple, or dict):
identifying information for the fully-qualified expectation suite used to validate
            run_id (RunIdentifier): The run_id for which validation occurred
            batch_identifier (str): The identifier of the batch that was validated
        """
super().__init__()
self._expectation_suite_identifier = expectation_suite_identifier
if isinstance(run_id, str):
warnings.warn(
"String run_ids will be deprecated in the future. Please provide a run_id of type "
"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
"and run_time (both optional).",
DeprecationWarning,
)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
run_time = None
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif run_id is None:
run_id = RunIdentifier()
elif not isinstance(run_id, RunIdentifier):
run_id = RunIdentifier(run_name=str(run_id))
self._run_id = run_id
self._batch_identifier = batch_identifier
@property
def expectation_suite_identifier(self) -> ExpectationSuiteIdentifier:
return self._expectation_suite_identifier
@property
def run_id(self):
return self._run_id
@property
def batch_identifier(self):
return self._batch_identifier
def to_tuple(self):
return tuple(
list(self.expectation_suite_identifier.to_tuple())
+ list(self.run_id.to_tuple())
+ [self.batch_identifier or "__none__"]
)
def to_fixed_length_tuple(self):
return tuple(
[self.expectation_suite_identifier.expectation_suite_name]
+ list(self.run_id.to_tuple())
+ [self.batch_identifier or "__none__"]
)
@classmethod
def from_tuple(cls, tuple_):
return cls(
ExpectationSuiteIdentifier.from_tuple(tuple_[0:-3]),
RunIdentifier.from_tuple((tuple_[-3], tuple_[-2])),
tuple_[-1],
)
@classmethod
def from_fixed_length_tuple(cls, tuple_):
return cls(
ExpectationSuiteIdentifier(tuple_[0]),
RunIdentifier.from_tuple((tuple_[1], tuple_[2])),
tuple_[3],
)
@classmethod
def from_object(cls, validation_result):
batch_kwargs = validation_result.meta.get("batch_kwargs", {})
if isinstance(batch_kwargs, IDDict):
batch_identifier = batch_kwargs.to_id()
elif isinstance(batch_kwargs, dict):
batch_identifier = IDDict(batch_kwargs).to_id()
else:
raise DataContextError(
"Unable to construct ValidationResultIdentifier from provided object."
)
return cls(
expectation_suite_identifier=ExpectationSuiteIdentifier(
validation_result.meta["expectation_suite_name"]
),
run_id=validation_result.meta.get("run_id"),
batch_identifier=batch_identifier,
)
class GeCloudIdentifier(DataContextKey):
def __init__(self, resource_type: str, ge_cloud_id: Optional[str] = None):
super().__init__()
self._resource_type = resource_type
self._ge_cloud_id = ge_cloud_id if ge_cloud_id is not None else ""
@property
def resource_type(self):
return self._resource_type
@resource_type.setter
def resource_type(self, value):
self._resource_type = value
@property
def ge_cloud_id(self):
return self._ge_cloud_id
@ge_cloud_id.setter
def ge_cloud_id(self, value):
self._ge_cloud_id = value
def to_tuple(self):
return (self.resource_type, self.ge_cloud_id)
def to_fixed_length_tuple(self):
return self.to_tuple()
@classmethod
def from_tuple(cls, tuple_):
return cls(resource_type=tuple_[0], ge_cloud_id=tuple_[1])
@classmethod
def from_fixed_length_tuple(cls, tuple_):
return cls.from_tuple(tuple_)
def __repr__(self):
return f"{self.__class__.__name__}::{self.resource_type}::{self.ge_cloud_id}"
class ValidationResultIdentifierSchema(Schema):
expectation_suite_identifier = fields.Nested(
ExpectationSuiteIdentifierSchema,
required=True,
error_messages={
"required": "expectation_suite_identifier is required for a ValidationResultIdentifier"
},
)
run_id = fields.Nested(
RunIdentifierSchema,
required=True,
        error_messages={
            "required": "run_id is required for a ValidationResultIdentifier"
        },
)
batch_identifier = fields.Nested(BatchIdentifierSchema, required=True)
# noinspection PyUnusedLocal
@post_load
def make_validation_result_identifier(self, data, **kwargs):
return ValidationResultIdentifier(**data)
class SiteSectionIdentifier(DataContextKey):
def __init__(self, site_section_name, resource_identifier):
self._site_section_name = site_section_name
if site_section_name in ["validations", "profiling"]:
if isinstance(resource_identifier, ValidationResultIdentifier):
self._resource_identifier = resource_identifier
elif isinstance(resource_identifier, (tuple, list)):
self._resource_identifier = ValidationResultIdentifier(
*resource_identifier
)
else:
self._resource_identifier = ValidationResultIdentifier(
**resource_identifier
)
elif site_section_name == "expectations":
if isinstance(resource_identifier, ExpectationSuiteIdentifier):
self._resource_identifier = resource_identifier
elif isinstance(resource_identifier, (tuple, list)):
self._resource_identifier = ExpectationSuiteIdentifier(
*resource_identifier
)
else:
self._resource_identifier = ExpectationSuiteIdentifier(
**resource_identifier
)
else:
raise InvalidDataContextKeyError(
"SiteSectionIdentifier only supports 'validations' and 'expectations' as site section names"
)
@property
def site_section_name(self):
return self._site_section_name
@property
def resource_identifier(self):
return self._resource_identifier
def to_tuple(self):
site_section_identifier_tuple_list = [self.site_section_name] + list(
self.resource_identifier.to_tuple()
)
return tuple(site_section_identifier_tuple_list)
@classmethod
def from_tuple(cls, tuple_):
if tuple_[0] == "validations":
return cls(
site_section_name=tuple_[0],
resource_identifier=ValidationResultIdentifier.from_tuple(tuple_[1:]),
)
elif tuple_[0] == "expectations":
return cls(
site_section_name=tuple_[0],
resource_identifier=ExpectationSuiteIdentifier.from_tuple(tuple_[1:]),
)
else:
raise InvalidDataContextKeyError(
"SiteSectionIdentifier only supports 'validations' and 'expectations' as site section names"
)
class ConfigurationIdentifier(DataContextKey):
def __init__(self, configuration_key: Union[str, UUID]):
super().__init__()
if isinstance(configuration_key, UUID):
configuration_key = str(configuration_key)
if not isinstance(configuration_key, str):
raise InvalidDataContextKeyError(
f"configuration_key must be a string, not {type(configuration_key).__name__}"
)
self._configuration_key = configuration_key
@property
def configuration_key(self) -> str:
return self._configuration_key
def to_tuple(self):
return tuple(self.configuration_key.split("."))
def to_fixed_length_tuple(self):
return (self.configuration_key,)
@classmethod
def from_tuple(cls, tuple_):
return cls(".".join(tuple_))
@classmethod
def from_fixed_length_tuple(cls, tuple_):
return cls(configuration_key=tuple_[0])
def __repr__(self):
return f"{self.__class__.__name__}::{self._configuration_key}"
class ConfigurationIdentifierSchema(Schema):
configuration_key = fields.Str()
# noinspection PyUnusedLocal
@post_load
def make_configuration_identifier(self, data, **kwargs):
return ConfigurationIdentifier(**data)
expectationSuiteIdentifierSchema = ExpectationSuiteIdentifierSchema()
validationResultIdentifierSchema = ValidationResultIdentifierSchema()
runIdentifierSchema = RunIdentifierSchema()
batchIdentifierSchema = BatchIdentifierSchema()
configurationIdentifierSchema = ConfigurationIdentifierSchema()
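if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: builds a
    # ValidationResultIdentifier and round-trips it through its tuple form,
    # which is how these identifiers are used as store keys. The suite name,
    # run name and batch id are placeholder values.
    suite_id = ExpectationSuiteIdentifier("my_suite.warning")
    vr_id = ValidationResultIdentifier(
        expectation_suite_identifier=suite_id,
        run_id=RunIdentifier(run_name="nightly"),
        batch_identifier="batch-0001",
    )
    key = vr_id.to_tuple()
    restored = ValidationResultIdentifier.from_tuple(key)
    print(key)
    print(restored.expectation_suite_identifier, restored.batch_identifier)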
| 33.394595
| 118
| 0.670282
|
cf610b34a2a5b2c0f9952c7261b8ea3e71a49096
| 1,404
|
py
|
Python
|
geetools/collection/group.py
|
carderne/gee_tools
|
4003e75ffb0ffefc9f41b1a34d849eebdb486161
|
[
"MIT"
] | 1
|
2019-10-22T10:11:52.000Z
|
2019-10-22T10:11:52.000Z
|
geetools/collection/group.py
|
carderne/gee_tools
|
4003e75ffb0ffefc9f41b1a34d849eebdb486161
|
[
"MIT"
] | null | null | null |
geetools/collection/group.py
|
carderne/gee_tools
|
4003e75ffb0ffefc9f41b1a34d849eebdb486161
|
[
"MIT"
] | 1
|
2020-01-09T07:41:04.000Z
|
2020-01-09T07:41:04.000Z
|
# coding=utf-8
""" Group of collections """
from . import getCommonBands, rescale
from datetime import date
class CollectionGroup(object):
def __init__(self, *args):
self.collections = args
@property
def ids(self):
return [col.id for col in self.collections]
def commonBands(self, reference='all', match='id'):
""" Get a list of the bands that exist in all collections """
return getCommonBands(*self.collections, reference=reference,
match=match)
    def scales(self):
        """ Get the minimum and maximum scale values for every common band """
scales = {}
common = self.commonBands()
for band in common:
band_scales = []
for collection in self.collections:
scale = collection.scales()[band]
band_scales.append(scale)
scales[band] = {'min': min(band_scales),
'max': max(band_scales)}
return scales
def start_date(self):
""" Get the absolute start date for the group """
starts = [date.fromisoformat(c.start_date) for c in self.collections]
return min(starts).isoformat()
def end_date(self):
""" Get the absolute end date for the group """
        ends = [date.fromisoformat(c.end_date) for c in self.collections]
        return max(ends).isoformat()
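# Hedged usage sketch, not part of the original module: CollectionGroup only
# needs objects exposing `id`, `start_date` and `end_date` for the helpers
# shown below, so two small stand-ins are used instead of real geetools
# collections; the ids and dates are placeholder values.
if __name__ == "__main__":
    class _FakeCollection(object):
        def __init__(self, cid, start, end):
            self.id = cid
            self.start_date = start
            self.end_date = end

    group = CollectionGroup(
        _FakeCollection('LANDSAT/LC08/C01/T1_SR', '2013-04-11', '2021-01-01'),
        _FakeCollection('COPERNICUS/S2', '2015-06-23', '2021-01-01'),
    )
    print(group.ids)           # ['LANDSAT/LC08/C01/T1_SR', 'COPERNICUS/S2']
    print(group.start_date())  # '2013-04-11' (earliest start in the group)
    print(group.end_date())    # '2021-01-01' (latest end in the group)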
| 33.428571
| 77
| 0.597578
|