hexsha (string, length 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 4 to 209) | max_stars_repo_name (string, length 5 to 121) | max_stars_repo_head_hexsha (string, length 40) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 4 to 209) | max_issues_repo_name (string, length 5 to 121) | max_issues_repo_head_hexsha (string, length 40) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 4 to 209) | max_forks_repo_name (string, length 5 to 121) | max_forks_repo_head_hexsha (string, length 40) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fcebc9f17eecf6f68658b4ad13dd86f6792240bb | 30,711 | py | Python | lrs/utils/req_validate.py | learningturf/lt_lrs_docker | acb80cf0a18f3815213d15d39d90a949c7675a27 | ["Apache-2.0"] | null | null | null | lrs/utils/req_validate.py | learningturf/lt_lrs_docker | acb80cf0a18f3815213d15d39d90a949c7675a27 | ["Apache-2.0"] | null | null | null | lrs/utils/req_validate.py | learningturf/lt_lrs_docker | acb80cf0a18f3815213d15d39d90a949c7675a27 | ["Apache-2.0"] | null | null | null |
import json
from isodate.isodatetime import parse_datetime
from isodate.isoerror import ISO8601Error
import uuid
from . import get_agent_ifp
from authorization import auth
from StatementValidator import StatementValidator
from ..models import Statement, Agent, Activity, ActivityState, ActivityProfile, AgentProfile
from ..exceptions import ParamConflict, ParamError, Forbidden, NotFound, BadRequest, IDNotFoundError
def check_for_existing_statementId(stmtID):
return Statement.objects.filter(statement_id=stmtID).exists()
def check_for_no_other_params_supplied(query_dict):
supplied = True
if len(query_dict) <= 1:
supplied = False
return supplied
# Extra agent validation for state and profile
def validate_oauth_state_or_profile_agent(req_dict, endpoint):
ag = req_dict['params']['agent']
token = req_dict['auth']['oauth_token']
scopes = token.scope_to_list()
if not 'all' in scopes:
if not isinstance(ag, dict):
ag = json.loads(ag)
try:
agent = Agent.objects.get(**ag)
except Agent.DoesNotExist:
err_msg = "Agent in %s cannot be found to match user in authorization" % endpoint
raise NotFound(err_msg)
if not agent in req_dict['auth']['agent'].member.all():
err_msg = "Authorization doesn't match agent in %s" % endpoint
raise Forbidden(err_msg)
def validate_void_statement(void_id):
# Retrieve statement, check if the verb is 'voided' - if not then set the voided flag to true else return error
# since you cannot unvoid a statement and should just reissue the statement under a new ID.
stmts = Statement.objects.filter(statement_id=void_id)
if len(stmts) > 1:
raise IDNotFoundError("Something went wrong. %s statements found with id %s" % (len(stmts), void_id))
elif len(stmts) == 1:
if stmts[0].voided:
err_msg = "Statement with ID: %s is already voided, cannot unvoid. Please re-issue the statement under a new ID." % void_id
raise BadRequest(err_msg)
def validate_body(body, auth, payload_sha2s, content_type):
[server_validate_statement(stmt, auth, payload_sha2s, content_type) for stmt in body]
def server_validate_statement(stmt, auth, payload_sha2s, content_type):
if 'id' in stmt:
statement_id = stmt['id']
if check_for_existing_statementId(statement_id):
err_msg = "A statement with ID %s already exists" % statement_id
raise ParamConflict(err_msg)
if stmt['verb']['id'] == 'http://adlnet.gov/expapi/verbs/voided':
validate_void_statement(stmt['object']['id'])
if 'attachments' in stmt:
attachment_data = stmt['attachments']
validate_attachments(attachment_data, payload_sha2s, content_type)
@auth
def statements_post(req_dict):
if req_dict['params'].keys():
raise ParamError("The post statements request contained unexpected parameters: %s" % ", ".join(req_dict['params'].keys()))
try:
validator = StatementValidator(req_dict['body'])
validator.validate()
except Exception, e:
raise BadRequest(e.message)
except ParamError, e:
raise ParamError(e.message)
if isinstance(req_dict['body'], dict):
body = [req_dict['body']]
else:
body = req_dict['body']
validate_body(body, req_dict['auth'], req_dict.get('payload_sha2s', None), req_dict['headers']['CONTENT_TYPE'])
return req_dict
@auth
def statements_more_get(req_dict):
if not 'more_id' in req_dict:
err_msg = "Missing more_id while trying to hit /more endpoint"
raise ParamError(err_msg)
return req_dict
def validate_statementId(req_dict):
if 'statementId' in req_dict['params'] and 'voidedStatementId' in req_dict['params']:
err_msg = "Cannot have both statementId and voidedStatementId in a GET request"
raise ParamError(err_msg)
elif 'statementId' in req_dict['params']:
statementId = req_dict['params']['statementId']
voided = False
else:
statementId = req_dict['params']['voidedStatementId']
voided = True
not_allowed = ["agent", "verb", "activity", "registration",
"related_activities", "related_agents", "since",
"until", "limit", "ascending"]
bad_keys = set(not_allowed) & set(req_dict['params'].keys())
if bad_keys:
err_msg = "Cannot have %s in a GET request only 'format' and/or 'attachments' are allowed with 'statementId' and 'voidedStatementId'" % ', '.join(bad_keys)
raise ParamError(err_msg)
# Try to retrieve stmt, if DNE then return empty else return stmt info
try:
uuidId = uuid.UUID(str(statementId))
st = Statement.objects.get(statement_id=uuidId)
except (Statement.DoesNotExist, ValueError):
err_msg = 'There is no statement associated with the id: %s' % statementId
raise IDNotFoundError(err_msg)
auth = req_dict.get('auth', None)
mine_only = auth and 'statements_mine_only' in auth
if auth['agent']:
if mine_only and st.authority.id != auth['agent'].id:
err_msg = "Incorrect permissions to view statements"
raise Forbidden(err_msg)
if st.voided != voided:
if st.voided:
err_msg = 'The requested statement (%s) is voided. Use the "voidedStatementId" parameter to retrieve your statement.' % statementId
else:
err_msg = 'The requested statement (%s) is not voided. Use the "statementId" parameter to retrieve your statement.' % statementId
raise IDNotFoundError(err_msg)
return statementId
@auth
def statements_get(req_dict):
rogueparams = set(req_dict['params']) - set(["statementId","voidedStatementId","agent", "verb", "activity", "registration",
"related_activities", "related_agents", "since",
"until", "limit", "format", "attachments", "ascending"])
if rogueparams:
raise ParamError("The get statements request contained unexpected parameters: %s" % ", ".join(rogueparams))
formats = ['exact', 'canonical', 'ids']
if 'params' in req_dict and 'format' in req_dict['params']:
if req_dict['params']['format'] not in formats:
raise ParamError("The format filter value (%s) was not one of the known values: %s" % (req_dict['params']['format'], ','.join(formats)))
else:
req_dict['params']['format'] = 'exact'
# StatementId could be for voided statement as well
if 'params' in req_dict and ('statementId' in req_dict['params'] or 'voidedStatementId' in req_dict['params']):
req_dict['statementId'] = validate_statementId(req_dict)
if 'since' in req_dict['params']:
try:
parse_datetime(req_dict['params']['since'])
except (Exception, ISO8601Error):
raise ParamError("Since parameter was not a valid ISO8601 timestamp")
if 'until' in req_dict['params']:
try:
parse_datetime(req_dict['params']['until'])
except (Exception, ISO8601Error):
raise ParamError("Until parameter was not a valid ISO8601 timestamp")
# Django converts all query values to string - make boolean depending on if client wants attachments or not
# Only need to do this in GET b/c GET/more will have it saved in pickle information
if 'params' in req_dict and 'attachments' in req_dict['params']:
if req_dict['params']['attachments'].lower() == 'true':
req_dict['params']['attachments'] = True
else:
req_dict['params']['attachments'] = False
else:
req_dict['params']['attachments'] = False
return req_dict
@auth
def statements_put(req_dict):
# Find any unexpected parameters
rogueparams = set(req_dict['params']) - set(["statementId"])
if rogueparams:
raise ParamError("The put statements request contained unexpected parameters: %s" % ", ".join(rogueparams))
    # Statement id must be supplied in the query param. If it is in the body too, it must be the same
if not 'statementId' in req_dict['params']:
raise ParamError("Error -- statements - method = %s, but no statementId parameter or ID given in statement" % req_dict['method'])
else:
statement_id = req_dict['params']['statementId']
# Try to get id if in body
try:
statement_body_id = req_dict['body']['id']
except Exception, e:
statement_body_id = None
# If ids exist in both places, check if they are equal
if statement_body_id and statement_id != statement_body_id:
raise ParamError("Error -- statements - method = %s, param and body ID both given, but do not match" % req_dict['method'])
# Set id inside of statement with param id
if not statement_body_id:
req_dict['body']['id'] = statement_id
# If there are no other params-raise param error since nothing else is supplied
if not check_for_no_other_params_supplied(req_dict['body']):
raise ParamError("No other params are supplied with statementId.")
# Validate statement in body
try:
validator = StatementValidator(req_dict['body'])
validator.validate()
except Exception, e:
raise BadRequest(e.message)
except ParamError, e:
raise ParamError(e.message)
validate_body([req_dict['body']], req_dict['auth'], req_dict.get('payload_sha2s', None), req_dict['headers']['CONTENT_TYPE'])
return req_dict
def validate_attachments(attachment_data, payload_sha2s, content_type):
if "multipart/mixed" in content_type:
for attachment in attachment_data:
# If the attachment data has a sha2 field, must validate it against the payload data
if 'sha2' in attachment:
sha2 = attachment['sha2']
# Check if the sha2 field is a key in the payload dict
if payload_sha2s:
if not sha2 in payload_sha2s and not 'fileUrl' in attachment:
err_msg = "Could not find attachment payload with sha: %s" % sha2
raise ParamError(err_msg)
else:
if not 'fileUrl' in attachment:
raise BadRequest("Missing X-Experience-API-Hash field in header")
elif "application/json" == content_type:
for attachment in attachment_data:
if not 'fileUrl' in attachment:
raise BadRequest("When sending statements with attachments as 'application/json', you must include fileUrl field")
else:
raise BadRequest('Invalid Content-Type %s when sending statements with attachments' % content_type)
@auth
def activity_state_post(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "agent", "stateId", "registration"])
if rogueparams:
raise ParamError("The post activity state request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity state")
else:
err_msg = "Error -- activity_state - method = %s, but activityId parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'stateId' in req_dict['params']:
err_msg = "Error -- activity_state - method = %s, but stateId parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
if 'registration' in req_dict['params']:
validator.validate_uuid(req_dict['params']['registration'], "registration param for activity state")
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for activity state is not valid")
validator.validate_agent(agent, "Activity state agent param")
else:
err_msg = "Error -- activity_state - method = %s, but agent parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
# Must have body included for state
if 'body' not in req_dict:
err_msg = "Could not find the state"
raise ParamError(err_msg)
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "state")
# Check the content type if the document already exists
registration = req_dict['params'].get('registration', None)
agent = req_dict['params']['agent']
a = Agent.objects.retrieve_or_create(**agent)[0]
exists = False
if registration:
try:
s = ActivityState.objects.get(state_id=req_dict['params']['stateId'], agent=a,
activity_id=req_dict['params']['activityId'], registration_id=req_dict['params']['registration'])
exists = True
except ActivityState.DoesNotExist:
pass
else:
try:
s = ActivityState.objects.get(state_id=req_dict['params']['stateId'], agent=a,
activity_id=req_dict['params']['activityId'])
exists = True
except ActivityState.DoesNotExist:
pass
if exists:
if str(s.content_type) != "application/json" or ("application/json" not in req_dict['headers']['CONTENT_TYPE'] or \
req_dict['headers']['CONTENT_TYPE'] != "application/json"):
raise ParamError("Neither original document or document to be posted has a Content-Type of 'application/json'")
# Set state
req_dict['state'] = req_dict.pop('raw_body', req_dict.pop('body', None))
return req_dict
@auth
def activity_state_put(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "agent", "stateId", "registration"])
if rogueparams:
raise ParamError("The put activity state request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity state")
else:
err_msg = "Error -- activity_state - method = %s, but activityId parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'stateId' in req_dict['params']:
err_msg = "Error -- activity_state - method = %s, but stateId parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
if 'registration' in req_dict['params']:
validator.validate_uuid(req_dict['params']['registration'], "registration param for activity state")
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for activity state is not valid")
validator.validate_agent(agent, "Activity state agent param")
else:
err_msg = "Error -- activity_state - method = %s, but agent parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
# Must have body included for state
if 'body' not in req_dict:
err_msg = "Could not find the state"
raise ParamError(err_msg)
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "state")
# Set state
req_dict['state'] = req_dict.pop('raw_body', req_dict.pop('body', None))
return req_dict
@auth
def activity_state_get(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "agent", "stateId", "registration", "since"])
if rogueparams:
raise ParamError("The get activity state request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity state")
else:
err_msg = "Error -- activity_state - method = %s, but activityId parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
if 'registration' in req_dict['params']:
validator.validate_uuid(req_dict['params']['registration'], "registration param for activity state")
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for activity state is not valid")
validator.validate_agent(agent, "Activity state agent param")
else:
err_msg = "Error -- activity_state - method = %s, but agent parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
if 'since' in req_dict['params']:
try:
parse_datetime(req_dict['params']['since'])
except (Exception, ISO8601Error):
raise ParamError("Since parameter was not a valid ISO8601 timestamp")
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "state")
return req_dict
@auth
def activity_state_delete(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "agent", "stateId", "registration"])
if rogueparams:
raise ParamError("The delete activity state request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity state")
else:
err_msg = "Error -- activity_state - method = %s, but activityId parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
if 'registration' in req_dict['params']:
validator.validate_uuid(req_dict['params']['registration'], "registration param for activity state")
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for activity state is not valid")
validator.validate_agent(agent, "Activity state agent param")
else:
err_msg = "Error -- activity_state - method = %s, but agent parameter is missing.." % req_dict['method']
raise ParamError(err_msg)
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "state")
return req_dict
@auth
def activity_profile_post(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "profileId"])
if rogueparams:
raise ParamError("The post activity profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity profile")
else:
err_msg = "Error -- activity_profile - method = %s, but activityId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'profileId' in req_dict['params']:
err_msg = "Error -- activity_profile - method = %s, but profileId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if 'body' not in req_dict:
err_msg = "Could not find the profile document"
raise ParamError(err_msg)
# Check the content type if the document already exists
exists = False
try:
p = ActivityProfile.objects.get(activity_id=req_dict['params']['activityId'],
profile_id=req_dict['params']['profileId'])
exists = True
except ActivityProfile.DoesNotExist:
pass
if exists:
if str(p.content_type) != "application/json" or ("application/json" not in req_dict['headers']['CONTENT_TYPE'] or \
req_dict['headers']['CONTENT_TYPE'] != "application/json"):
raise ParamError("Neither original document or document to be posted has a Content-Type of 'application/json'")
req_dict['profile'] = req_dict.pop('raw_body', req_dict.pop('body', None))
return req_dict
@auth
def activity_profile_put(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "profileId"])
if rogueparams:
raise ParamError("The put activity profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity profile")
else:
err_msg = "Error -- activity_profile - method = %s, but activityId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'profileId' in req_dict['params']:
err_msg = "Error -- activity_profile - method = %s, but profileId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if 'body' not in req_dict:
err_msg = "Could not find the profile document"
raise ParamError(err_msg)
# Set profile - req_parse converts all request bodies to dict, act profile needs it as string and need to replace single quotes with double quotes
# b/c of quotation issue when using javascript with activity profile
req_dict['profile'] = req_dict.pop('raw_body', req_dict.pop('body', None))
return req_dict
@auth
def activity_profile_get(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "profileId", "since"])
if rogueparams:
raise ParamError("The get activity profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity profile")
else:
err_msg = "Error -- activity_profile - method = %s, but activityId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if 'since' in req_dict['params']:
try:
parse_datetime(req_dict['params']['since'])
except (Exception, ISO8601Error):
raise ParamError("Since parameter was not a valid ISO8601 timestamp")
return req_dict
@auth
def activity_profile_delete(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId", "profileId"])
if rogueparams:
raise ParamError("The delete activity profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(req_dict['params']['activityId'], "activityId param for activity profile")
else:
err_msg = "Error -- activity_profile - method = %s, but activityId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'profileId' in req_dict['params']:
err_msg = "Error -- activity_profile - method = %s, but profileId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
return req_dict
@auth
def activities_get(req_dict):
rogueparams = set(req_dict['params']) - set(["activityId"])
if rogueparams:
raise ParamError("The get activities request contained unexpected parameters: %s" % ", ".join(rogueparams))
try:
activity_id = req_dict['params']['activityId']
except KeyError:
err_msg = "Error -- activities - method = %s, but activityId parameter is missing" % req_dict['method']
raise ParamError(err_msg)
# Try to retrieve activity, if DNE then return empty else return activity info
try:
Activity.objects.get(activity_id=activity_id, authority__isnull=False)
except Activity.DoesNotExist:
err_msg = "No activity found with ID %s" % activity_id
raise IDNotFoundError(err_msg)
return req_dict
@auth
def agent_profile_post(req_dict):
rogueparams = set(req_dict['params']) - set(["agent", "profileId"])
if rogueparams:
raise ParamError("The post agent profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for agent profile is not valid")
validator.validate_agent(agent, "agent param for agent profile")
else:
err_msg = "Error -- agent_profile - method = %s, but agent parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'profileId' in req_dict['params']:
err_msg = "Error -- agent_profile - method = %s, but profileId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if 'body' not in req_dict:
err_msg = "Could not find the profile document"
raise ParamError(err_msg)
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "profile")
# Check the content type if the document already exists
exists = False
agent = req_dict['params']['agent']
a = Agent.objects.retrieve_or_create(**agent)[0]
try:
p = AgentProfile.objects.get(profile_id=req_dict['params']['profileId'],agent=a)
exists = True
except AgentProfile.DoesNotExist:
pass
if exists:
if str(p.content_type) != "application/json" or ("application/json" not in req_dict['headers']['CONTENT_TYPE'] or \
req_dict['headers']['CONTENT_TYPE'] != "application/json"):
raise ParamError("Neither original document or document to be posted has a Content-Type of 'application/json'")
# Set profile
req_dict['profile'] = req_dict.pop('raw_body', req_dict.pop('body', None))
return req_dict
@auth
def agent_profile_put(req_dict):
rogueparams = set(req_dict['params']) - set(["agent", "profileId"])
if rogueparams:
raise ParamError("The put agent profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for agent profile is not valid")
validator.validate_agent(agent, "agent param for agent profile")
else:
err_msg = "Error -- agent_profile - method = %s, but agent parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'profileId' in req_dict['params']:
err_msg = "Error -- agent_profile - method = %s, but profileId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if 'body' not in req_dict:
err_msg = "Could not find the profile document"
raise ParamError(err_msg)
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "profile")
req_dict['profile'] = req_dict.pop('raw_body', req_dict.pop('body', None))
return req_dict
@auth
def agent_profile_get(req_dict):
rogueparams = set(req_dict['params']) - set(["agent", "profileId", "since"])
if rogueparams:
raise ParamError("The get agent profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for agent profile is not valid")
validator.validate_agent(agent, "agent param for agent profile")
else:
err_msg = "Error -- agent_profile - method = %s, but agent parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if 'since' in req_dict['params']:
try:
parse_datetime(req_dict['params']['since'])
except (Exception, ISO8601Error):
raise ParamError("Since parameter was not a valid ISO8601 timestamp")
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "profile")
return req_dict
@auth
def agent_profile_delete(req_dict):
rogueparams = set(req_dict['params']) - set(["agent", "profileId"])
if rogueparams:
raise ParamError("The delete agent profile request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'agent' in req_dict['params']:
try:
agent = json.loads(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param for agent profile is not valid")
validator.validate_agent(agent, "agent param for agent profile")
else:
err_msg = "Error -- agent_profile - method = %s, but agent parameter missing.." % req_dict['method']
raise ParamError(err_msg)
if not 'profileId' in req_dict['params']:
err_msg = "Error -- agent_profile - method = %s, but profileId parameter missing.." % req_dict['method']
raise ParamError(err_msg)
# Extra validation if oauth
if req_dict['auth']['type'] == 'oauth':
validate_oauth_state_or_profile_agent(req_dict, "profile")
return req_dict
@auth
def agents_get(req_dict):
rogueparams = set(req_dict['params']) - set(["agent"])
if rogueparams:
raise ParamError("The get agent request contained unexpected parameters: %s" % ", ".join(rogueparams))
try:
req_dict['params']['agent']
except KeyError:
err_msg = "Error -- agents url, but no agent parameter.. the agent parameter is required"
raise ParamError(err_msg)
agent = json.loads(req_dict['params']['agent'])
params = get_agent_ifp(agent)
if not Agent.objects.filter(**params).exists():
raise IDNotFoundError("Error with Agent. The agent partial did not match any agents on record")
req_dict['agent_ifp'] = params
    return req_dict
| 43.25493 | 163 | 0.660089 |
01035e4deb6625eacf0cc981b5eb9592c7b70aab | 2,442 | py | Python | bentoml/frameworks/gluon.py | alvarogg777/BentoML | 2bb297dca0330228c27b14aeeba0742820c6f0ef | ["Apache-2.0"] | null | null | null | bentoml/frameworks/gluon.py | alvarogg777/BentoML | 2bb297dca0330228c27b14aeeba0742820c6f0ef | ["Apache-2.0"] | null | null | null | bentoml/frameworks/gluon.py | alvarogg777/BentoML | 2bb297dca0330228c27b14aeeba0742820c6f0ef | ["Apache-2.0"] | null | null | null |
import os
from bentoml.exceptions import MissingDependencyException
from bentoml.service.artifacts import BentoServiceArtifact
from bentoml.service.env import BentoServiceEnv
class GluonModelArtifact(BentoServiceArtifact):
"""
Abstraction for saving/loading gluon models
Args:
name (str): Name for the artifact
Raises:
MissingDependencyError: mxnet package is required for GluonModelArtifact
Example usage:
>>> from bentoml import env, artifacts, api, BentoService
>>> from bentoml.adapters import JsonInput
>>> from bentoml.frameworks.gluon import GluonModelArtifact
>>> import mxnet as mx
>>>
>>> @env(infer_pip_packages=True)
>>> @artifacts([GluonModelArtifact('model')])
>>> class GluonClassifier(BentoService):
>>> @api(input=JsonInput(), batch=False)
>>> def predict(self, request):
>>> nd_input = mx.nd.array(request['input'])
>>> return self.artifacts.model(nd_input).asnumpy()
>>>
>>> svc = GluonClassifier()
>>> svc.pack('model', model_to_save)
>>> svc.save()
"""
def __init__(self, name: str):
super(GluonModelArtifact, self).__init__(name)
self._model = None
def pack(self, model, metadata: dict = None): # pylint: disable=unused-argument
try:
import mxnet # noqa # pylint: disable=unused-import
except ImportError:
raise MissingDependencyException(
"mxnet package is required to use GluonModelArtifact"
)
self._model = model
return self
def load(self, path):
try:
from mxnet import gluon # noqa # pylint: disable=unused-import
except ImportError:
raise MissingDependencyException(
"mxnet package is required to use GluonModelArtifact"
)
prefix = self._model_file_path(path)
model = gluon.nn.SymbolBlock.imports(
"{}-symbol.json".format(prefix), ["data"], "{}-0000.params".format(prefix)
)
return self.pack(model)
def _model_file_path(self, base_path):
return os.path.join(base_path, self.name)
def save(self, dst):
self._model.export(self._model_file_path(dst))
def get(self):
return self._model
def set_dependencies(self, env: BentoServiceEnv):
if env._infer_pip_packages:
env.add_pip_packages(["mxnet"])
| 32.56 | 86 | 0.63964 |
af47a58fb66a558149c4d0d488b706ab63601ddf | 2,988 | py | Python | stokepy/helpers.py | bobbywlindsey/stokepy | e7f4ad0c27d8fc22129558db6ae0dbbab0627ace | ["MIT"] | 9 | 2017-05-09T20:00:10.000Z | 2020-07-02T18:00:22.000Z | stokepy/helpers.py | bobbywlindsey/stokepy | e7f4ad0c27d8fc22129558db6ae0dbbab0627ace | ["MIT"] | null | null | null | stokepy/helpers.py | bobbywlindsey/stokepy | e7f4ad0c27d8fc22129558db6ae0dbbab0627ace | ["MIT"] | 2 | 2017-08-10T14:47:07.000Z | 2019-01-25T02:37:34.000Z |
from .headers import *
def architecture_check_passed(P, phi):
"""
Make sure columns of the transition matrix, P, (which represent the
number of states) match the number of elements in the initial distribution,
phi, which represents the starting point in each state
"""
num_states = P.shape[0]
if phi.shape[0] != num_states:
#print("Transition matrix and initial distribution dimensions don't match")
return False
row_sums = np.sum(P, 1)
should_be = np.ones_like(row_sums)
if np.allclose(row_sums, should_be) == False:
#print('Rows of transition matrix do not all sum to 1')
return False
return True
def compute_absorbed_proportions(vector, states_in_recurrent_classes = []):
""" return numpy array of absorbed proportions for each recurrent state """
if states_in_recurrent_classes == []:
ap = [1.0]
else:
ap = [np.sum(vector[states]) for states in states_in_recurrent_classes]
return np.array(ap)
def support(vector):
""" return nonzero entries of a vector """
return np.nonzero(vector)[0]
def normalize_vector(vector):
""" normalize vector given hard-coded tolerance level """
tolerance = 0.001
sigma = np.sum(vector)
if np.abs(sigma) < tolerance:
# if row sums to 0, divide by leftmost nonzero entry
sigma = vector[support(vector)][0]
return vector/sigma
def normalize_rows(transition_matrix):
return np.apply_along_axis(normalize_vector, 1, transition_matrix)
def get_newly_absorbed_proportions(absorption_proportions, tolerance):
absorption_proportions = np.array(absorption_proportions)
# total absorbed by all recurrent classes
absorbed_cumulative = np.sum(absorption_proportions, axis = 1)
# remove tail, if simulation continued to run after complete absorption
absorbed_cumulative = absorbed_cumulative[absorbed_cumulative < 1-tolerance]
# append 1.0 to end
absorbed_cumulative = np.append(absorbed_cumulative, 1.0)
# find "newly" absorbed proportion at each step
absorbed_marginal = np.diff(absorbed_cumulative)
# np.diff shrinks array by 1 entry so prepend 0.0
absorbed_marginal = np.insert(absorbed_marginal, 0, absorbed_cumulative[0])
return absorbed_marginal
def plot_absorption_helper(absorption_proportions, tolerance):
absorbed_marginal = get_newly_absorbed_proportions(absorption_proportions, \
tolerance)
times = np.arange(absorbed_marginal.shape[0])
plt.bar(times, absorbed_marginal)
plt.xlabel('jumps')
plt.ylabel('proportion')
plt.title('Distribution of Absorption Times')
plt.show()
return None
def text_to_numeric(symbols, text):
return tuple(symbols.index(char.lower()) for char in text)
def numeric_to_text(symbols, numeric):
s = [symbols[num] for num in numeric]
return ''.join(s)
def apply_cipher(cipher, message_numeric):
return tuple(cipher[s] for s in message_numeric)
| 37.822785 | 83 | 0.716867 |
3ae224a8a388b3baca41ae2ececfd32f750515de | 9,535 | py | Python | diofant/concrete/delta.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | ["BSD-3-Clause"] | 57 | 2016-09-13T23:16:26.000Z | 2022-03-29T06:45:51.000Z | diofant/concrete/delta.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | ["BSD-3-Clause"] | 402 | 2016-05-11T11:11:47.000Z | 2022-03-31T14:27:02.000Z | diofant/concrete/delta.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | ["BSD-3-Clause"] | 20 | 2016-05-11T08:17:37.000Z | 2021-09-10T09:15:51.000Z |
"""
This module implements sums and products containing the Kronecker Delta function.
"""
from ..core import Add, Dummy, Integer, Mul, cacheit
from ..functions import KroneckerDelta, Piecewise, piecewise_fold
from ..logic import true
from ..polys import factor
from ..sets import Interval
from ..utilities import default_sort_key
@cacheit
def _expand_delta(expr, index):
"""Expand the first Add containing a simple KroneckerDelta."""
if not expr.is_Mul:
return expr
delta = None
func = Add
terms = [Integer(1)]
for h in expr.args:
if delta is None and h.is_Add and _has_simple_delta(h, index):
delta = True
func = h.func
terms = [terms[0]*t for t in h.args]
else:
terms = [t*h for t in terms]
return func(*terms)
@cacheit
def _extract_delta(expr, index):
"""Extract a simple KroneckerDelta from the expression.
Returns the tuple ``(delta, newexpr)`` where:
- ``delta`` is a simple KroneckerDelta expression if one was found,
or ``None`` if no simple KroneckerDelta expression was found.
- ``newexpr`` is a Mul containing the remaining terms; ``expr`` is
returned unchanged if no simple KroneckerDelta expression was found.
Examples
========
>>> from diofant.abc import i, j
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), i)
(KroneckerDelta(i, j), 4*x*y)
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), k)
(None, 4*x*y*KroneckerDelta(i, j))
See Also
========
diofant.functions.special.tensor_functions.KroneckerDelta
deltaproduct
deltasummation
"""
if not _has_simple_delta(expr, index):
return None, expr
if isinstance(expr, KroneckerDelta):
return expr, Integer(1)
assert expr.is_Mul
delta = None
terms = []
for arg in expr.args:
if delta is None and _is_simple_delta(arg, index):
delta = arg
else:
terms.append(arg)
return delta, expr.func(*terms)
@cacheit
def _has_simple_delta(expr, index):
"""
Returns True if ``expr`` is an expression that contains a KroneckerDelta
that is simple in the index ``index``, meaning that this KroneckerDelta
is nonzero for a single value of the index ``index``.
"""
if expr.has(KroneckerDelta):
if _is_simple_delta(expr, index):
return True
if expr.is_Add or expr.is_Mul:
for arg in expr.args:
if _has_simple_delta(arg, index):
return True
return False
@cacheit
def _is_simple_delta(delta, index):
"""
Returns True if ``delta`` is a KroneckerDelta and is nonzero for a single
value of the index ``index``.
"""
if isinstance(delta, KroneckerDelta) and delta.has(index):
p = (delta.args[0] - delta.args[1]).as_poly(index)
if p:
return p.degree() == 1
return False
@cacheit
def _remove_multiple_delta(expr):
"""Evaluate products of KroneckerDelta's."""
from ..solvers import solve
if expr.is_Add:
return expr.func(*list(map(_remove_multiple_delta, expr.args)))
if not expr.is_Mul:
return expr
eqs = []
newargs = []
for arg in expr.args:
if isinstance(arg, KroneckerDelta):
eqs.append(arg.args[0] - arg.args[1])
else:
newargs.append(arg)
if not eqs:
return expr
solns = solve(eqs)
if len(solns) == 0:
return Integer(0)
elif len(solns) == 1:
for key in solns[0]:
newargs.append(KroneckerDelta(key, solns[0][key]))
expr2 = expr.func(*newargs)
if expr != expr2:
return _remove_multiple_delta(expr2)
return expr
@cacheit
def _simplify_delta(expr):
"""Rewrite a KroneckerDelta's indices in its simplest form."""
from ..solvers import solve
if isinstance(expr, KroneckerDelta):
slns = solve(expr.args[0] - expr.args[1])
if slns and len(slns) == 1:
return Mul(*[KroneckerDelta(*(key, value))
for key, value in slns[0].items()])
return expr
@cacheit
def deltaproduct(f, limit):
"""Handle products containing a KroneckerDelta.
See Also
========
deltasummation
diofant.functions.special.tensor_functions.KroneckerDelta
diofant.concrete.products.product
"""
from .products import product
if (limit[2] - limit[1] < 0) == true:
return Integer(1)
if not f.has(KroneckerDelta):
return product(f, limit)
if f.is_Add:
# Identify the term in the Add that has a simple KroneckerDelta
delta = None
terms = []
for arg in sorted(f.args, key=default_sort_key):
if delta is None and _has_simple_delta(arg, limit[0]):
delta = arg
else:
terms.append(arg)
newexpr = f.func(*terms)
result = deltaproduct(newexpr, limit)
if isinstance(limit[1], int) and isinstance(limit[2], int):
result += sum(deltaproduct(newexpr,
(limit[0], limit[1], ik - 1)) *
delta.subs({limit[0]: ik}) *
deltaproduct(newexpr,
(limit[0], ik + 1, limit[2]))
for ik in range(int(limit[1]), int(limit[2] + 1)))
else:
k = Dummy('kprime', integer=True)
result += deltasummation(deltaproduct(newexpr,
(limit[0],
limit[1], k - 1)) *
delta.subs({limit[0]: k}) *
deltaproduct(newexpr, (limit[0],
k + 1, limit[2])),
(k, limit[1], limit[2]),
no_piecewise=_has_simple_delta(newexpr,
limit[0]))
return _remove_multiple_delta(result)
delta, _ = _extract_delta(f, limit[0])
if not delta:
g = _expand_delta(f, limit[0])
if f != g:
return factor(deltaproduct(g, limit))
return product(f, limit)
return (_remove_multiple_delta(f.subs({limit[0]: limit[1]}) *
KroneckerDelta(limit[2], limit[1])) +
_simplify_delta(KroneckerDelta(limit[2], limit[1] - 1)))
@cacheit
def deltasummation(f, limit, no_piecewise=False):
"""Handle summations containing a KroneckerDelta.
The idea for summation is the following:
- If we are dealing with a KroneckerDelta expression, i.e.
KroneckerDelta(g(x), j), we try to simplify it.
If we could simplify it, then we sum the resulting expression.
We already know we can sum a simplified expression, because only
simple KroneckerDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression: we return the summation,
taking care if we are dealing with a Derivative or with a proper
KroneckerDelta.
2) The expression is not simple (i.e. KroneckerDelta(cos(x))): we
can do nothing at all.
- If the expr is a multiplication expr having a KroneckerDelta term:
First we expand it.
If the expansion did work, then we try to sum the expansion.
If not, we try to extract a simple KroneckerDelta term, then we
have two cases:
1) We have a simple KroneckerDelta term, so we return the summation.
2) We didn't have a simple term, but we do have an expression with
simplified KroneckerDelta terms, so we sum this expression.
Examples
========
>>> i, j = symbols('i, j', integer=True, finite=True)
>>> deltasummation(KroneckerDelta(i, k), (k, -oo, oo))
1
>>> deltasummation(KroneckerDelta(i, k), (k, 0, oo))
Piecewise((1, 0 <= i), (0, true))
>>> deltasummation(KroneckerDelta(i, k), (k, 1, 3))
Piecewise((1, (1 <= i) & (i <= 3)), (0, true))
>>> deltasummation(k*KroneckerDelta(i, j)*KroneckerDelta(j, k),
... (k, -oo, oo))
j*KroneckerDelta(i, j)
>>> deltasummation(j*KroneckerDelta(i, j), (j, -oo, oo))
i
>>> deltasummation(i*KroneckerDelta(i, j), (i, -oo, oo))
j
See Also
========
deltaproduct
diofant.functions.special.tensor_functions.KroneckerDelta
diofant.concrete.sums.summation
"""
from ..solvers import solve
from .summations import summation
if (limit[2] - limit[1] < 0) == true:
return Integer(0)
if not f.has(KroneckerDelta):
return summation(f, limit)
x = limit[0]
g = _expand_delta(f, x)
if g.is_Add:
return piecewise_fold(g.func(*[deltasummation(h, limit,
no_piecewise)
for h in g.args]))
# try to extract a simple KroneckerDelta term
delta, expr = _extract_delta(g, x)
if not delta:
return summation(f, limit)
solns = solve(delta.args[0] - delta.args[1], x)
assert len(solns) == 1
value = solns[0][x]
if no_piecewise:
return expr.subs({x: value})
return Piecewise((expr.subs({x: value}),
Interval(*limit[1:3]).as_relational(value)),
(0, True))
| 31.160131 | 81 | 0.575039 |
c5dedc9f0a9a22f03d486696dc8627fe16e90606 | 10,430 | py | Python | theano/misc/check_blas.py | goodfeli/Theano | 0494f6891a326b02d4e135ac7ed9ccf639560212 | ["BSD-3-Clause"] | 11 | 2016-12-01T19:49:28.000Z | 2021-11-08T11:12:08.000Z | theano/misc/check_blas.py | goodfeli/Theano | 0494f6891a326b02d4e135ac7ed9ccf639560212 | ["BSD-3-Clause"] | null | null | null | theano/misc/check_blas.py | goodfeli/Theano | 0494f6891a326b02d4e135ac7ed9ccf639560212 | ["BSD-3-Clause"] | 6 | 2015-06-21T20:55:55.000Z | 2019-04-24T20:03:25.000Z |
#!/usr/bin/env python
# Print info to check which version of BLAS we link with.
# Test the speed of the BLAS gemm function:
#C=a*C+dot(A,B)*b
#A,B,C matrix
#a,b scalar
s = """
result for shapes=(2000,2000) and iters=100
GTX 470 7.22s
GTX 285, 6.84s
GTX 480 5.83s
"""
import os
import sys
import time
from optparse import OptionParser
import subprocess
import numpy
import theano
import theano.tensor as T
from theano.gof.python25 import any
def execute(execute=True, verbose=True, M=2000, N=2000, K=2000,
iters=10, order='C'):
"""
:param execute: If True, execute a Theano function that should call gemm.
:param verbose: If True, will print some Theano flags and env variables.
:param M,N,K: The M,N,K size used by gemm.
:param iters: The number of calls to gemm to do.
:return: a tuple (execution time,
str that represents the implementation used)
"""
if verbose:
print 'Some Theano flags:'
print ' blas.ldflags=', theano.config.blas.ldflags
print ' compiledir=', theano.config.compiledir
print ' floatX=', theano.config.floatX
print ' device=', theano.config.device
print 'Some OS information:'
print ' sys.platform=', sys.platform
print ' sys.version=', sys.version
print ' sys.prefix=', sys.prefix
print 'Some environment variables:'
print ' MKL_NUM_THREADS=', os.getenv('MKL_NUM_THREADS')
print ' OMP_NUM_THREADS=', os.getenv('OMP_NUM_THREADS')
print ' GOTO_NUM_THREADS=', os.getenv('GOTO_NUM_THREADS')
print
print ('Numpy config: (used when the Theano flag'
' "blas.ldflags" is empty)')
numpy.show_config()
print 'Numpy dot module:', numpy.dot.__module__
print 'Numpy location:', numpy.__file__
print 'Numpy version:', numpy.__version__
if (theano.config.device.startswith("gpu") or
theano.config.init_gpu_device.startswith("gpu")):
print 'nvcc version:'
subprocess.call((theano.sandbox.cuda.nvcc_compiler.nvcc_path,
"--version"))
print
a = theano.shared(numpy.ones((M, N), dtype=theano.config.floatX,
order=order))
b = theano.shared(numpy.ones((N, K), dtype=theano.config.floatX,
order=order))
c = theano.shared(numpy.ones((M, K), dtype=theano.config.floatX,
order=order))
f = theano.function([], updates=[(c, 0.4 * c + .8 * T.dot(a, b))])
if any([x.op.__class__.__name__ == 'Gemm' for x in
f.maker.fgraph.toposort()]):
c_impl = [hasattr(thunk, 'cthunk')
for node, thunk in zip(f.fn.nodes, f.fn.thunks)
if node.op.__class__.__name__ == "Gemm"]
assert len(c_impl) == 1
if c_impl[0]:
impl = 'CPU (with direct Theano binding to blas)'
else:
impl = 'CPU (without direct Theano binding to blas but with numpy/scipy binding to blas)'
elif any([x.op.__class__.__name__ == 'GpuGemm' for x in
f.maker.fgraph.toposort()]):
impl = 'GPU'
else:
impl = 'ERROR, unable to tell if Theano used the cpu or the gpu:\n'
impl += str(f.maker.fgraph.toposort())
t0 = 0
t1 = -1
if execute:
sync = (hasattr(theano, "sandbox") and
hasattr(theano.sandbox, "cuda") and
theano.sandbox.cuda.cuda_available)
t0 = time.time()
for i in range(iters):
f()
if sync:
theano.sandbox.cuda.synchronize()
t1 = time.time()
return t1 - t0, impl
def jobman_job(state, channel):
execute()
return channel.COMPLETE
def test():
execute()
parser = OptionParser(
usage='%prog <options>\nCompute time needed to perform BLAS gemm '
'computations between matrices of size (M, N) and (N, K).')
parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
default=False,
help="If true, do not print the comparison table and config "
"options")
parser.add_option('--print_only', action='store_true', dest='print_only',
default=False,
help="If true, do not perform gemm computations")
parser.add_option('-M', '--M', action='store', dest='M',
default=2000, type="int",
help="The M size to gemm")
parser.add_option('-N', '--N', action='store', dest='N',
default=2000, type="int",
help="The N size to gemm")
parser.add_option('-K', '--K', action='store', dest='K',
default=2000, type="int",
help="The K size to gemm")
parser.add_option('--iter', action='store', dest='iter',
default=10, type="int",
help="The number of calls to gemm")
parser.add_option('--order', action='store', dest='order',
default="C",
help="The numpy memory layout parameter used when creating"
" the numpy.ndarray objects. It accepts 'C' for C memory"
" order and 'F' for Fortran order (for all matrices).")
if __name__ == "__main__":
options, arguments = parser.parse_args(sys.argv)
if hasattr(options, "help"):
print options.help
sys.exit(0)
if not options.quiet:
print """
Some results that you can compare against. They were 10 executions
of gemm in float64 with matrices of shape 2000x2000 (M=N=K=2000).
All memory layout was in C order.
CPU tested: Xeon E5345(2.33Ghz, 8M L2 cache, 1333Mhz FSB),
Xeon E5430(2.66Ghz, 12M L2 cache, 1333Mhz FSB),
Xeon E5450(3Ghz, 12M L2 cache, 1333Mhz FSB),
Xeon X5560(2.8Ghz, 12M L2 cache, hyper-threads?)
Core 2 E8500, Core i7 930(2.8Ghz, hyper-threads enabled),
Core i7 950(3.07GHz, hyper-threads enabled)
Xeon X5550(2.67GHz, 8M l2 cache?, hyper-threads enabled)
Libraries tested:
* numpy with ATLAS from distribution (FC9) package (1 thread)
* manually compiled numpy and ATLAS with 2 threads
* goto 1.26 with 1, 2, 4 and 8 threads
* goto2 1.13 compiled with multiple threads enabled
Xeon Xeon Xeon Core2 i7 i7 Xeon Xeon
lib/nb threads E5345 E5430 E5450 E8500 930 950 X5560 X5550
numpy 1.3.0 blas 775.92s
numpy_FC9_atlas/1 39.2s 35.0s 30.7s 29.6s 21.5s 19.60s
goto/1 18.7s 16.1s 14.2s 13.7s 16.1s 14.67s
numpy_MAN_atlas/2 12.0s 11.6s 10.2s 9.2s 9.0s
goto/2 9.5s 8.1s 7.1s 7.3s 8.1s 7.4s
goto/4 4.9s 4.4s 3.7s - 4.1s 3.8s
goto/8 2.7s 2.4s 2.0s - 4.1s 3.8s
openblas/1 14.04s
openblas/2 7.16s
openblas/4 3.71s
openblas/8 3.70s
mkl 11.0.083/1 7.97s
mkl 10.2.2.025/1 13.7s
mkl 10.2.2.025/2 7.6s
mkl 10.2.2.025/4 4.0s
mkl 10.2.2.025/8 2.0s
goto2 1.13/1 14.37s
goto2 1.13/2 7.26s
goto2 1.13/4 3.70s
goto2 1.13/8 1.94s
goto2 1.13/16 3.16s
Test time in float32
cuda version 5.5 5.0 4.2 4.1 4.0 3.2 3.0 # note
gpu
K20m/ECC 0.07s
K20/NOECC 0.07s
M2070 0.25s 0.27s 0.32s
M2050(Amazon) 0.25s
C2075 0.25s
C1060 0.46s
GTX Titan(D15U-50)0.06s 0.06s don't work
GTX 680 0.12s 0.154s 0.218s
GTX 580 0.16s 0.16s 0.164s 0.203s
GTX 480 0.19s 0.19s 0.192s 0.237s 0.27s
GTX 470 0.23s 0.23s 0.238s 0.297s 0.34s
GTX 660 0.18s 0.20s 0.23s
GTX 560 0.30s
GTX 650 Ti 0.27s
GTX 460 0.37s 0.45s
GTX 285 0.42s 0.452s 0.452s 0.40s # cuda 3.0 seems faster? driver version?
GTX 550 Ti 0.57s
GT 520 2.68s 3.06s
520M 2.44s 3.19s # with bumblebee on Ubuntu 12.04
GT 220 3.80s
GT 210 6.35s
8500 GT 10.68s
"""
t, impl = execute(not options.print_only, not options.quiet,
M=options.M, N=options.N, K=options.K,
iters=options.iter, order=options.order)
if options.print_only:
pass
elif options.quiet:
print t
else:
print
print "We executed", options.iter,
print "calls to gemm with a and b matrices of shapes",
print "(%d, %d) and (%d, %d)." % (options.M, options.N,
options.N, options.K)
print
print 'Total execution time: %.2fs on %s.' % (t, impl)
print
print ('Try to run this script a few times. Experience shows that'
' the first time is not as fast as followings calls. The'
' difference is not big, but consistent.')
| 41.388889 | 114 | 0.488207 |
121abad352cb15a72176824d4c1acc675f270eb8 | 1,703 | py | Python | arabicToRoman.py | thecrimsoncoder/arabicToRoman | 980d6c2cc0771686e051c9ed98b3e3d81589f42c | ["MIT"] | null | null | null | arabicToRoman.py | thecrimsoncoder/arabicToRoman | 980d6c2cc0771686e051c9ed98b3e3d81589f42c | ["MIT"] | null | null | null | arabicToRoman.py | thecrimsoncoder/arabicToRoman | 980d6c2cc0771686e051c9ed98b3e3d81589f42c | ["MIT"] | null | null | null |
import time
def title():
print("++++++++++++++++++++++++++++++++++++++++")
print("+ a r a b i c T o R o m a n . p y +")
print("+ Created By: Sean McElhare +")
print("+ github.com/thecrimsoncoder +")
print("++++++++++++++++++++++++++++++++++++++++")
def promptInput():
arabicInput = input("Please enter a number: ")
if arabicInput.isnumeric():
romanNumerals = decode(arabicInput)
print("Roman Numerals Conversion: " + str(romanNumerals))
else:
print("Please enter something that actually makes sense!")
time.sleep(2)
promptInput()
def decode(input):
arabicRomanMapping = {
1000 : "M",
900 : "CM",
500 : "D",
400 : "CD",
100 : "C",
90 : "XC",
40 : "XL",
10 : "X",
9 : "IX",
5 : "V",
4 : "IV",
1 : "I"
}
romanString = ""
input = int(input)
for divisor in arabicRomanMapping.keys():
print("Input: " + str(input))
print("Divisor: " + str(divisor))
quotient, remainder = divmod(input,divisor)
print("Quotient: " + str(quotient))
print("Remainder: " + str(remainder))
romanString = romanString + str(arabicRomanMapping[divisor] * quotient)
input = input - (divisor * quotient)
return romanString
title()
promptInput()
| 34.06 | 79 | 0.407516 |
3677ee2fd630c64c9aa744e499e21692112cc1aa | 5,039 | py | Python | workloads/mnist_jax/workload.py | hanlint/algorithmic-efficiency | effe55a0e97c82500861e34f52f7894534be79e3 | ["Apache-2.0"] | null | null | null | workloads/mnist_jax/workload.py | hanlint/algorithmic-efficiency | effe55a0e97c82500861e34f52f7894534be79e3 | ["Apache-2.0"] | null | null | null | workloads/mnist_jax/workload.py | hanlint/algorithmic-efficiency | effe55a0e97c82500861e34f52f7894534be79e3 | ["Apache-2.0"] | null | null | null |
"""MNIST workload implemented in Jax."""
import struct
import time
from typing import Tuple
from flax import linen as nn
import jax
import jax.numpy as jnp
import spec
import tensorflow_datasets as tfds
class _Model(nn.Module):
@nn.compact
def __call__(self, x: spec.Tensor, train: bool):
del train
x = nn.Conv(features=32, kernel_size=(3, 3))(x)
x = nn.relu(x)
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=64, kernel_size=(3, 3))(x)
x = nn.relu(x)
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = nn.Dense(features=256)(x)
x = nn.relu(x)
x = nn.Dense(features=10)(x)
x = nn.log_softmax(x)
return x
class MnistWorkload(spec.Workload):
def __init__(self):
self._eval_ds = None
def has_reached_goal(self, eval_result: float) -> bool:
return eval_result > 0.9
def _build_dataset(self,
data_rng: jax.random.PRNGKey,
split: str,
batch_size):
ds = tfds.load('mnist', split=split, try_gcs=True)
ds = ds.cache()
if split == 'train':
ds = ds.shuffle(1024, seed=data_rng[0])
ds = ds.repeat()
ds = ds.batch(batch_size)
return tfds.as_numpy(ds)
def build_input_queue(
self,
data_rng: jax.random.PRNGKey,
split: str,
batch_size: int):
return iter(self._build_dataset(data_rng, split, batch_size))
@property
def param_shapes(self):
init_params, _ = self.init_model_fn(jax.random.PRNGKey(0))
return jax.tree_map(lambda x: spec.ShapeTuple(x.shape), init_params)
@property
def loss_type(self):
return spec.LossType.SOFTMAX_CROSS_ENTROPY
@property
def train_mean(self):
return 0.0
@property
def train_stddev(self):
return 1.0
def model_params_types(self):
pass
@property
def max_allowed_runtime_sec(self):
return 60
@property
def eval_period_time_sec(self):
return 10
# Return whether or not a key in spec.ParameterTree is the output layer
# parameters.
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
pass
def preprocess_for_train(
self,
selected_raw_input_batch: spec.Tensor,
selected_label_batch: spec.Tensor,
rng: spec.RandomState) -> spec.Tensor:
del rng
return self.preprocess_for_eval(
selected_raw_input_batch, selected_label_batch, None, None)
def preprocess_for_eval(
self,
raw_input_batch: spec.Tensor,
raw_label_batch: spec.Tensor,
train_mean: spec.Tensor,
train_stddev: spec.Tensor) -> spec.Tensor:
del train_mean
del train_stddev
return (raw_input_batch, raw_label_batch)
_InitState = Tuple[spec.ParameterTree, spec.ModelAuxillaryState]
def init_model_fn(self, rng: spec.RandomState) -> _InitState:
init_val = jnp.ones((1, 28, 28, 1), jnp.float32)
initial_params = _Model().init(rng, init_val, train=True)['params']
return initial_params, None
def model_fn(
self,
params: spec.ParameterTree,
augmented_and_preprocessed_input_batch: spec.Tensor,
model_state: spec.ModelAuxillaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxillaryState]:
del model_state
del rng
del update_batch_norm
train = mode == spec.ForwardPassMode.TRAIN
logits_batch = _Model().apply(
{'params': params}, augmented_and_preprocessed_input_batch, train=train)
return logits_batch, None
# LossFn = Callable[Tuple[spec.Tensor, spec.Tensor], spec.Tensor]
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor,
logits_batch: spec.Tensor,
loss_type: spec.LossType) -> spec.Tensor: # differentiable
del loss_type
one_hot_targets = jax.nn.one_hot(label_batch, 10)
return -jnp.sum(one_hot_targets * nn.log_softmax(logits_batch), axis=-1)
def eval_model(
self,
params: spec.ParameterTree,
model_state: spec.ModelAuxillaryState,
rng: spec.RandomState):
"""Run a full evaluation of the model."""
data_rng, model_rng = jax.random.split(rng, 2)
eval_batch_size = 2000
num_batches = 10000 // eval_batch_size
if self._eval_ds is None:
self._eval_ds = self._build_dataset(
data_rng, split='test', batch_size=eval_batch_size)
eval_iter = iter(self._eval_ds)
total_loss = 0.
total_accuracy = 0.
for x in eval_iter:
images = x['image']
labels = x['label']
logits, _ = self.model_fn(
params,
images,
model_state,
spec.ForwardPassMode.EVAL,
model_rng,
update_batch_norm=False)
# TODO(znado): add additional eval metrics?
# total_loss += self.loss_fn(labels, logits, self.loss_type)
total_accuracy += jnp.mean(jnp.argmax(logits, axis=-1) == labels)
return float(total_accuracy / num_batches)
| 28.630682 | 80 | 0.673745 |
dbdfda9de4fe2a4d790a60a098e521bbc4538813 | 1,267 | py | Python | tests/core/test_serialization.py | beyondacm/great_expectations | 6977ac9e07c72658c777fec60174db63078b7721 | ["Apache-2.0"] | null | null | null | tests/core/test_serialization.py | beyondacm/great_expectations | 6977ac9e07c72658c777fec60174db63078b7721 | ["Apache-2.0"] | null | null | null | tests/core/test_serialization.py | beyondacm/great_expectations | 6977ac9e07c72658c777fec60174db63078b7721 | ["Apache-2.0"] | null | null | null |
import logging
from decimal import Decimal
from great_expectations.core.util import (
convert_to_json_serializable,
requires_lossy_conversion,
)
def test_lossy_serialization_warning(caplog):
caplog.set_level(logging.WARNING, logger="great_expectations.core")
d = Decimal("12345.678901234567890123456789")
convert_to_json_serializable(d)
assert len(caplog.messages) == 1
assert caplog.messages[0].startswith(
"Using lossy conversion for decimal 12345.678901234567890123456789"
)
caplog.clear()
d = Decimal("0.1")
convert_to_json_serializable(d)
print(caplog.messages)
assert len(caplog.messages) == 0
def test_lossy_conversion():
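# float64 can only round-trip about 15 significant decimal digits, so Decimals with
# more significant digits than that are expected to need lossy conversion.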
d = Decimal("12345.678901234567890123456789")
assert requires_lossy_conversion(d)
d = Decimal("12345.67890123456")
assert requires_lossy_conversion(d)
d = Decimal("12345.6789012345")
assert not requires_lossy_conversion(d)
d = Decimal("0.12345678901234567890123456789")
assert requires_lossy_conversion(d)
d = Decimal("0.1234567890123456")
assert requires_lossy_conversion(d)
d = Decimal("0.123456789012345")
assert not requires_lossy_conversion(d)
d = Decimal("0.1")
assert not requires_lossy_conversion(d)
| 25.857143 | 75 | 0.737964 |
39805bf138b8206844ff5346586048b2e69120db | 9,542 | py | Python | kats/models/stlf.py | sarvex/Kats | ad85dd235a4488f1e65799005efcc9da45f59100 | [
"MIT"
] | 2 | 2021-12-26T14:15:24.000Z | 2022-03-23T21:12:58.000Z | kats/models/stlf.py | sec-js/Kats | 01fbc4febd317dc641c6b0663dff36b545f38992 | [
"MIT"
] | null | null | null | kats/models/stlf.py | sec-js/Kats | 01fbc4febd317dc641c6b0663dff36b545f38992 | [
"MIT"
] | 1 | 2021-09-21T15:41:13.000Z | 2021-09-21T15:41:13.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""STLF forecasting model
This model first decomposes the time series data with STL decomposition,
then fits an individual forecasting model on the de-seasonalized component,
and finally re-seasonalizes the forecast with the seasonal component to produce
the final forecasting results.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import pandas as pd
import math
from copy import copy
import kats.models.model as m
from kats.consts import Params, TimeSeriesData
from typing import List, Dict
from kats.utils.decomposition import TimeSeriesDecomposition
from kats.models import (
linear_model,
prophet,
quadratic_model,
theta,
)
from kats.utils.parameter_tuning_utils import (
get_default_stlf_parameter_search_space
)
MODELS = ['prophet', "linear", "quadratic", "theta"]
class STLFParams(Params):
"""Parameter class for Prophet model
This is the parameter class for STLF model, stands for STL-decomposition based
forecasting model.
Attributes:
method: str, the forecasting model to fit on the de-seasonalized component
it currently supports prophet, linear, quadratic, and theta method.
m: int, the length of one seasonal cycle
"""
def __init__(self, method: str, m: int) -> None:
super().__init__()
if method not in MODELS:
msg = "Only support prophet, linear, quadratic and theta method, but get {name}.".format(
name=method
)
logging.error(msg)
raise ValueError(msg)
self.method = method
self.m = m
logging.debug("Initialized STFLParams instance.")
def validate_params(self):
"""Validate the parameters for STLF model
"""
logging.info("Method validate_params() is not implemented.")
pass
class STLFModel(m.Model):
"""Model class for STLF
This class provides fit, predict, and plot methods for STLF model
Attributes:
data: the input time series data as :class:`kats.consts.TimeSeriesData`
params: the parameter class defined with `STLFParams`
"""
def __init__(self, data: TimeSeriesData, params: STLFParams) -> None:
super().__init__(data, params)
if not isinstance(self.data.value, pd.Series):
msg = "Only support univariate time series, but get {type}.".format(
type=type(self.data.value)
)
logging.error(msg)
raise ValueError(msg)
self.n = self.data.value.shape[0]
if self.params.m > self.n:
msg = "The seasonality length m must be smaller than the length of time series"
logging.error(msg)
raise ValueError(msg)
def deseasonalize(self) -> TimeSeriesData:
"""De-seasonalize the time series data
Args:
None
Returns:
The seasonal and de-seasonalized data
"""
# create decomposer for time series decomposition
decomposer = TimeSeriesDecomposition(self.data, "multiplicative")
# pyre-fixme[16]: `STLFModel` has no attribute `decomp`.
self.decomp = decomposer.decomposer()
# pyre-fixme[16]: `STLFModel` has no attribute `sea_data`.
self.sea_data = copy(self.decomp["seasonal"])
# pyre-fixme[16]: `STLFModel` has no attribute `desea_data`.
self.desea_data = copy(self.data)
self.desea_data.value = self.desea_data.value / self.decomp["seasonal"].value
# pyre-fixme[7]: Expected `TimeSeriesData` but got `STLFModel`.
return self
def fit(self, **kwargs) -> None:
"""Fit STLF model
Args:
None
Returns:
The fitted STLF model object
"""
logging.debug("Call fit() with parameters. "
"kwargs:{kwargs}".format(
kwargs=kwargs
))
self.deseasonalize()
if self.params.method == "prophet":
params = prophet.ProphetParams()
model = prophet.ProphetModel(
# pyre-fixme[16]: `STLFModel` has no attribute `desea_data`.
data=self.desea_data,
params=params)
model.fit()
if self.params.method == "theta":
params = theta.ThetaParams(m=1)
model = theta.ThetaModel(
data=self.desea_data,
params=params)
model.fit()
if self.params.method == "linear":
params = linear_model.LinearModelParams()
model = linear_model.LinearModel(
data=self.desea_data,
params=params)
model.fit()
if self.params.method == "quadratic":
params = quadratic_model.QuadraticModelParams()
model = quadratic_model.QuadraticModel(
data=self.desea_data,
params=params)
model.fit()
# pyre-fixme[16]: `STLFModel` has no attribute `model`.
self.model = model
# pyre-fixme[7]: Expected `None` but got `Union[linear_model.LinearModel,
# prophet.ProphetModel, quadratic_model.QuadraticModel, theta.ThetaModel]`.
return model
# pyre-fixme[14]: `predict` overrides method defined in `Model` inconsistently.
def predict(self, steps: int, include_history=False, **kwargs) -> pd.DataFrame:
"""predict with the fitted STLF model
Args:
steps: the steps or length of prediction horizon
include_history: if include the historical data, default as False
Returns:
The predicted dataframe with following columns:
`time`, `fcst`, `fcst_lower`, and `fcst_upper`
"""
logging.debug("Call predict() with parameters. "
"steps:{steps}, kwargs:{kwargs}".format(
steps=steps, kwargs=kwargs
))
# pyre-fixme[16]: `STLFModel` has no attribute `include_history`.
self.include_history = include_history
# pyre-fixme[16]: `STLFModel` has no attribute `freq`.
# pyre-fixme[16]: `STLFModel` has no attribute `data`.
self.freq = kwargs.get("freq", pd.infer_freq(self.data.time))
# pyre-fixme[16]: `STLFModel` has no attribute `alpha`.
self.alpha = kwargs.get("alpha", 0.05)
# trend forecast
# pyre-fixme[16]: `STLFModel` has no attribute `model`.
fcst = self.model.predict(steps=steps, include_history=include_history)
# re-seasonalize
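# The last m seasonal factors from the STL decomposition are tiled across the
# forecast horizon and multiplied back into the de-seasonalized forecast
# (multiplicative decomposition), including the confidence bounds when present.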
m = self.params.m
rep = math.trunc(1 + fcst.shape[0] / m)
# pyre-fixme[16]: `STLFModel` has no attribute `decomp`.
seasonality = self.decomp["seasonal"].value[-m:]
# pyre-fixme[16]: `STLFModel` has no attribute `y_fcst`.
self.y_fcst = fcst.fcst * np.tile(seasonality, rep)[:fcst.shape[0]]
if ("fcst_lower" in fcst.columns) and ("fcst_upper" in fcst.columns):
# pyre-fixme[16]: `STLFModel` has no attribute `fcst_lower`.
self.fcst_lower = fcst.fcst_lower * np.tile(seasonality, rep)[:fcst.shape[0]]
# pyre-fixme[16]: `STLFModel` has no attribute `fcst_upper`.
self.fcst_upper = fcst.fcst_upper * np.tile(seasonality, rep)[:fcst.shape[0]]
logging.info("Generated forecast data from STLF model.")
logging.debug("Forecast data: {fcst}".format(fcst=self.y_fcst))
# TODO: create empirical uncertainty interval
last_date = self.data.time.max()
dates = pd.date_range(start=last_date, periods=steps + 1, freq=self.freq)
# pyre-fixme[16]: `STLFModel` has no attribute `dates`.
self.dates = dates[dates != last_date] # Return correct number of periods
if include_history:
self.dates = np.concatenate((pd.to_datetime(self.data.time), self.dates))
# pyre-fixme[16]: `STLFModel` has no attribute `fcst_df`.
self.fcst_df = pd.DataFrame(
{
"time": self.dates,
"fcst": self.y_fcst,
"fcst_lower": self.fcst_lower,
"fcst_upper": self.fcst_upper
}
)
logging.debug("Return forecast data: {fcst_df}".format(fcst_df=self.fcst_df))
return self.fcst_df
def plot(self):
"""plot forecasted results from Prophet model
"""
logging.info("Generating chart for forecast result from STLF model.")
m.Model.plot(self.data, self.fcst_df, include_history=self.include_history)
def __str__(self):
"""AR net moddel as a string
Args:
None
Returns:
String representation of the model name
"""
return "STLF"
@staticmethod
def get_parameter_search_space() -> List[Dict[str, object]]:
"""Provide a parameter space for STLF model
Move the implementation of get_parameter_search_space() out of stlf
to keep HPT implementation tighter, and avoid the dependency conflict issue.
Args:
None
Returns:
List of dicts contains parameter search space
"""
# pyre-fixme[7]: Expected `List[Dict[str, object]]` but got `List[Dict[str,
# typing.Union[List[typing.Any], bool, str]]]`.
return get_default_stlf_parameter_search_space()
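# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the module). It assumes
# `ts` is a univariate kats TimeSeriesData instance containing monthly values
# spanning at least one full seasonal cycle, so the seasonal period is m=12.
#
#   from kats.models.stlf import STLFParams, STLFModel
#
#   params = STLFParams(method="theta", m=12)
#   model = STLFModel(data=ts, params=params)
#   model.fit()
#   fcst_df = model.predict(steps=30)  # columns: time, fcst, fcst_lower, fcst_upper
# ---------------------------------------------------------------------------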
| 35.210332 | 101 | 0.621777 |
deb4f757632313217f20c90abf8c44505fbad81d | 3,703 | py | Python | tests/test_roi_pooling_conv.py | scilicet64/keras-spp | 23da20561fe92c585208af9bf3e0ef8f51bc5dcc | [
"MIT"
] | null | null | null | tests/test_roi_pooling_conv.py | scilicet64/keras-spp | 23da20561fe92c585208af9bf3e0ef8f51bc5dcc | [
"MIT"
] | null | null | null | tests/test_roi_pooling_conv.py | scilicet64/keras-spp | 23da20561fe92c585208af9bf3e0ef8f51bc5dcc | [
"MIT"
] | null | null | null | import pdb
import keras.backend as K
import numpy as np
from keras.layers import Input
from keras.models import Model
from spp.RoiPoolingConv import RoiPoolingConv
dim_ordering = K.image_data_format()
assert dim_ordering in {'channels_last','channels_first'}, 'dim_ordering must be in {channels_last,channels_first}'
pooling_regions = 2
num_rois = 4
num_channels = 12
if dim_ordering == 'channels_last':
in_img = Input(shape=(None, None, num_channels))
elif dim_ordering == 'channels_first':
in_img = Input(shape=(num_channels, None, None))
in_roi = Input(shape=(num_rois, 4))
out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([in_img, in_roi])
model = Model([in_img, in_roi], out_roi_pool)
model.summary()
model.compile(loss='mse', optimizer='sgd')
for img_size in [32]:
if dim_ordering == 'channels_first':
X_img = np.random.rand(1, num_channels, img_size, img_size)
row_length = [float(X_img.shape[2]) / pooling_regions]
col_length = [float(X_img.shape[3]) / pooling_regions]
elif dim_ordering == 'channels_last':
X_img = np.random.rand(1, img_size, img_size, num_channels)
row_length = [float(X_img.shape[1]) / pooling_regions]
col_length = [float(X_img.shape[2]) / pooling_regions]
X_roi = np.array([[0, 0, img_size / 2, img_size / 2],
[0, img_size / 2, img_size / 2, img_size / 2],
[img_size / 2, 0, img_size / 2, img_size / 2],
[img_size / 2, img_size / 2, img_size / 2, img_size / 2]])
X_roi = np.reshape(X_roi, (1, num_rois, 4)).astype(int)
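# Each ROI is encoded as (x, y, w, h); the four ROIs above cover the four equal
# quadrants of the image, so every pooled value can be checked against a plain
# max over the matching sub-region below.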
Y = model.predict([X_img, X_roi])
for roi in range(num_rois):
if dim_ordering == 'channels_first':
X_curr = X_img[0, :, X_roi[0, roi, 1]:X_roi[0, roi, 1] + X_roi[0, roi, 3],
X_roi[0, roi, 0]:X_roi[0, roi, 0] + X_roi[0, roi, 2]]
row_length = float(X_curr.shape[1]) / pooling_regions
col_length = float(X_curr.shape[2]) / pooling_regions
elif dim_ordering == 'channels_last':
X_curr = X_img[0, X_roi[0, roi, 1]:X_roi[0, roi, 1] + X_roi[0, roi, 3],
X_roi[0, roi, 0]:X_roi[0, roi, 0] + X_roi[0, roi, 2], :]
row_length = float(X_curr.shape[0]) / pooling_regions
col_length = float(X_curr.shape[1]) / pooling_regions
idx = 0
for ix in range(pooling_regions):
for jy in range(pooling_regions):
for cn in range(num_channels):
x1 = int((ix * col_length))
x2 = int((ix * col_length + col_length))
y1 = int((jy * row_length))
y2 = int((jy * row_length + row_length))
dx = max(1, x2 - x1)
dy = max(1, y2 - y1)
x2 = x1 + dx
y2 = y1 + dy
if dim_ordering == 'channels_first':
m_val = np.max(X_curr[cn, y1:y2, x1:x2])
if abs(m_val - Y[0, roi, cn, jy, ix]) > 0.01:
pdb.set_trace()
np.testing.assert_almost_equal(
m_val, Y[0, roi, cn, jy, ix], decimal=6)
idx += 1
elif dim_ordering == 'channels_last':
m_val = np.max(X_curr[y1:y2, x1:x2, cn])
if abs(m_val - Y[0, roi, jy, ix, cn]) > 0.01:
pdb.set_trace()
np.testing.assert_almost_equal(
m_val, Y[0, roi, jy, ix, cn], decimal=6)
idx += 1
print('Passed roi pooling test')
| 39.393617 | 115 | 0.544423 |
0c5bff14424b1a729de58c0df31ab775f6dbdf45 | 2,184 | py | Python | excursion/active_learning/approximations.py | leonoravesterbacka/excursion | 6716a90d8e34656e97fabe3f0cda95348a1711a5 | [
"Apache-2.0"
] | 2 | 2021-07-09T13:19:06.000Z | 2021-08-20T14:30:20.000Z | excursion/active_learning/approximations.py | leonoravesterbacka/excursion | 6716a90d8e34656e97fabe3f0cda95348a1711a5 | [
"Apache-2.0"
] | null | null | null | excursion/active_learning/approximations.py | leonoravesterbacka/excursion | 6716a90d8e34656e97fabe3f0cda95348a1711a5 | [
"Apache-2.0"
] | 4 | 2020-11-02T10:35:02.000Z | 2022-01-14T18:35:05.000Z | from scipy.linalg import cho_solve
from scipy.stats import norm
import gpytorch
import torch
import numpy as np
torch.cuda.set_device(0)
def h_normal_gpytorch(s):
""" Entropy of a normal distribution """
return torch.log(s * (2 * np.e * np.pi) ** 0.5)
def approx_mi_vec_gpytorch(mu, cov, thresholds):
# Expectation Propagation
mu1 = mu[:, 0]
std1 = cov[:, 0, 0] ** 0.5
mu2 = mu[:, 1]
std2 = cov[:, 1, 1] ** 0.5
rho = cov[:, 0, 1] / (std1 * std2)
std_sx = []
for j in range(len(thresholds) - 1):
alpha_j = (thresholds[j] - mu2) / std2
beta_j = (thresholds[j + 1] - mu2) / std2
alpha_j = alpha_j.detach().numpy()
beta_j = beta_j.detach().numpy()
c_j = norm.cdf(beta_j) - norm.cdf(alpha_j)
# \sigma(Y(X)|S(x')=j)
b_phi_b = beta_j * norm.pdf(beta_j)
b_phi_b[~np.isfinite(beta_j)] = 0.0
a_phi_a = alpha_j * norm.pdf(alpha_j)
a_phi_a[~np.isfinite(alpha_j)] = 0.0
alpha_j = torch.tensor(alpha_j)
beta_j = torch.tensor(beta_j)
mu_cond = mu1 - std1 * rho / torch.tensor(c_j) * (
torch.tensor(norm.pdf(beta_j)) - torch.tensor(norm.pdf(alpha_j))
)
var_cond = (
mu1 ** 2
- 2
* mu1
* std1
* (
rho
/ torch.tensor(c_j)
* (torch.tensor(norm.pdf(beta_j)) - torch.tensor(norm.pdf(alpha_j)))
)
+ std1 ** 2
* (
1.0
- (rho ** 2 / torch.tensor(c_j))
* (torch.tensor(b_phi_b) - torch.tensor(a_phi_a))
)
- mu_cond ** 2
)
std_sx_j = var_cond ** 0.5
std_sx.append(std_sx_j)
# Entropy
h = h_normal_gpytorch(std1)
for j in range(len(thresholds) - 1):
p_j = norm(mu2.detach().numpy(), std2.detach().numpy()).cdf(
thresholds[j + 1]
) - norm(mu2.detach().numpy(), std2.detach().numpy()).cdf(thresholds[j])
print("pj ", p_j)
dec = torch.tensor(p_j) * h_normal_gpytorch(std_sx[j])
h[p_j > 0.0] -= dec[p_j > 0.0]
return h
| 27.3 | 84 | 0.512363 |
630aaef30322b13378c5d4ff7b0688006e50c486 | 187 | py | Python | Beginner/odd_or_even.py | man21/IOSD-UIETKUK-HacktoberFest-Meetup-2019 | 8ca1a8bf95ee98d303d3a909c448288fa5992210 | [
"Apache-2.0"
] | 22 | 2019-10-02T16:48:10.000Z | 2020-11-14T23:28:41.000Z | Beginner/odd_or_even.py | man21/IOSD-UIETKUK-HacktoberFest-Meetup-2019 | 8ca1a8bf95ee98d303d3a909c448288fa5992210 | [
"Apache-2.0"
] | 46 | 2019-10-01T03:53:30.000Z | 2020-10-20T16:34:37.000Z | Beginner/odd_or_even.py | man21/IOSD-UIETKUK-HacktoberFest-Meetup-2019 | 8ca1a8bf95ee98d303d3a909c448288fa5992210 | [
"Apache-2.0"
] | 415 | 2019-10-01T03:48:22.000Z | 2021-02-27T04:57:28.000Z | a = int(input("enter integer :"))
if a == 0:
print("zero is neither even nor odd")
elif a % 2 == 0:
print("Given no. is even")
elif a % 2 == 1:
print("Given no. is odd")
| 20.777778 | 41 | 0.545455 |
cca075a91e7c1fdbe0d313c018a86148d7276d99 | 3,265 | py | Python | sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class MicrosoftStorageSyncConfiguration(Configuration):
"""Configuration for MicrosoftStorageSync.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(MicrosoftStorageSyncConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-09-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-storagesync/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 45.347222 | 129 | 0.685145 |
aeda54abdc13d82a4672376ad86c45a5cbd74d1f | 415 | py | Python | nominatim/validators/validators.py | StefanMavrodiev/trackhub-nominatim | 56738b7cdc435afe04fe83517bf82cb9f1bf7c53 | [
"MIT"
] | null | null | null | nominatim/validators/validators.py | StefanMavrodiev/trackhub-nominatim | 56738b7cdc435afe04fe83517bf82cb9f1bf7c53 | [
"MIT"
] | null | null | null | nominatim/validators/validators.py | StefanMavrodiev/trackhub-nominatim | 56738b7cdc435afe04fe83517bf82cb9f1bf7c53 | [
"MIT"
] | null | null | null | from .country_code import CountryCode
from .country_exclude import CountryExclude
class Validators(object):
country_code = CountryCode.validate
country_exclude = CountryExclude.validate
@staticmethod
def validate(data: str):
"""
Run all validators
:param data:
:return:
"""
Validators.country_code(data)
Validators.country_exclude(data)
| 19.761905 | 45 | 0.66747 |
c85a95b507a665e98443ebd8458d4e1bde2c270a | 75,849 | py | Python | tools/common.py | Moustik06/cats-blender-plugin | ab17a14963c716db5c5cfdb71f3770776b0ddad4 | [
"MIT"
] | null | null | null | tools/common.py | Moustik06/cats-blender-plugin | ab17a14963c716db5c5cfdb71f3770776b0ddad4 | [
"MIT"
] | null | null | null | tools/common.py | Moustik06/cats-blender-plugin | ab17a14963c716db5c5cfdb71f3770776b0ddad4 | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2017 GiveMeAllYourCats
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code author: GiveMeAllYourCats
# Repo: https://github.com/michaeldegroot/cats-blender-plugin
# Edits by: GiveMeAllYourCats, Hotox
import re
import bpy
import time
import bmesh
import platform
from math import degrees
from mathutils import Vector
from datetime import datetime
from html.parser import HTMLParser
from html.entities import name2codepoint
from . import common as Common
from . import supporter as Supporter
from . import decimation as Decimation
from . import translate as Translate
from . import armature_bones as Bones
from . import settings as Settings
from .register import register_wrap
from .translations import t
from mmd_tools_local import utils
from mmd_tools_local.panels import tool as mmd_tool
from mmd_tools_local.panels import util_tools as mmd_util_tools
from mmd_tools_local.panels import view_prop as mmd_view_prop
# TODO:
# - Add check if hips bone really needs to be rotated
# - Reset Pivot
# - Manual bone selection button for root bones
# - Checkbox for eye blinking/moving
# - Translate progress bar
def version_2_79_or_older():
return bpy.app.version < (2, 80)
def get_objects():
return bpy.context.scene.objects if version_2_79_or_older() else bpy.context.view_layer.objects
class SavedData:
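# Snapshot of every object's mode, selection, visibility and (for armatures) pose
# position, so the scene state can be restored after destructive operations.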
__object_properties = {}
__active_object = None
def __init__(self):
# initialize as instance attributes rather than class attributes
self.__object_properties = {}
self.__active_object = None
for obj in get_objects():
mode = obj.mode
selected = is_selected(obj)
hidden = is_hidden(obj)
pose = None
if obj.type == 'ARMATURE':
pose = obj.data.pose_position
self.__object_properties[obj.name] = [mode, selected, hidden, pose]
active = get_active()
if active:
self.__active_object = active.name
def load(self, ignore=None, load_mode=True, load_select=True, load_hide=True, load_active=True, hide_only=False):
if not ignore:
ignore = []
if hide_only:
load_mode = False
load_select = False
load_active = False
for obj_name, values in self.__object_properties.items():
# print(obj_name, ignore)
if obj_name in ignore:
continue
obj = get_objects().get(obj_name)
if not obj:
continue
mode, selected, hidden, pose = values
# print(obj_name, mode, selected, hidden)
print(obj_name, pose)
if load_mode and obj.mode != mode:
set_active(obj, skip_sel=True)
switch(mode, check_mode=False)
if pose:
obj.data.pose_position = pose
if load_select:
select(obj, selected)
if load_hide:
hide(obj, hidden)
# Set the active object
if load_active and self.__active_object and get_objects().get(self.__active_object):
if self.__active_object not in ignore and self.__active_object != get_active():
set_active(get_objects().get(self.__active_object), skip_sel=True)
def get_armature(armature_name=None):
if not armature_name:
armature_name = bpy.context.scene.armature
for obj in get_objects():
if obj.type == 'ARMATURE':
if (armature_name and obj.name == armature_name) or not armature_name:
return obj
return None
def get_armature_objects():
armatures = []
for obj in get_objects():
if obj.type == 'ARMATURE':
armatures.append(obj)
return armatures
def get_top_parent(child):
if child.parent:
return get_top_parent(child.parent)
return child
def unhide_all_unnecessary():
# TODO: Documentation? What does "unnecessary" mean?
try:
bpy.ops.object.hide_view_clear()
except RuntimeError:
pass
for collection in bpy.data.collections:
collection.hide_select = False
collection.hide_viewport = False
def unhide_all():
for obj in get_objects():
hide(obj, False)
set_unselectable(obj, False)
if not version_2_79_or_older():
unhide_all_unnecessary()
def unhide_children(parent):
for child in parent.children:
hide(child, False)
set_unselectable(child, False)
unhide_children(child)
def unhide_all_of(obj_to_unhide=None):
if not obj_to_unhide:
return
top_parent = get_top_parent(obj_to_unhide)
hide(top_parent, False)
set_unselectable(top_parent, False)
unhide_children(top_parent)
def unselect_all():
for obj in get_objects():
select(obj, False)
def set_active(obj, skip_sel=False):
if not skip_sel:
select(obj)
if version_2_79_or_older():
bpy.context.scene.objects.active = obj
else:
bpy.context.view_layer.objects.active = obj
def get_active():
if version_2_79_or_older():
return bpy.context.scene.objects.active
return bpy.context.view_layer.objects.active
def select(obj, sel=True):
if sel:
hide(obj, False)
if version_2_79_or_older():
obj.select = sel
else:
obj.select_set(sel)
def is_selected(obj):
if version_2_79_or_older():
return obj.select
return obj.select_get()
def hide(obj, val=True):
if hasattr(obj, 'hide'):
obj.hide = val
if not version_2_79_or_older():
obj.hide_set(val)
def is_hidden(obj):
if version_2_79_or_older():
return obj.hide
return obj.hide_get()
def set_unselectable(obj, val=True):
obj.hide_select = val
def switch(new_mode, check_mode=True):
if check_mode and get_active() and get_active().mode == new_mode:
return
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode=new_mode, toggle=False)
def set_default_stage_old():
switch('OBJECT')
unhide_all()
unselect_all()
armature = get_armature()
set_active(armature)
return armature
def set_default_stage():
"""
Selects the armature, unhides everything and sets the modes of every object to object mode
:return: the armature
"""
# Remove rigidbody collections, as they cause issues if they are not in the view_layer
if not version_2_79_or_older() and bpy.context.scene.remove_rigidbodies_joints:
print('Collections:')
for collection in bpy.data.collections:
print(' ' + collection.name, collection.name.lower())
if 'rigidbody' in collection.name.lower():
print('DELETE')
for obj in collection.objects:
delete(obj)
bpy.data.collections.remove(collection)
unhide_all()
unselect_all()
for obj in get_objects():
set_active(obj)
switch('OBJECT')
if obj.type == 'ARMATURE':
# obj.data.pose_position = 'REST'
pass
select(obj, False)
armature = get_armature()
if armature:
set_active(armature)
if version_2_79_or_older():
armature.layers[0] = True
# Fix broken armatures
if not bpy.context.scene.armature:
bpy.context.scene.armature = armature.name
return armature
def apply_modifier(mod, as_shapekey=False):
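# Blender 2.90 changed the operator API: older versions take apply_as='SHAPE'/'DATA'
# on modifier_apply, newer versions expose a separate modifier_apply_as_shapekey operator.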
if bpy.app.version < (2, 90):
bpy.ops.object.modifier_apply(apply_as='SHAPE' if as_shapekey else 'DATA', modifier=mod.name)
return
if as_shapekey:
bpy.ops.object.modifier_apply_as_shapekey(keep_modifier=False, modifier=mod.name)
else:
bpy.ops.object.modifier_apply(modifier=mod.name)
def remove_bone(find_bone):
armature = get_armature()
switch('EDIT')
for bone in armature.data.edit_bones:
if bone.name == find_bone:
armature.data.edit_bones.remove(bone)
def remove_empty():
armature = set_default_stage()
if armature.parent and armature.parent.type == 'EMPTY':
unselect_all()
set_active(armature.parent)
bpy.ops.object.delete(use_global=False)
unselect_all()
def get_bone_angle(p1, p2):
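# Angle in degrees between the two bones' head-to-tail direction vectors;
# Vector.angle() raises ValueError for zero-length bones, in which case 0 is returned.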
try:
ret = degrees((p1.head - p1.tail).angle(p2.head - p2.tail))
except ValueError:
ret = 0
return ret
def remove_unused_vertex_groups(ignore_main_bones=False):
remove_count = 0
unselect_all()
for mesh in get_meshes_objects(mode=2):
mesh.update_from_editmode()
vgroup_used = {i: False for i, k in enumerate(mesh.vertex_groups)}
for v in mesh.data.vertices:
for g in v.groups:
if g.weight > 0.0:
vgroup_used[g.group] = True
for i, used in sorted(vgroup_used.items(), reverse=True):
if not used:
if ignore_main_bones and mesh.vertex_groups[i].name in Bones.dont_delete_these_main_bones:
continue
mesh.vertex_groups.remove(mesh.vertex_groups[i])
remove_count += 1
return remove_count
def remove_unused_vertex_groups_of_mesh(mesh):
remove_count = 0
unselect_all()
mesh.update_from_editmode()
vgroup_used = {i: False for i, k in enumerate(mesh.vertex_groups)}
for v in mesh.data.vertices:
for g in v.groups:
if g.weight > 0.0:
vgroup_used[g.group] = True
for i, used in sorted(vgroup_used.items(), reverse=True):
if not used:
mesh.vertex_groups.remove(mesh.vertex_groups[i])
remove_count += 1
return remove_count
def find_center_vector_of_vertex_group(mesh, vertex_group):
data = mesh.data
verts = data.vertices
verts_in_group = []
for vert in verts:
i = vert.index
try:
if mesh.vertex_groups[vertex_group].weight(i) > 0:
verts_in_group.append(vert)
except RuntimeError:
# vertex is not in the group
pass
# Find the average vector point of the vertex cluster
divide_by = len(verts_in_group)
total = Vector()
if divide_by == 0:
return False
for vert in verts_in_group:
total += vert.co
average = total / divide_by
return average
def vertex_group_exists(mesh_name, bone_name):
mesh = get_objects()[mesh_name]
data = mesh.data
verts = data.vertices
for vert in verts:
i = vert.index
try:
mesh.vertex_groups[bone_name].weight(i)
return True
except:
pass
return False
def get_meshes(self, context):
# Modes:
# 0 = With Armature only
# 1 = Without armature only
# 2 = All meshes
choices = []
for mesh in get_meshes_objects(mode=0, check=False):
choices.append((mesh.name, mesh.name, mesh.name))
bpy.types.Object.Enum = sorted(choices, key=lambda x: tuple(x[0].lower()))
return bpy.types.Object.Enum
def get_top_meshes(self, context):
choices = []
for mesh in get_meshes_objects(mode=1, check=False):
choices.append((mesh.name, mesh.name, mesh.name))
bpy.types.Object.Enum = sorted(choices, key=lambda x: tuple(x[0].lower()))
return bpy.types.Object.Enum
def get_all_meshes(self, context):
choices = []
for mesh in get_meshes_objects(mode=2, check=False):
choices.append((mesh.name, mesh.name, mesh.name))
bpy.types.Object.Enum = sorted(choices, key=lambda x: tuple(x[0].lower()))
return bpy.types.Object.Enum
def get_armature_list(self, context):
choices = []
for armature in get_armature_objects():
# Set name displayed in list
name = armature.data.name
if name.startswith('Armature ('):
name = armature.name + ' (' + name.replace('Armature (', '')[:-1] + ')'
# 1. Will be returned by context.scene
# 2. Will be shown in lists
# 3. will be shown in the hover description (below description)
choices.append((armature.name, name, armature.name))
if len(choices) == 0:
choices.append(('None', 'None', 'None'))
bpy.types.Object.Enum = sorted(choices, key=lambda x: tuple(x[0].lower()))
return bpy.types.Object.Enum
def get_armature_merge_list(self, context):
choices = []
current_armature = context.scene.merge_armature_into
for armature in get_armature_objects():
if armature.name != current_armature:
# Set name displayed in list
name = armature.data.name
if name.startswith('Armature ('):
name = armature.name + ' (' + name.replace('Armature (', '')[:-1] + ')'
# 1. Will be returned by context.scene
# 2. Will be shown in lists
# 3. will be shown in the hover description (below description)
choices.append((armature.name, name, armature.name))
if len(choices) == 0:
choices.append(('None', 'None', 'None'))
bpy.types.Object.Enum = sorted(choices, key=lambda x: tuple(x[0].lower()))
return bpy.types.Object.Enum
def get_meshes_decimation(self, context):
choices = []
for object in bpy.context.scene.objects:
if object.type == 'MESH':
if object.parent and object.parent.type == 'ARMATURE' and object.parent.name == bpy.context.scene.armature:
if object.name in Decimation.ignore_meshes:
continue
# 1. Will be returned by context.scene
# 2. Will be shown in lists
# 3. will be shown in the hover description (below description)
choices.append((object.name, object.name, object.name))
bpy.types.Object.Enum = sorted(choices, key=lambda x: tuple(x[0].lower()))
return bpy.types.Object.Enum
def get_bones_head(self, context):
return get_bones(names=['Head'])
def get_bones_eye_l(self, context):
return get_bones(names=['Eye_L', 'EyeReturn_L'])
def get_bones_eye_r(self, context):
return get_bones(names=['Eye_R', 'EyeReturn_R'])
def get_bones_merge(self, context):
return get_bones(armature_name=bpy.context.scene.merge_armature_into)
# names - The first object will be the first one in the list. So the first one has to be the one that exists in the most models
def get_bones(names=None, armature_name=None, check_list=False):
if not names:
names = []
if not armature_name:
armature_name = bpy.context.scene.armature
choices = []
armature = get_armature(armature_name=armature_name)
if not armature:
bpy.types.Object.Enum = choices
return bpy.types.Object.Enum
# print("")
# print("START DEBUG UNICODE")
# print("")
for bone in armature.data.bones:
# print(bone.name)
try:
# 1. Will be returned by context.scene
# 2. Will be shown in lists
# 3. will be shown in the hover description (below description)
choices.append((bone.name, bone.name, bone.name))
except UnicodeDecodeError:
print("ERROR", bone.name)
choices.sort(key=lambda x: tuple(x[0].lower()))
choices2 = []
for name in names:
if name in armature.data.bones and choices[0][0] != name:
choices2.append((name, name, name))
if not check_list:
for choice in choices:
choices2.append(choice)
bpy.types.Object.Enum = choices2
return bpy.types.Object.Enum
def get_shapekeys_mouth_ah(self, context):
return get_shapekeys(context, ['MTH A', 'Ah', 'A'], True, False, False, False)
def get_shapekeys_mouth_oh(self, context):
return get_shapekeys(context, ['MTH U', 'Oh', 'O', 'Your'], True, False, False, False)
def get_shapekeys_mouth_ch(self, context):
return get_shapekeys(context, ['MTH I', 'Glue', 'Ch', 'I', 'There'], True, False, False, False)
def get_shapekeys_eye_blink_l(self, context):
return get_shapekeys(context, ['EYE Close L', 'Wink 2', 'Wink', 'Wink left', 'Wink Left', 'Blink (Left)', 'Blink', 'Basis'], False, False, False, False)
def get_shapekeys_eye_blink_r(self, context):
return get_shapekeys(context, ['EYE Close R', 'Wink 2 right', 'Wink 2 Right', 'Wink right 2', 'Wink Right 2', 'Wink right', 'Wink Right', 'Blink (Right)', 'Basis'], False, False, False, False)
def get_shapekeys_eye_low_l(self, context):
return get_shapekeys(context, ['Basis'], False, False, False, False)
def get_shapekeys_eye_low_r(self, context):
return get_shapekeys(context, ['Basis'], False, False, False, False)
def get_shapekeys_decimation(self, context):
return get_shapekeys(context,
['MTH A', 'Ah', 'A', 'MTH U', 'Oh', 'O', 'Your', 'MTH I', 'Glue', 'Ch', 'I', 'There', 'Wink 2', 'Wink', 'Wink left', 'Wink Left', 'Blink (Left)', 'Wink 2 right',
'EYE Close R', 'EYE Close L', 'Wink 2 Right', 'Wink right 2', 'Wink Right 2', 'Wink right', 'Wink Right', 'Blink (Right)', 'Blink'], False, True, True, False)
def get_shapekeys_decimation_list(self, context):
return get_shapekeys(context,
['MTH A', 'Ah', 'A', 'MTH U', 'Oh', 'O', 'Your', 'MTH I', 'Glue', 'Ch', 'I', 'There', 'Wink 2', 'Wink', 'Wink left', 'Wink Left', 'Blink (Left)', 'Wink 2 right',
'EYE Close R', 'EYE Close L', 'Wink 2 Right', 'Wink right 2', 'Wink Right 2', 'Wink right', 'Wink Right', 'Blink (Right)', 'Blink'], False, True, True, True)
# names - The first object will be the first one in the list. So the first one has to be the one that exists in the most models
# no_basis - If this is true the Basis will not be available in the list
def get_shapekeys(context, names, is_mouth, no_basis, decimation, return_list):
choices = []
choices_simple = []
meshes_list = get_meshes_objects(check=False)
if decimation:
meshes = meshes_list
elif meshes_list:
if is_mouth:
meshes = [get_objects().get(context.scene.mesh_name_viseme)]
else:
meshes = [get_objects().get(context.scene.mesh_name_eye)]
else:
bpy.types.Object.Enum = choices
return bpy.types.Object.Enum
for mesh in meshes:
if not mesh or not has_shapekeys(mesh):
bpy.types.Object.Enum = choices
return bpy.types.Object.Enum
for shapekey in mesh.data.shape_keys.key_blocks:
name = shapekey.name
if name in choices_simple:
continue
if no_basis and name == 'Basis':
continue
if decimation and name in Decimation.ignore_shapes:
continue
# 1. Will be returned by context.scene
# 2. Will be shown in lists
# 3. will be shown in the hover description (below description)
choices.append((name, name, name))
choices_simple.append(name)
choices.sort(key=lambda x: tuple(x[0].lower()))
choices2 = []
for name in names:
if name in choices_simple and len(choices) > 1 and choices[0][0] != name:
if decimation and name in Decimation.ignore_shapes:
continue
choices2.append((name, name, name))
for choice in choices:
choices2.append(choice)
bpy.types.Object.Enum = choices2
if return_list:
shape_list = []
for choice in choices2:
shape_list.append(choice[0])
return shape_list
return bpy.types.Object.Enum
def fix_armature_names(armature_name=None):
if not armature_name:
armature_name = bpy.context.scene.armature
base_armature = get_armature(armature_name=bpy.context.scene.merge_armature_into)
merge_armature = get_armature(armature_name=bpy.context.scene.merge_armature)
# Armature should be named correctly (has to be at the end because of multiple armatures)
armature = get_armature(armature_name=armature_name)
armature.name = 'Armature'
if not armature.data.name.startswith('Armature'):
Translate.update_dictionary(armature.data.name)
armature.data.name = 'Armature (' + Translate.translate(armature.data.name, add_space=True)[0] + ')'
# Reset the armature lists
try:
bpy.context.scene.armature = armature.name
except TypeError:
pass
try:
if base_armature:
bpy.context.scene.merge_armature_into = base_armature.name
except TypeError:
pass
try:
if merge_armature:
bpy.context.scene.merge_armature = merge_armature.name
except TypeError:
pass
def get_texture_sizes(self, context):
bpy.types.Object.Enum = [
("1024", "1024 (low)", "1024"),
("2048", "2048 (medium)", "2048"),
("4096", "4096 (high)", "4096")
]
return bpy.types.Object.Enum
def get_meshes_objects(armature_name=None, mode=0, check=True, visible_only=False):
# Modes:
# 0 = With armatures only
# 1 = Top level only
# 2 = All meshes
# 3 = Selected only
if not armature_name:
armature = get_armature()
if armature:
armature_name = armature.name
meshes = []
for ob in get_objects():
if ob.type == 'MESH':
if mode == 0 or mode == 5:
if ob.parent:
if ob.parent.type == 'ARMATURE' and ob.parent.name == armature_name:
meshes.append(ob)
elif ob.parent.parent and ob.parent.parent.type == 'ARMATURE' and ob.parent.parent.name == armature_name:
meshes.append(ob)
elif mode == 1:
if not ob.parent:
meshes.append(ob)
elif mode == 2:
meshes.append(ob)
elif mode == 3:
if is_selected(ob):
meshes.append(ob)
if visible_only:
for mesh in meshes.copy():  # iterate over a copy so removals don't skip entries
if is_hidden(mesh):
meshes.remove(mesh)
# Check for broken meshes and delete them
if check:
current_active = get_active()
to_remove = []
for mesh in meshes:
selected = is_selected(mesh)
# print(mesh.name, mesh.users)
set_active(mesh)
if not get_active():
to_remove.append(mesh)
if not selected:
select(mesh, False)
for mesh in to_remove:
print('DELETED CORRUPTED MESH:', mesh.name, mesh.users)
meshes.remove(mesh)
delete(mesh)
if current_active:
set_active(current_active)
return meshes
def join_meshes(armature_name=None, mode=0, apply_transformations=True, repair_shape_keys=True):
# Modes:
# 0 - Join all meshes
# 1 - Join selected only
if not armature_name:
armature_name = bpy.context.scene.armature
# Get meshes to join
meshes_to_join = get_meshes_objects(armature_name=armature_name, mode=3 if mode == 1 else 0)
if not meshes_to_join:
reset_context_scenes()
return None
set_default_stage()
unselect_all()
if apply_transformations:
apply_transforms(armature_name=armature_name)
unselect_all()
# Apply existing decimation modifiers and select the meshes for joining
for mesh in meshes_to_join:
set_active(mesh)
# Apply decimation modifiers
for mod in mesh.modifiers:
if mod.type == 'DECIMATE':
if mod.decimate_type == 'COLLAPSE' and mod.ratio == 1:
mesh.modifiers.remove(mod)
continue
if mod.decimate_type == 'UNSUBDIV' and mod.iterations == 0:
mesh.modifiers.remove(mod)
continue
if has_shapekeys(mesh):
bpy.ops.object.shape_key_remove(all=True)
apply_modifier(mod)
elif mod.type == 'SUBSURF':
mesh.modifiers.remove(mod)
elif mod.type == 'MIRROR':
if not has_shapekeys(mesh):
apply_modifier(mod)
# Standardize UV maps name
if version_2_79_or_older():
if mesh.data.uv_textures:
mesh.data.uv_textures[0].name = 'UVMap'
for mat_slot in mesh.material_slots:
if mat_slot and mat_slot.material:
for tex_slot in mat_slot.material.texture_slots:
if tex_slot and tex_slot.texture and tex_slot.texture_coords == 'UV':
tex_slot.uv_layer = 'UVMap'
else:
if mesh.data.uv_layers:
mesh.data.uv_layers[0].name = 'UVMap'
# Get the name of the active mesh in order to check if it was deleted later
active_mesh_name = get_active().name
# Join the meshes
if bpy.ops.object.join.poll():
bpy.ops.object.join()
else:
print('NO MESH COMBINED!')
# Delete meshes that somehow weren't deleted. Both pre and post join mesh deletion methods are needed!
for mesh in get_meshes_objects(armature_name=armature_name):
if mesh.name == active_mesh_name:
set_active(mesh)
elif mesh.name in meshes_to_join:
delete(mesh)
print('DELETED', mesh.name, mesh.users)
# Rename result to Body and correct modifiers
mesh = get_active()
if mesh:
# If its the only mesh in the armature left, rename it to Body
if len(get_meshes_objects(armature_name=armature_name)) == 1:
mesh.name = 'Body'
mesh.parent_type = 'OBJECT'
repair_mesh(mesh, armature_name)
if repair_shape_keys:
repair_shapekey_order(mesh.name)
reset_context_scenes()
# Update the material list of the Material Combiner
update_material_list()
return mesh
def repair_mesh(mesh, armature_name):
mesh.parent_type = 'OBJECT'
# Remove duplicate armature modifiers
mod_count = 0
for mod in mesh.modifiers:
mod.show_expanded = False
if mod.type == 'ARMATURE':
mod_count += 1
if mod_count > 1:
bpy.ops.object.modifier_remove(modifier=mod.name)
continue
mod.object = get_armature(armature_name=armature_name)
mod.show_viewport = True
# Add armature mod if there is none
if mod_count == 0:
mod = mesh.modifiers.new("Armature", 'ARMATURE')
mod.object = get_armature(armature_name=armature_name)
def apply_transforms(armature_name=None):
if not armature_name:
armature_name = bpy.context.scene.armature
armature = get_armature(armature_name=armature_name)
# Apply transforms on armature
unselect_all()
set_active(armature)
switch('OBJECT')
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
# Apply transforms of meshes
for mesh in get_meshes_objects(armature_name=armature_name):
unselect_all()
set_active(mesh)
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
def apply_all_transforms():
def apply_transforms_with_children(parent):
unselect_all()
set_active(parent)
switch('OBJECT')
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
for child in parent.children:
apply_transforms_with_children(child)
for obj in get_objects():
if not obj.parent:
apply_transforms_with_children(obj)
def reset_transforms(armature_name=None):
if not armature_name:
armature_name = bpy.context.scene.armature
armature = get_armature(armature_name=armature_name)
# Reset transforms on armature
for i in range(0, 3):
armature.location[i] = 0
armature.rotation_euler[i] = 0
armature.scale[i] = 1
# Apply transforms of meshes
for mesh in get_meshes_objects(armature_name=armature_name):
for i in range(0, 3):
mesh.location[i] = 0
mesh.rotation_euler[i] = 0
mesh.scale[i] = 1
def separate_by_materials(context, mesh):
prepare_separation(mesh)
utils.separateByMaterials(mesh)
for ob in context.selected_objects:
if ob.type == 'MESH':
hide(ob, False)
clean_shapekeys(ob)
utils.clearUnusedMeshes()
# Update the material list of the Material Combiner
update_material_list()
def separate_by_loose_parts(context, mesh):
prepare_separation(mesh)
# Merge coincident vertices first. This does not change the mesh's shape; it only
# welds extremely small pieces together so loose-parts separation doesn't produce tiny fragments.
remove_doubles(mesh, 0, save_shapes=True)
utils.separateByMaterials(mesh)
meshes = []
for ob in context.selected_objects:
if ob.type == 'MESH':
hide(ob, False)
meshes.append(ob)
wm = bpy.context.window_manager
current_step = 0
wm.progress_begin(current_step, len(meshes))
for mesh in meshes:
unselect_all()
set_active(mesh)
bpy.ops.mesh.separate(type='LOOSE')
meshes2 = []
for ob in context.selected_objects:
if ob.type == 'MESH':
meshes2.append(ob)
## This crashes blender, but would be better
# unselect_all()
# for mesh2 in meshes2:
# if len(mesh2.data.vertices) <= 3:
# select(mesh2)
# elif bpy.ops.object.join.poll():
# bpy.ops.object.join()
# unselect_all()
for mesh2 in meshes2:
clean_shapekeys(mesh2)
current_step += 1
wm.progress_update(current_step)
wm.progress_end()
utils.clearUnusedMeshes()
# Update the material list of the Material Combiner
update_material_list()
def separate_by_shape_keys(context, mesh):
prepare_separation(mesh)
switch('EDIT')
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_all(action='DESELECT')
switch('OBJECT')
selected_count = 0
max_count = 0
if has_shapekeys(mesh):
for kb in mesh.data.shape_keys.key_blocks:
for i, (v0, v1) in enumerate(zip(kb.relative_key.data, kb.data)):
max_count += 1
if v0.co != v1.co:
mesh.data.vertices[i].select = True
selected_count += 1
if not selected_count or selected_count == max_count:
return False
switch('EDIT')
bpy.ops.mesh.select_all(action='INVERT')
bpy.ops.mesh.separate(type='SELECTED')
for ob in context.selected_objects:
if ob.type == 'MESH':
if ob != get_active():
print('not active', ob.name)
active_tmp = get_active()
ob.name = ob.name.replace('.001', '') + '.no_shapes'
set_active(ob)
bpy.ops.object.shape_key_remove(all=True)
set_active(active_tmp)
select(ob, False)
else:
print('active', ob.name)
clean_shapekeys(ob)
switch('OBJECT')
utils.clearUnusedMeshes()
# Update the material list of the Material Combiner
update_material_list()
return True
def separate_by_cats_protection(context, mesh):
prepare_separation(mesh)
switch('EDIT')
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_all(action='DESELECT')
switch('OBJECT')
selected_count = 0
max_count = 0
if has_shapekeys(mesh):
for kb in mesh.data.shape_keys.key_blocks:
if kb.name == 'Basis Original':
for i, (v0, v1) in enumerate(zip(kb.relative_key.data, kb.data)):
max_count += 1
if v0.co != v1.co:
mesh.data.vertices[i].select = True
selected_count += 1
if not selected_count or selected_count == max_count:
return False
switch('EDIT')
bpy.ops.mesh.select_all(action='INVERT')
bpy.ops.mesh.separate(type='SELECTED')
for ob in context.selected_objects:
if ob.type == 'MESH':
if ob != get_active():
print('not active', ob.name)
active_tmp = get_active()
ob.name = ob.name.replace('.001', '') + '.no_shapes'
set_active(ob)
bpy.ops.object.shape_key_remove(all=True)
set_active(active_tmp)
select(ob, False)
else:
print('active', ob.name)
clean_shapekeys(ob)
switch('OBJECT')
utils.clearUnusedMeshes()
# Update the material list of the Material Combiner
update_material_list()
return True
def prepare_separation(mesh):
set_default_stage()
unselect_all()
# Remove Rigidbodies and joints
if bpy.context.scene.remove_rigidbodies_joints:
for obj in get_objects():
if 'rigidbodies' in obj.name or 'joints' in obj.name:
delete_hierarchy(obj)
save_shapekey_order(mesh.name)
set_active(mesh)
for mod in mesh.modifiers:
if mod.type == 'DECIMATE':
mesh.modifiers.remove(mod)
else:
mod.show_expanded = False
clean_material_names(mesh)
def clean_shapekeys(mesh):
# Remove empty shapekeys
if has_shapekeys(mesh):
for kb in mesh.data.shape_keys.key_blocks:
if can_remove_shapekey(kb):
mesh.shape_key_remove(kb)
if len(mesh.data.shape_keys.key_blocks) == 1:
mesh.shape_key_remove(mesh.data.shape_keys.key_blocks[0])
def can_remove_shapekey(key_block):
if 'mmd_' in key_block.name:
return True
if key_block.relative_key == key_block:
return False # Basis
for v0, v1 in zip(key_block.relative_key.data, key_block.data):
if v0.co != v1.co:
return False
return True
def separate_by_verts():
for obj in bpy.context.selected_objects:
if obj.type == 'MESH' and len(obj.vertex_groups) > 0:
Common.set_active(obj)
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type='VERT')
for vgroup in obj.vertex_groups:
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.vertex_group_set_active(group=vgroup.name)
bpy.ops.object.vertex_group_select()
bpy.ops.mesh.separate(type='SELECTED')
bpy.ops.object.mode_set(mode='OBJECT')
def reset_context_scenes():
head_bones = get_bones_head(None, bpy.context)
if len(head_bones) > 0:
bpy.context.scene.head = head_bones[0][0]
bpy.context.scene.eye_left = get_bones_eye_l(None, bpy.context)[0][0]
bpy.context.scene.eye_right = get_bones_eye_r(None, bpy.context)[0][0]
meshes = get_meshes(None, bpy.context)
if len(meshes) > 0:
mesh = meshes[0][0]
if not bpy.context.scene.mesh_name_eye:
bpy.context.scene.mesh_name_eye = mesh
if not bpy.context.scene.mesh_name_viseme:
bpy.context.scene.mesh_name_viseme = mesh
if not bpy.context.scene.merge_mesh:
bpy.context.scene.merge_mesh = mesh
def save_shapekey_order(mesh_name):
mesh = get_objects()[mesh_name]
armature = get_armature()
if not armature:
return
# Get current custom data
custom_data = armature.get('CUSTOM')
if not custom_data:
# print('NEW DATA!')
custom_data = {}
# Create shapekey order
shape_key_order = []
if has_shapekeys(mesh):
for index, shapekey in enumerate(mesh.data.shape_keys.key_blocks):
shape_key_order.append(shapekey.name)
# Check if there is already a shapekey order
if custom_data.get('shape_key_order'):
# print('SHAPEKEY ORDER ALREADY EXISTS!')
# print(custom_data['shape_key_order'])
old_len = len(custom_data.get('shape_key_order'))
if type(shape_key_order) is str:
old_len = len(shape_key_order.split(',,,'))
if len(shape_key_order) <= old_len:
# print('ABORT')
return
# Save order to custom data
# print('SAVE NEW ORDER')
custom_data['shape_key_order'] = shape_key_order
# Save custom data in armature
armature['CUSTOM'] = custom_data
# print(armature.get('CUSTOM').get('shape_key_order'))
def repair_shapekey_order(mesh_name):
# Get current custom data
armature = get_armature()
custom_data = armature.get('CUSTOM')
if not custom_data:
custom_data = {}
# Extract shape keys from string
shape_key_order = custom_data.get('shape_key_order')
if not shape_key_order:
custom_data['shape_key_order'] = []
armature['CUSTOM'] = custom_data
if type(shape_key_order) is str:
shape_key_order_temp = []
for shape_name in shape_key_order.split(',,,'):
shape_key_order_temp.append(shape_name)
custom_data['shape_key_order'] = shape_key_order_temp
armature['CUSTOM'] = custom_data
sort_shape_keys(mesh_name, custom_data['shape_key_order'])
def update_shapekey_orders():
for armature in get_armature_objects():
shape_key_order_translated = []
# Get current custom data
custom_data = armature.get('CUSTOM')
if not custom_data:
continue
order = custom_data.get('shape_key_order')
if not order:
continue
if type(order) is str:
shape_key_order_temp = order.split(',,,')
order = []
for shape_name in shape_key_order_temp:
order.append(shape_name)
# Get shape keys and translate them
for shape_name in order:
shape_key_order_translated.append(Translate.translate(shape_name, add_space=True, translating_shapes=True)[0])
# print(armature.name, shape_key_order_translated)
custom_data['shape_key_order'] = shape_key_order_translated
armature['CUSTOM'] = custom_data
def sort_shape_keys(mesh_name, shape_key_order=None):
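# Reorders the mesh's shape keys so Basis comes first, followed by the standard
# VRChat blink/lowerlid and viseme keys (vrc.*), then any previously saved custom order.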
mesh = get_objects()[mesh_name]
if not has_shapekeys(mesh):
return
set_active(mesh)
if not shape_key_order:
shape_key_order = []
order = [
'Basis',
'vrc.blink_left',
'vrc.blink_right',
'vrc.lowerlid_left',
'vrc.lowerlid_right',
'vrc.v_aa',
'vrc.v_ch',
'vrc.v_dd',
'vrc.v_e',
'vrc.v_ff',
'vrc.v_ih',
'vrc.v_kk',
'vrc.v_nn',
'vrc.v_oh',
'vrc.v_ou',
'vrc.v_pp',
'vrc.v_rr',
'vrc.v_sil',
'vrc.v_ss',
'vrc.v_th',
'Basis Original'
]
for shape in shape_key_order:
if shape not in order:
order.append(shape)
wm = bpy.context.window_manager
current_step = 0
wm.progress_begin(current_step, len(order))
i = 0
for name in order:
if name == 'Basis' and 'Basis' not in mesh.data.shape_keys.key_blocks:
i += 1
current_step += 1
wm.progress_update(current_step)
continue
for index, shapekey in enumerate(mesh.data.shape_keys.key_blocks):
if shapekey.name == name:
mesh.active_shape_key_index = index
new_index = i
index_diff = (index - new_index)
if new_index >= len(mesh.data.shape_keys.key_blocks):
bpy.ops.object.shape_key_move(type='BOTTOM')
break
position_correct = False
if 0 <= index_diff <= (new_index - 1):
while position_correct is False:
if mesh.active_shape_key_index != new_index:
bpy.ops.object.shape_key_move(type='UP')
else:
position_correct = True
else:
if mesh.active_shape_key_index > new_index:
bpy.ops.object.shape_key_move(type='TOP')
position_correct = False
while position_correct is False:
if mesh.active_shape_key_index != new_index:
bpy.ops.object.shape_key_move(type='DOWN')
else:
position_correct = True
i += 1
break
current_step += 1
wm.progress_update(current_step)
mesh.active_shape_key_index = 0
wm.progress_end()
def isEmptyGroup(group_name):
mesh = get_objects().get('Body')
if mesh is None:
return True
vgroup = mesh.vertex_groups.get(group_name)
if vgroup is None:
return True
for vert in mesh.data.vertices:
for group in vert.groups:
if group.group == vgroup.index:
if group.weight > 0:
return False
return True
def removeEmptyGroups(obj, thres=0):
z = []
for v in obj.data.vertices:
for g in v.groups:
if g.weight > thres:
if g not in z:
z.append(obj.vertex_groups[g.group])
for r in obj.vertex_groups:
if r not in z:
obj.vertex_groups.remove(r)
def removeZeroVerts(obj, thres=0):
for v in obj.data.vertices:
z = []
for g in v.groups:
if not g.weight > thres:
z.append(g)
for r in z:
obj.vertex_groups[r.group].remove([v.index])
def delete_hierarchy(parent):
unselect_all()
to_delete = []
def get_child_names(obj):
for child in obj.children:
to_delete.append(child)
if child.children:
get_child_names(child)
get_child_names(parent)
to_delete.append(parent)
objs = bpy.data.objects
for obj in to_delete:
objs.remove(objs[obj.name], do_unlink=True)
def delete(obj):
if obj.parent:
for child in obj.children:
child.parent = obj.parent
objs = bpy.data.objects
objs.remove(objs[obj.name], do_unlink=True)
def days_between(d1, d2, time_format):
d1 = datetime.strptime(d1, time_format)
d2 = datetime.strptime(d2, time_format)
return abs((d2 - d1).days)
def delete_bone_constraints(armature_name=None):
if not armature_name:
armature_name = bpy.context.scene.armature
armature = get_armature(armature_name=armature_name)
switch('POSE')
for bone in armature.pose.bones:
if len(bone.constraints) > 0:
for constraint in bone.constraints:
bone.constraints.remove(constraint)
switch('EDIT')
def delete_zero_weight(armature_name=None, ignore=''):
if not armature_name:
armature_name = bpy.context.scene.armature
armature = get_armature(armature_name=armature_name)
switch('EDIT')
bone_names_to_work_on = set([bone.name for bone in armature.data.edit_bones])
bone_name_to_edit_bone = dict()
for edit_bone in armature.data.edit_bones:
bone_name_to_edit_bone[edit_bone.name] = edit_bone
vertex_group_names_used = set()
vertex_group_name_to_objects_having_same_named_vertex_group = dict()
for objects in get_meshes_objects(armature_name=armature_name):
vertex_group_id_to_vertex_group_name = dict()
for vertex_group in objects.vertex_groups:
vertex_group_id_to_vertex_group_name[vertex_group.index] = vertex_group.name
if vertex_group.name not in vertex_group_name_to_objects_having_same_named_vertex_group:
vertex_group_name_to_objects_having_same_named_vertex_group[vertex_group.name] = set()
vertex_group_name_to_objects_having_same_named_vertex_group[vertex_group.name].add(objects)
for vertex in objects.data.vertices:
for group in vertex.groups:
if group.weight > 0:
vertex_group_names_used.add(vertex_group_id_to_vertex_group_name.get(group.group))
not_used_bone_names = bone_names_to_work_on - vertex_group_names_used
count = 0
for bone_name in not_used_bone_names:
if not bpy.context.scene.keep_end_bones or not is_end_bone(bone_name, armature_name):
if bone_name not in Bones.dont_delete_these_bones and 'Root_' not in bone_name and bone_name != ignore:
armature.data.edit_bones.remove(bone_name_to_edit_bone[bone_name]) # delete bone
count += 1
if bone_name in vertex_group_name_to_objects_having_same_named_vertex_group:
for objects in vertex_group_name_to_objects_having_same_named_vertex_group[bone_name]: # delete vertex groups
vertex_group = objects.vertex_groups.get(bone_name)
if vertex_group is not None:
objects.vertex_groups.remove(vertex_group)
return count
def remove_unused_objects():
default_scene_objects = []
for obj in get_objects():
if (obj.type == 'CAMERA' and obj.name == 'Camera') \
or (obj.type == 'LAMP' and obj.name == 'Lamp') \
or (obj.type == 'LIGHT' and obj.name == 'Light') \
or (obj.type == 'MESH' and obj.name == 'Cube'):
default_scene_objects.append(obj)
if len(default_scene_objects) == 3:
for obj in default_scene_objects:
delete_hierarchy(obj)
def remove_no_user_objects():
# print('\nREMOVE OBJECTS')
for block in get_objects():
# print(block.name, block.users)
if block.users == 0:
print('Removing obj ', block.name)
delete(block)
# print('\nREMOVE MESHES')
for block in bpy.data.meshes:
# print(block.name, block.users)
if block.users == 0:
print('Removing mesh ', block.name)
bpy.data.meshes.remove(block)
# print('\nREMOVE MATERIALS')
for block in bpy.data.materials:
# print(block.name, block.users)
if block.users == 0:
print('Removing material ', block.name)
bpy.data.materials.remove(block)
# print('\nREMOVE MATS')
# for block in bpy.data.materials:
# print(block.name, block.users)
# if block.users == 0:
# bpy.data.materials.remove(block)
def is_end_bone(name, armature_name):
armature = get_armature(armature_name=armature_name)
end_bone = armature.data.edit_bones.get(name)
if end_bone and end_bone.parent and len(end_bone.parent.children) == 1:
return True
return False
def correct_bone_positions(armature_name=None):
if not armature_name:
armature_name = bpy.context.scene.armature
armature = get_armature(armature_name=armature_name)
upper_chest = armature.data.edit_bones.get('Upper Chest')
chest = armature.data.edit_bones.get('Chest')
neck = armature.data.edit_bones.get('Neck')
head = armature.data.edit_bones.get('Head')
if chest and neck:
if upper_chest and bpy.context.scene.keep_upper_chest:
chest.tail = upper_chest.head
upper_chest.tail = neck.head
else:
chest.tail = neck.head
if neck and head:
neck.tail = head.head
if 'Left shoulder' in armature.data.edit_bones:
if 'Left arm' in armature.data.edit_bones:
if 'Left elbow' in armature.data.edit_bones:
if 'Left wrist' in armature.data.edit_bones:
shoulder = armature.data.edit_bones.get('Left shoulder')
arm = armature.data.edit_bones.get('Left arm')
elbow = armature.data.edit_bones.get('Left elbow')
wrist = armature.data.edit_bones.get('Left wrist')
shoulder.tail = arm.head
arm.tail = elbow.head
elbow.tail = wrist.head
if 'Right shoulder' in armature.data.edit_bones:
if 'Right arm' in armature.data.edit_bones:
if 'Right elbow' in armature.data.edit_bones:
if 'Right wrist' in armature.data.edit_bones:
shoulder = armature.data.edit_bones.get('Right shoulder')
arm = armature.data.edit_bones.get('Right arm')
elbow = armature.data.edit_bones.get('Right elbow')
wrist = armature.data.edit_bones.get('Right wrist')
shoulder.tail = arm.head
arm.tail = elbow.head
elbow.tail = wrist.head
if 'Left leg' in armature.data.edit_bones:
if 'Left knee' in armature.data.edit_bones:
if 'Left ankle' in armature.data.edit_bones:
leg = armature.data.edit_bones.get('Left leg')
knee = armature.data.edit_bones.get('Left knee')
ankle = armature.data.edit_bones.get('Left ankle')
if 'Left leg 2' in armature.data.edit_bones:
leg = armature.data.edit_bones.get('Left leg 2')
leg.tail = knee.head
knee.tail = ankle.head
if 'Right leg' in armature.data.edit_bones:
if 'Right knee' in armature.data.edit_bones:
if 'Right ankle' in armature.data.edit_bones:
leg = armature.data.edit_bones.get('Right leg')
knee = armature.data.edit_bones.get('Right knee')
ankle = armature.data.edit_bones.get('Right ankle')
if 'Right leg 2' in armature.data.edit_bones:
leg = armature.data.edit_bones.get('Right leg 2')
leg.tail = knee.head
knee.tail = ankle.head
dpi_scale = 3
error = []
override = False
def show_error(scale, error_list, override_header=False):
global override, dpi_scale, error
override = override_header
dpi_scale = scale
if type(error_list) is str:
error_list = error_list.split('\n')
error = error_list
header = t('ShowError.label')
if override:
header = error_list[0]
ShowError.bl_label = header
try:
bpy.utils.register_class(ShowError)
except ValueError:
bpy.utils.unregister_class(ShowError)
bpy.utils.register_class(ShowError)
bpy.ops.cats_common.show_error('INVOKE_DEFAULT')
print('')
print('Report: Error')
for line in error:
print(' ' + line)
@register_wrap
class ShowError(bpy.types.Operator):
bl_idname = 'cats_common.show_error'
bl_label = t('ShowError.label')
def execute(self, context):
return {'FINISHED'}
def invoke(self, context, event):
dpi_value = Common.get_user_preferences().system.dpi
return context.window_manager.invoke_props_dialog(self, width=int(dpi_value * dpi_scale))
def draw(self, context):
if not error or len(error) == 0:
return
if override and len(error) == 1:
return
layout = self.layout
col = layout.column(align=True)
first_line = False
for i, line in enumerate(error):
if i == 0 and override:
continue
if line == '':
col.separator()
else:
row = col.row(align=True)
row.scale_y = 0.85
if not first_line:
row.label(text=line, icon='ERROR')
first_line = True
else:
row.label(text=line, icon_value=Supporter.preview_collections["custom_icons"]["empty"].icon_id)
def remove_doubles(mesh, threshold, save_shapes=True):
if not mesh:
return 0
    # If the mesh has no shape keys (or only a single Basis key), don't remove doubles
if not has_shapekeys(mesh) or len(mesh.data.shape_keys.key_blocks) == 1:
return 0
pre_tris = len(mesh.data.polygons)
set_active(mesh)
switch('EDIT')
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_all(action='DESELECT')
if save_shapes and has_shapekeys(mesh):
switch('OBJECT')
for kb in mesh.data.shape_keys.key_blocks:
i = 0
for v0, v1 in zip(kb.relative_key.data, kb.data):
if v0.co != v1.co:
mesh.data.vertices[i].select = True
i += 1
switch('EDIT')
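        # Every vertex moved by any shape key was selected above; inverting the selection means
        # only vertices identical across all shape keys get merged, so the shape keys stay intact.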
bpy.ops.mesh.select_all(action='INVERT')
else:
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.remove_doubles(threshold=threshold)
bpy.ops.mesh.select_all(action='DESELECT')
switch('OBJECT')
return pre_tris - len(mesh.data.polygons)
def get_tricount(obj):
# Triangulates with Bmesh to avoid messing with the original geometry
bmesh_mesh = bmesh.new()
bmesh_mesh.from_mesh(obj.data)
bmesh.ops.triangulate(bmesh_mesh, faces=bmesh_mesh.faces[:])
return len(bmesh_mesh.faces)
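# Hedged usage sketch (not part of the original add-on): get_tricount() works on a throwaway
# BMesh copy, so the triangle count can be read without touching the real geometry. The object
# name 'Body' below is illustrative only.
def _example_print_tricount(object_name='Body'):
    obj = get_objects().get(object_name)
    if obj is not None and obj.type == 'MESH':
        print('Triangle count of', object_name, ':', get_tricount(obj))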
def get_bone_orientations(armature):
x_cord = 0
y_cord = 1
z_cord = 2
fbx = False
# armature = get_armature()
#
# for index, bone in enumerate(armature.pose.bones):
# if 'Head' in bone.name:
# #if index == 5:
# bone_pos = bone.matrix
# print(bone_pos)
# world_pos = armature.matrix_world * bone.matrix
# print(world_pos)
# print(bone_pos[0][0], world_pos[0][0])
# if round(abs(bone_pos[0][0]), 4) != round(abs(world_pos[0][0]), 4):
# z_cord = 1
# y_cord = 2
# fbx = True
# break
return x_cord, y_cord, z_cord, fbx
def clean_material_names(mesh):
for j, mat in enumerate(mesh.material_slots):
if mat.name.endswith('.001'):
mesh.active_material_index = j
mesh.active_material.name = mat.name[:-4]
if mat.name.endswith(('. 001', ' .001')):
mesh.active_material_index = j
mesh.active_material.name = mat.name[:-5]
def mix_weights(mesh, vg_from, vg_to, mix_strength=1.0, mix_mode='ADD', delete_old_vg=True):
mesh.active_shape_key_index = 0
mod = mesh.modifiers.new("VertexWeightMix", 'VERTEX_WEIGHT_MIX')
mod.vertex_group_a = vg_to
mod.vertex_group_b = vg_from
mod.mix_mode = mix_mode
mod.mix_set = 'B'
mod.mask_constant = mix_strength
apply_modifier(mod)
if delete_old_vg:
mesh.vertex_groups.remove(mesh.vertex_groups.get(vg_from))
mesh.active_shape_key_index = 0 # This line fixes a visual bug in 2.80 which causes random weights to be stuck after being merged
def get_user_preferences():
return bpy.context.user_preferences if hasattr(bpy.context, 'user_preferences') else bpy.context.preferences
def has_shapekeys(mesh):
if not hasattr(mesh.data, 'shape_keys'):
return False
return hasattr(mesh.data.shape_keys, 'key_blocks')
def matmul(a, b):
if version_2_79_or_older():
return a * b
return a @ b
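# Hedged usage sketch (not part of the original add-on): matmul() hides the Blender 2.79 -> 2.80
# API change where matrix/vector multiplication moved from '*' to '@', so callers can be written
# once for both versions. The helper below is illustrative only, not an add-on API.
def _example_bone_head_world_space(armature, pose_bone):
    # world-space position of a pose bone head on both old and new Blender versions
    return matmul(armature.matrix_world, pose_bone.head)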
def ui_refresh():
# A way to refresh the ui
refreshed = False
while not refreshed:
if hasattr(bpy.data, 'window_managers'):
for windowManager in bpy.data.window_managers:
for window in windowManager.windows:
for area in window.screen.areas:
area.tag_redraw()
refreshed = True
# print('Refreshed UI')
else:
time.sleep(0.5)
def fix_zero_length_bones(armature, x_cord, y_cord, z_cord):
pre_mode = armature.mode
set_active(armature)
switch('EDIT')
for bone in armature.data.edit_bones:
if round(bone.head[x_cord], 4) == round(bone.tail[x_cord], 4) \
and round(bone.head[y_cord], 4) == round(bone.tail[y_cord], 4) \
and round(bone.head[z_cord], 4) == round(bone.tail[z_cord], 4):
bone.tail[z_cord] += 0.1
switch(pre_mode)
def fix_bone_orientations(armature):
# Connect all bones with their children if they have exactly one
for bone in armature.data.edit_bones:
if len(bone.children) == 1 and bone.name not in ['LeftEye', 'RightEye', 'Head', 'Hips']:
p1 = bone.head
p2 = bone.children[0].head
dist = ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2 + (p2[2] - p1[2]) ** 2) ** (1/2)
# Only connect them if the other bone is a certain distance away, otherwise blender will delete them
if dist > 0.005:
bone.tail = bone.children[0].head
if len(bone.parent.children) == 1: # if the bone's parent bone only has one child, connect the bones (Don't connect them all because that would mess up hand/finger bones)
bone.use_connect = True
def update_material_list(self=None, context=None):
try:
if hasattr(bpy.context.scene, 'smc_ob_data') and bpy.context.scene.smc_ob_data:
bpy.ops.smc.refresh_ob_data()
except AttributeError:
print('Material Combiner not found')
def unify_materials():
textures = [] # TODO
for ob in get_objects():
if ob.type == "MESH":
for mat_slot in ob.material_slots:
if mat_slot.material:
mat_slot.material.blend_method = 'HASHED'
# mat_slot.material.blend_method = 'BLEND' # Use this for transparent textures only
print('MAT: ', mat_slot.material.name)
if mat_slot.material.node_tree:
nodes = mat_slot.material.node_tree.nodes
image = None
for node in nodes:
# print(' ' + node.name + ', ' + node.type + ', ' + node.label)
if node.type == 'TEX_IMAGE' and 'toon' not in node.name and 'sphere' not in node.name:
image = node.image
# textures.append(node.image.name)
mat_slot.material.node_tree.nodes.remove(node)
# Create Image node
node_texture = nodes.new(type='ShaderNodeTexImage')
node_texture.location = 0, 0
node_texture.image = image
node_texture.label = 'Cats Texture'
# Create Principled BSDF node
node_prinipled = nodes.new(type='ShaderNodeBsdfPrincipled')
node_prinipled.location = 300, -220
node_prinipled.label = 'Cats Emission'
node_prinipled.inputs['Specular'].default_value = 0
node_prinipled.inputs['Roughness'].default_value = 0
node_prinipled.inputs['Sheen Tint'].default_value = 0
node_prinipled.inputs['Clearcoat Roughness'].default_value = 0
node_prinipled.inputs['IOR'].default_value = 0
# Create Transparency BSDF node
node_transparent = nodes.new(type='ShaderNodeBsdfTransparent')
node_transparent.location = 325, -100
node_transparent.label = 'Cats Transparency'
# Create Mix Shader node
node_mix = nodes.new(type='ShaderNodeMixShader')
node_mix.location = 600, 0
node_mix.label = 'Cats Mix'
# Create Output node
node_output = nodes.new(type='ShaderNodeOutputMaterial')
node_output.location = 800, 0
node_output.label = 'Cats Output'
# Create 2nd Output node
node_output2 = nodes.new(type='ShaderNodeOutputMaterial')
node_output2.location = 800, -200
node_output2.label = 'Cats Export'
# Link nodes together
mat_slot.material.node_tree.links.new(node_texture.outputs['Color'], node_prinipled.inputs['Base Color'])
mat_slot.material.node_tree.links.new(node_texture.outputs['Alpha'], node_mix.inputs['Fac'])
mat_slot.material.node_tree.links.new(node_prinipled.outputs['BSDF'], node_mix.inputs[2])
mat_slot.material.node_tree.links.new(node_transparent.outputs['BSDF'], node_mix.inputs[1])
mat_slot.material.node_tree.links.new(node_mix.outputs['Shader'], node_output.inputs['Surface'])
mat_slot.material.node_tree.links.new(node_prinipled.outputs['BSDF'], node_output2.inputs['Surface'])
# break
print(textures, len(textures))
return {'FINISHED'}
def add_principled_shader(mesh):
# This adds a principled shader and material output node in order for
# Unity to automatically detect exported materials
principled_shader_pos = (501, -500)
output_shader_pos = (801, -500)
principled_shader_label = 'Cats Export Shader'
output_shader_label = 'Cats Export'
for mat_slot in mesh.material_slots:
if mat_slot.material and mat_slot.material.node_tree:
nodes = mat_slot.material.node_tree.nodes
node_image = None
node_image_count = 0
# Check if the new nodes should be added and to which image node they should be attached to
for node in nodes:
# Cancel if the cats nodes are already found
if node.type == 'BSDF_PRINCIPLED' and node.label == principled_shader_label:
node_image = None
break
if node.type == 'OUTPUT_MATERIAL' and node.label == output_shader_label:
node_image = None
break
# Skip if this node is not an image node
if node.type != 'TEX_IMAGE':
continue
node_image_count += 1
# If an mmd_texture is found, link it to the principled shader later
if node.name == 'mmd_base_tex' or node.label == 'MainTexture':
node_image = node
node_image_count = 0
break
# This is an image node, so link it to the principled shader later
node_image = node
if not node_image or node_image_count > 1:
continue
# Create Principled BSDF node
node_prinipled = nodes.new(type='ShaderNodeBsdfPrincipled')
node_prinipled.label = 'Cats Export Shader'
node_prinipled.location = principled_shader_pos
node_prinipled.inputs['Specular'].default_value = 0
node_prinipled.inputs['Roughness'].default_value = 0
node_prinipled.inputs['Sheen Tint'].default_value = 0
node_prinipled.inputs['Clearcoat Roughness'].default_value = 0
node_prinipled.inputs['IOR'].default_value = 0
# Create Output node for correct image exports
node_output = nodes.new(type='ShaderNodeOutputMaterial')
node_output.label = 'Cats Export'
node_output.location = output_shader_pos
# Link nodes together
mat_slot.material.node_tree.links.new(node_image.outputs['Color'], node_prinipled.inputs['Base Color'])
mat_slot.material.node_tree.links.new(node_prinipled.outputs['BSDF'], node_output.inputs['Surface'])
def remove_toon_shader(mesh):
for mat_slot in mesh.material_slots:
if mat_slot.material and mat_slot.material.node_tree:
nodes = mat_slot.material.node_tree.nodes
for node in nodes:
if node.name == 'mmd_toon_tex':
print('Toon tex removed from material', mat_slot.material.name)
nodes.remove(node)
# if not node.image or not node.image.filepath:
# print('Toon tex removed: Empty, from material', mat_slot.material.name)
# nodes.remove(node)
# continue
#
# image_filepath = bpy.path.abspath(node.image.filepath)
# if not os.path.isfile(image_filepath):
# print('Toon tex removed:', node.image.name, 'from material', mat_slot.material.name)
# nodes.remove(node)
def fix_mmd_shader(mesh):
for mat_slot in mesh.material_slots:
if mat_slot.material and mat_slot.material.node_tree:
nodes = mat_slot.material.node_tree.nodes
for node in nodes:
if node.name == 'mmd_shader':
node.inputs['Reflect'].default_value = 1
def fix_vrm_shader(mesh):
for mat_slot in mesh.material_slots:
if mat_slot.material and mat_slot.material.node_tree:
is_vrm_mat = False
nodes = mat_slot.material.node_tree.nodes
for node in nodes:
if hasattr(node, 'node_tree') and 'MToon_unversioned' in node.node_tree.name:
node.location[0] = 200
node.inputs['ReceiveShadow_Texture_alpha'].default_value = -10000
node.inputs['ShadeTexture'].default_value = (1.0, 1.0, 1.0, 1.0)
node.inputs['Emission_Texture'].default_value = (0.0, 0.0, 0.0, 0.0)
node.inputs['SphereAddTexture'].default_value = (0.0, 0.0, 0.0, 0.0)
# Support typo in old vrm importer
node_input = node.inputs.get('NomalmapTexture')
if not node_input:
node_input = node.inputs.get('NormalmapTexture')
node_input.default_value = (1.0, 1.0, 1.0, 1.0)
is_vrm_mat = True
break
if not is_vrm_mat:
continue
nodes_to_keep = ['DiffuseColor', 'MainTexture', 'Emission_Texture']
if 'HAIR' in mat_slot.material.name:
nodes_to_keep = ['DiffuseColor', 'MainTexture', 'Emission_Texture', 'SphereAddTexture']
for node in nodes:
                # Delete all unnecessary nodes
if 'RGB' in node.name \
or 'Value' in node.name \
or 'Image Texture' in node.name \
or 'UV Map' in node.name \
or 'Mapping' in node.name:
if node.label not in nodes_to_keep:
for output in node.outputs:
for link in output.links:
mat_slot.material.node_tree.links.remove(link)
continue
# if hasattr(node, 'node_tree') and 'matcap_vector' in node.node_tree.name:
# for output in node.outputs:
# for link in output.links:
# mat_slot.material.node_tree.links.remove(link)
# continue
def fix_twist_bones(mesh, bones_to_delete):
# This will fix MMD twist bones
for bone_type in ['Hand', 'Arm']:
for suffix in ['L', 'R']:
prefix = 'Left' if suffix == 'L' else 'Right'
bone_parent_name = prefix + ' ' + ('elbow' if bone_type == 'Hand' else 'arm')
vg_twist = mesh.vertex_groups.get(bone_type + 'Twist_' + suffix)
vg_parent = mesh.vertex_groups.get(bone_parent_name)
if not vg_twist:
print('1. no ' + bone_type + 'Twist_' + suffix)
continue
if not vg_parent:
print('2. no ' + bone_parent_name)
vg_parent = mesh.vertex_groups.new(name=bone_parent_name)
vg_twist1 = mesh.vertex_groups.get(bone_type + 'Twist1_' + suffix)
vg_twist2 = mesh.vertex_groups.get(bone_type + 'Twist2_' + suffix)
vg_twist3 = mesh.vertex_groups.get(bone_type + 'Twist3_' + suffix)
mix_weights(mesh, vg_twist.name, vg_parent.name, mix_strength=0.2, delete_old_vg=False)
mix_weights(mesh, vg_twist.name, vg_twist.name, mix_strength=0.2, mix_mode='SUB', delete_old_vg=False)
if vg_twist1:
bones_to_delete.append(vg_twist1.name)
mix_weights(mesh, vg_twist1.name, vg_twist.name, mix_strength=0.25, delete_old_vg=False)
mix_weights(mesh, vg_twist1.name, vg_parent.name, mix_strength=0.75)
if vg_twist2:
bones_to_delete.append(vg_twist2.name)
mix_weights(mesh, vg_twist2.name, vg_twist.name, mix_strength=0.5, delete_old_vg=False)
mix_weights(mesh, vg_twist2.name, vg_parent.name, mix_strength=0.5)
if vg_twist3:
bones_to_delete.append(vg_twist3.name)
mix_weights(mesh, vg_twist3.name, vg_twist.name, mix_strength=0.75, delete_old_vg=False)
mix_weights(mesh, vg_twist3.name, vg_parent.name, mix_strength=0.25)
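# Hedged note (not part of the original add-on): the mix_weights() calls above fold the MMD
# twist-bone weights back onto the parent bone. For a vertex with weights w_parent, w_twist and
# w_twist1..3 the outcome is approximately
#     w_parent' = w_parent + 0.2*w_twist + 0.75*w_twist1 + 0.5*w_twist2 + 0.25*w_twist3
#     w_twist'  = 0.8*w_twist + 0.25*w_twist1 + 0.5*w_twist2 + 0.75*w_twist3
# with the Twist1/2/3 vertex groups removed afterwards and their bones queued for deletion.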
def fix_twist_bone_names(armature):
# This will fix MMD twist bone names after the vertex groups have been fixed
for bone_type in ['Hand', 'Arm']:
for suffix in ['L', 'R']:
bone_twist = armature.data.edit_bones.get(bone_type + 'Twist_' + suffix)
if bone_twist:
bone_twist.name = 'z' + bone_twist.name
def toggle_mmd_tabs_update(self, context):
toggle_mmd_tabs()
def toggle_mmd_tabs(shutdown_plugin=False):
mmd_cls = [
mmd_tool.MMDToolsObjectPanel,
mmd_tool.MMDDisplayItemsPanel,
mmd_tool.MMDMorphToolsPanel,
mmd_tool.MMDRigidbodySelectorPanel,
mmd_tool.MMDJointSelectorPanel,
mmd_util_tools.MMDMaterialSorter,
mmd_util_tools.MMDMeshSorter,
]
mmd_cls_shading = [
mmd_view_prop.MMDViewPanel,
mmd_view_prop.MMDSDEFPanel,
]
if not version_2_79_or_older():
mmd_cls = mmd_cls + mmd_cls_shading
# If the plugin is shutting down, load the mmd_tools tabs before that, to avoid issues when unregistering mmd_tools
if bpy.context.scene.show_mmd_tabs or shutdown_plugin:
for cls in mmd_cls:
try:
bpy.utils.register_class(cls)
except:
pass
else:
for cls in reversed(mmd_cls):
try:
bpy.utils.unregister_class(cls)
except:
pass
if not shutdown_plugin:
Settings.update_settings(None, None)
"""
HTML <-> text conversions.
http://stackoverflow.com/questions/328356/extracting-text-from-html-file-using-python
"""
class _HTMLToText(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._buf = []
self.hide_output = False
def handle_starttag(self, tag, attrs):
if tag in ('p', 'br') and not self.hide_output:
self._buf.append('\n')
elif tag in ('script', 'style'):
self.hide_output = True
def handle_startendtag(self, tag, attrs):
if tag == 'br':
self._buf.append('\n')
def handle_endtag(self, tag):
if tag == 'p':
self._buf.append('\n')
elif tag in ('script', 'style'):
self.hide_output = False
def handle_data(self, text):
if text and not self.hide_output:
self._buf.append(re.sub(r'\s+', ' ', text))
def handle_entityref(self, name):
if name in name2codepoint and not self.hide_output:
c = chr(name2codepoint[name])
self._buf.append(c)
def handle_charref(self, name):
if not self.hide_output:
n = int(name[1:], 16) if name.startswith('x') else int(name)
self._buf.append(chr(n))
def get_text(self):
return re.sub(r' +', ' ', ''.join(self._buf))
def html_to_text(html):
"""
Given a piece of HTML, return the plain text it contains.
This handles entities and char refs, but not javascript and stylesheets.
"""
parser = _HTMLToText()
try:
parser.feed(html)
parser.close()
except: # HTMLParseError: No good replacement?
pass
return parser.get_text()
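def _example_html_to_text():
    # Hedged usage sketch (not part of the original module): entities are decoded, <p>/<br>
    # become newlines and <script>/<style> contents are dropped. The sample string is
    # illustrative only.
    sample = '<p>Hello &amp; welcome<br/>to the club</p><script>var hidden = 1;</script>'
    return html_to_text(sample)  # roughly '\nHello & welcome\nto the club\n'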
""" === THIS CODE COULD BE USEFUL === """
# def addvertex(meshname, shapekey_name):
# mesh = get_objects()[meshname].data
# bm = bmesh.new()
# bm.from_mesh(mesh)
# bm.verts.ensure_lookup_table()
#
# print(" ")
# if shapekey_name in bm.verts.layers.shape.keys():
# val = bm.verts.layers.shape.get(shapekey_name)
# print("%s = %s" % (shapekey_name, val))
# sk = mesh.shape_keys.key_blocks[shapekey_name]
# print("v=%f, f=%f" % (sk.value, sk.frame))
# for i in range(len(bm.verts)):
# v = bm.verts[i]
# delta = v[val] - v.co
# if (delta.length > 0):
# print("v[%d]+%s" % (i, delta))
#
# print(" ")
# === THIS CODE COULD BE USEFUL ===
# Check which shape keys will be deleted on export by Blender
# def checkshapekeys():
# for ob in get_objects():
# if ob.type == 'MESH':
# mesh = ob
# bm = bmesh.new()
# bm.from_mesh(mesh.data)
# bm.verts.ensure_lookup_table()
#
# deleted_shapes = []
# for key in bm.verts.layers.shape.keys():
# if key == 'Basis':
# continue
# val = bm.verts.layers.shape.get(key)
# delete = True
# for vert in bm.verts:
# delta = vert[val] - vert.co
# if delta.length > 0:
# delete = False
# break
# if delete:
# deleted_shapes.append(key)
#
# return deleted_shapes
# # Repair vrc shape keys old
# def repair_shapekeys():
# for ob in get_objects():
# if ob.type == 'MESH':
# mesh = ob
# bm = bmesh.new()
# bm.from_mesh(mesh.data)
# bm.verts.ensure_lookup_table()
#
# for key in bm.verts.layers.shape.keys():
# if not key.startswith('vrc'):
# continue
#
# value = bm.verts.layers.shape.get(key)
# for vert in bm.verts:
# shapekey = vert
# shapekey_coords = mesh.matrix_world * shapekey[value]
# shapekey_coords[2] -= 0.00001
# shapekey[value] = mesh.matrix_world.inverted() * shapekey_coords
# break
#
# bm.to_mesh(mesh.data)
# === THIS CODE COULD BE USEFUL ===
| 33.092932 | 196 | 0.605914 |
f13b61de39c92b7baafe41ea00572547c9293140 | 808 | py | Python | pipeline/__init__.py | sdgdsffdsfff/bk-sops-tencent | e8aff91f822e79031e12b0f66943830f44ced506 | [
"Apache-2.0"
] | 1 | 2020-09-24T07:39:16.000Z | 2020-09-24T07:39:16.000Z | pipeline/__init__.py | sdgdsffdsfff/bk-sops-tencent | e8aff91f822e79031e12b0f66943830f44ced506 | [
"Apache-2.0"
] | 5 | 2021-02-08T20:46:54.000Z | 2021-06-10T22:54:45.000Z | pipeline/__init__.py | sdgdsffdsfff/bk-sops-tencent | e8aff91f822e79031e12b0f66943830f44ced506 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
default_app_config = 'pipeline.apps.PipelineConfig'
__version__ = '0.9.8'
| 47.529412 | 115 | 0.785891 |
79391ab57d5a76f494af11423d7ab0942e0394cb | 6,029 | py | Python | data/dataset_srmd.py | WestCityInstitute/KAIR | 3eb3cc7776fa8c57e8ed7c71bfa8039beb4c6677 | [
"MIT"
] | 3 | 2021-06-24T17:33:44.000Z | 2021-10-13T14:34:25.000Z | data/dataset_srmd.py | WestCityInstitute/KAIR | 3eb3cc7776fa8c57e8ed7c71bfa8039beb4c6677 | [
"MIT"
] | null | null | null | data/dataset_srmd.py | WestCityInstitute/KAIR | 3eb3cc7776fa8c57e8ed7c71bfa8039beb4c6677 | [
"MIT"
] | 1 | 2021-06-08T03:11:15.000Z | 2021-06-08T03:11:15.000Z | import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
from utils import utils_sisr
import hdf5storage
import os
class DatasetSRMD(data.Dataset):
'''
# -----------------------------------------
# Get L/H/M for noisy image SR with Gaussian kernels.
    # Only "paths_H" is needed, synthesize bicubically downsampled L on-the-fly.
# -----------------------------------------
# e.g., SRMD, H = f(L, kernel, sigma), sigma is noise level
# -----------------------------------------
'''
def __init__(self, opt):
super(DatasetSRMD, self).__init__()
self.opt = opt
self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
self.sf = opt['scale'] if opt['scale'] else 4
self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
self.L_size = self.patch_size // self.sf
self.sigma = opt['sigma'] if opt['sigma'] else [0, 50]
self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 0
# -------------------------------------
# PCA projection matrix
# -------------------------------------
self.p = hdf5storage.loadmat(os.path.join('kernels', 'srmd_pca_pytorch.mat'))['p']
self.ksize = int(np.sqrt(self.p.shape[-1])) # kernel size
# ------------------------------------
# get paths of L/H
# ------------------------------------
self.paths_H = util.get_image_paths(opt['dataroot_H'])
self.paths_L = util.get_image_paths(opt['dataroot_L'])
def __getitem__(self, index):
# ------------------------------------
# get H image
# ------------------------------------
H_path = self.paths_H[index]
img_H = util.imread_uint(H_path, self.n_channels)
img_H = util.uint2single(img_H)
# ------------------------------------
# modcrop for SR
# ------------------------------------
img_H = util.modcrop(img_H, self.sf)
# ------------------------------------
# kernel
# ------------------------------------
if self.opt['phase'] == 'train':
l_max = 50
theta = np.pi*np.random.rand(1)
l1 = 0.1+l_max*np.random.rand(1)
l2 = 0.1+(l1-0.1)*np.random.rand(1)
kernel = utils_sisr.anisotropic_Gaussian(ksize=self.ksize, theta=theta[0], l1=l1[0], l2=l2[0])
else:
kernel = utils_sisr.anisotropic_Gaussian(ksize=self.ksize, theta=np.pi, l1=0.1, l2=0.1)
k = np.reshape(kernel, (-1), order="F")
k_reduced = np.dot(self.p, k)
k_reduced = torch.from_numpy(k_reduced).float()
# ------------------------------------
        # synthesize L image via specified degradation model
# ------------------------------------
H, W, _ = img_H.shape
img_L = utils_sisr.srmd_degradation(img_H, kernel, self.sf)
img_L = np.float32(img_L)
if self.opt['phase'] == 'train':
"""
# --------------------------------
# get L/H patch pairs
# --------------------------------
"""
H, W, C = img_L.shape
# --------------------------------
# randomly crop L patch
# --------------------------------
rnd_h = random.randint(0, max(0, H - self.L_size))
rnd_w = random.randint(0, max(0, W - self.L_size))
img_L = img_L[rnd_h:rnd_h + self.L_size, rnd_w:rnd_w + self.L_size, :]
# --------------------------------
# crop corresponding H patch
# --------------------------------
rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]
# --------------------------------
# augmentation - flip and/or rotate
# --------------------------------
mode = np.random.randint(0, 8)
img_L, img_H = util.augment_img(img_L, mode=mode), util.augment_img(img_H, mode=mode)
# --------------------------------
# get patch pairs
# --------------------------------
img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
# --------------------------------
# select noise level and get Gaussian noise
# --------------------------------
if random.random() < 0.1:
noise_level = torch.zeros(1).float()
else:
noise_level = torch.FloatTensor([np.random.uniform(self.sigma_min, self.sigma_max)])/255.0
# noise_level = torch.rand(1)*50/255.0
# noise_level = torch.min(torch.from_numpy(np.float32([7*np.random.chisquare(2.5)/255.0])),torch.Tensor([50./255.]))
else:
img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
            noise_level = torch.FloatTensor([self.sigma_test])
# ------------------------------------
# add noise
# ------------------------------------
noise = torch.randn(img_L.size()).mul_(noise_level).float()
img_L.add_(noise)
# ------------------------------------
# get degradation map M
# ------------------------------------
M_vector = torch.cat((k_reduced, noise_level), 0).unsqueeze(1).unsqueeze(1)
M = M_vector.repeat(1, img_L.size()[-2], img_L.size()[-1])
"""
# -------------------------------------
# concat L and noise level map M
# -------------------------------------
"""
img_L = torch.cat((img_L, M), 0)
L_path = H_path
return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
def __len__(self):
return len(self.paths_H)
| 38.647436 | 132 | 0.437718 |
2f3bda07a0b7784467c3ddf9734db7eead86fe0d | 18,098 | py | Python | gokart/task.py | skmatz/gokart | ba1dc497dca1c7901bc861f49b1f081adc2a1888 | [
"MIT"
] | null | null | null | gokart/task.py | skmatz/gokart | ba1dc497dca1c7901bc861f49b1f081adc2a1888 | [
"MIT"
] | null | null | null | gokart/task.py | skmatz/gokart | ba1dc497dca1c7901bc861f49b1f081adc2a1888 | [
"MIT"
] | null | null | null | import hashlib
import os
import sys
from importlib import import_module
from logging import getLogger
from typing import Union, List, Any, Callable, Set, Optional, Dict
import luigi
import pandas as pd
import gokart
from gokart.file_processor import FileProcessor
from gokart.pandas_type_config import PandasTypeConfigMap
from gokart.parameter import TaskInstanceParameter, ListTaskInstanceParameter
from gokart.target import TargetOnKart
logger = getLogger(__name__)
class TaskOnKart(luigi.Task):
"""
This is a wrapper class of luigi.Task.
The key methods of a TaskOnKart are:
* :py:meth:`make_target` - this makes output target with a relative file path.
* :py:meth:`make_model_target` - this makes output target for models which generate multiple files to save.
* :py:meth:`load` - this loads input files of this task.
* :py:meth:`dump` - this save a object as output of this task.
"""
workspace_directory = luigi.Parameter(default='./resources/',
description='A directory to set outputs on. Please use a path starts with s3:// when you use s3.',
significant=False) # type: str
local_temporary_directory = luigi.Parameter(default='./resources/tmp/', description='A directory to save temporary files.', significant=False) # type: str
rerun = luigi.BoolParameter(default=False, description='If this is true, this task will run even if all output files exist.', significant=False)
strict_check = luigi.BoolParameter(default=False,
description='If this is true, this task will not run only if all input and output files exist.',
significant=False)
modification_time_check = luigi.BoolParameter(default=False,
description='If this is true, this task will not run only if all input and output files exist,'
' and all input files are modified before output file are modified.',
significant=False)
delete_unnecessary_output_files = luigi.BoolParameter(default=False, description='If this is true, delete unnecessary output files.', significant=False)
significant = luigi.BoolParameter(default=True,
description='If this is false, this task is not treated as a part of dependent tasks for the unique id.',
significant=False)
fix_random_seed_methods = luigi.ListParameter(default=['random.seed', 'numpy.random.seed'], description='Fix random seed method list.', significant=False)
fix_random_seed_value = luigi.IntParameter(default=None, description='Fix random seed method value.', significant=False)
def __init__(self, *args, **kwargs):
self._add_configuration(kwargs, self.get_task_family())
self._add_configuration(kwargs, 'TaskOnKart')
# 'This parameter is dumped into "workspace_directory/log/task_log/" when this task finishes with success.'
self.task_log = dict()
self.task_unique_id = None
super(TaskOnKart, self).__init__(*args, **kwargs)
self._rerun_state = self.rerun
def output(self):
file_path = self.__module__.replace(".", "/")
return self.make_target(os.path.join(file_path, f"{type(self).__name__}.pkl"))
def requires(self):
tasks = self.make_task_instance_dictionary()
        return tasks or []  # when tasks is an empty dict, this returns an empty list
def make_task_instance_dictionary(self) -> Dict[str, 'TaskOnKart']:
return {key: var for key, var in vars(self).items() if isinstance(var, TaskOnKart)}
@classmethod
def _add_configuration(cls, kwargs, section):
config = luigi.configuration.get_config()
class_variables = dict(TaskOnKart.__dict__)
class_variables.update(dict(cls.__dict__))
if section not in config:
return
for key, value in dict(config[section]).items():
if key not in kwargs and key in class_variables:
kwargs[key] = class_variables[key].parse(value)
def complete(self) -> bool:
if self._rerun_state:
for target in luigi.task.flatten(self.output()):
target.remove()
self._rerun_state = False
return False
is_completed = all([t.exists() for t in luigi.task.flatten(self.output())])
if self.strict_check or self.modification_time_check:
requirements = luigi.task.flatten(self.requires())
inputs = luigi.task.flatten(self.input())
is_completed = is_completed and all([task.complete() for task in requirements]) and all([i.exists() for i in inputs])
if not self.modification_time_check or not is_completed or not self.input():
return is_completed
return self._check_modification_time()
def _check_modification_time(self):
common_path = set(t.path() for t in luigi.task.flatten(self.input())) & set(t.path() for t in luigi.task.flatten(self.output()))
input_tasks = [t for t in luigi.task.flatten(self.input()) if t.path() not in common_path]
output_tasks = [t for t in luigi.task.flatten(self.output()) if t.path() not in common_path]
input_modification_time = max([target.last_modification_time() for target in input_tasks]) if input_tasks else None
output_modification_time = min([target.last_modification_time() for target in output_tasks]) if output_tasks else None
if input_modification_time is None or output_modification_time is None:
return True
# "=" must be required in the following statements, because some tasks use input targets as output targets.
return input_modification_time <= output_modification_time
def clone(self, cls=None, **kwargs):
if cls is None:
cls = self.__class__
new_k = {}
for param_name, param_class in cls.get_params():
if param_name in {'rerun', 'strict_check', 'modification_time_check'}:
continue
if param_name in kwargs:
new_k[param_name] = kwargs[param_name]
elif hasattr(self, param_name):
new_k[param_name] = getattr(self, param_name)
return cls(**new_k)
def make_target(self, relative_file_path: str, use_unique_id: bool = True, processor: Optional[FileProcessor] = None) -> TargetOnKart:
file_path = os.path.join(self.workspace_directory, relative_file_path)
unique_id = self.make_unique_id() if use_unique_id else None
return gokart.target.make_target(file_path=file_path, unique_id=unique_id, processor=processor)
def make_large_data_frame_target(self, relative_file_path: str, use_unique_id: bool = True, max_byte=int(2**26)) -> TargetOnKart:
file_path = os.path.join(self.workspace_directory, relative_file_path)
unique_id = self.make_unique_id() if use_unique_id else None
return gokart.target.make_model_target(file_path=file_path,
temporary_directory=self.local_temporary_directory,
unique_id=unique_id,
save_function=gokart.target.LargeDataFrameProcessor(max_byte=max_byte).save,
load_function=gokart.target.LargeDataFrameProcessor.load)
def make_model_target(self,
relative_file_path: str,
save_function: Callable[[Any, str], None],
load_function: Callable[[str], Any],
use_unique_id: bool = True):
"""
        Make a target for models which generate multiple files when saving, e.g. gensim.Word2Vec, Tensorflow, and so on.
:param relative_file_path: A file path to save.
:param save_function: A function to save a model. This takes a model object and a file path.
:param load_function: A function to load a model. This takes a file path and returns a model object.
:param use_unique_id: If this is true, add an unique id to a file base name.
"""
file_path = os.path.join(self.workspace_directory, relative_file_path)
assert relative_file_path[-3:] == 'zip', f'extension must be zip, but {relative_file_path} is passed.'
unique_id = self.make_unique_id() if use_unique_id else None
return gokart.target.make_model_target(file_path=file_path,
temporary_directory=self.local_temporary_directory,
unique_id=unique_id,
save_function=save_function,
load_function=load_function)
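    # Hedged usage sketch (not part of gokart): inside run() this could look like
    #     target = self.make_model_target('model.zip', save_function=my_save, load_function=my_load)
    #     self.dump(trained_model, target)
    # where my_save(model, path) and my_load(path) are placeholder callables, not gokart helpers.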
def load(self, target: Union[None, str, TargetOnKart] = None) -> Any:
def _load(targets):
if isinstance(targets, list) or isinstance(targets, tuple):
return [_load(t) for t in targets]
if isinstance(targets, dict):
return {k: _load(t) for k, t in targets.items()}
return targets.load()
return _load(self._get_input_targets(target))
def load_generator(self, target: Union[None, str, TargetOnKart] = None) -> Any:
def _load(targets):
if isinstance(targets, list) or isinstance(targets, tuple):
for t in targets:
yield from _load(t)
elif isinstance(targets, dict):
for k, t in targets.items():
yield from {k: _load(t)}
else:
yield targets.load()
return _load(self._get_input_targets(target))
def load_data_frame(self,
target: Union[None, str, TargetOnKart] = None,
required_columns: Optional[Set[str]] = None,
drop_columns: bool = False) -> pd.DataFrame:
data = self.load(target=target)
if isinstance(data, list):
def _pd_concat(dfs):
if isinstance(dfs, list):
return pd.concat([_pd_concat(df) for df in dfs])
else:
return dfs
data = _pd_concat(data)
required_columns = required_columns or set()
if data.empty:
return pd.DataFrame(columns=required_columns)
assert required_columns.issubset(set(data.columns)), f'data must have columns {required_columns}, but actually have only {data.columns}.'
if drop_columns:
data = data[required_columns]
return data
def dump(self, obj, target: Union[None, str, TargetOnKart] = None) -> None:
PandasTypeConfigMap().check(obj, task_namespace=self.task_namespace)
self._get_output_target(target).dump(obj)
def make_unique_id(self):
self.task_unique_id = self.task_unique_id or self._make_hash_id()
return self.task_unique_id
def _make_hash_id(self):
def _to_str_params(task):
if isinstance(task, TaskOnKart):
return str(task.make_unique_id()) if task.significant else None
return task.to_str_params(only_significant=True)
dependencies = [_to_str_params(task) for task in luigi.task.flatten(self.requires())]
dependencies = [d for d in dependencies if d is not None]
dependencies.append(self.to_str_params(only_significant=True))
dependencies.append(self.__class__.__name__)
return hashlib.md5(str(dependencies).encode()).hexdigest()
def _get_input_targets(self, target: Union[None, str, TargetOnKart]) -> Union[TargetOnKart, List[TargetOnKart]]:
if target is None:
return self.input()
if isinstance(target, str):
return self.input()[target]
return target
def _get_output_target(self, target: Union[None, str, TargetOnKart]) -> TargetOnKart:
if target is None:
return self.output()
if isinstance(target, str):
return self.output()[target]
return target
def get_info(self, only_significant=False):
params_str = {}
params = dict(self.get_params())
for param_name, param_value in self.param_kwargs.items():
if (not only_significant) or params[param_name].significant:
if type(params[param_name]) == gokart.TaskInstanceParameter:
params_str[param_name] = type(param_value).__name__ + '-' + param_value.make_unique_id()
else:
params_str[param_name] = params[param_name].serialize(param_value)
return params_str
def _get_task_log_target(self):
return self.make_target(f'log/task_log/{type(self).__name__}.pkl')
def get_task_log(self) -> Dict:
target = self._get_task_log_target()
if self.task_log:
return self.task_log
if target.exists():
return self.load(target)
return dict()
@luigi.Task.event_handler(luigi.Event.SUCCESS)
def _dump_task_log(self):
self.task_log['file_path'] = [target.path() for target in luigi.task.flatten(self.output())]
self.dump(self.task_log, self._get_task_log_target())
def _get_task_params_target(self):
return self.make_target(f'log/task_params/{type(self).__name__}.pkl')
def get_task_params(self) -> Dict:
        target = self._get_task_params_target()
if target.exists():
return self.load(target)
return dict()
@luigi.Task.event_handler(luigi.Event.START)
def _set_random_seed(self):
random_seed = self._get_random_seed()
seed_methods = self.try_set_seed(self.fix_random_seed_methods, random_seed)
self.dump({'seed': random_seed, 'seed_methods': seed_methods}, self._get_random_seeds_target())
def _get_random_seeds_target(self):
return self.make_target(f'log/random_seed/{type(self).__name__}.pkl')
@staticmethod
def try_set_seed(methods: List[str], random_seed: int) -> List[str]:
success_methods = []
for method_name in methods:
try:
for i, x in enumerate(method_name.split('.')):
if i == 0:
m = import_module(x)
else:
m = getattr(m, x)
m(random_seed)
success_methods.append(method_name)
except ModuleNotFoundError:
pass
except AttributeError:
pass
return success_methods
def _get_random_seed(self):
if self.fix_random_seed_value:
return self.fix_random_seed_value
return int(self.make_unique_id(), 16) % (2**32 - 1) # maximum numpy.random.seed
@luigi.Task.event_handler(luigi.Event.START)
def _dump_task_params(self):
self.dump(self.to_str_params(only_significant=True), self._get_task_params_target())
def _get_processing_time_target(self):
return self.make_target(f'log/processing_time/{type(self).__name__}.pkl')
def get_processing_time(self) -> str:
target = self._get_processing_time_target()
if target.exists():
return self.load(target)
return 'unknown'
@luigi.Task.event_handler(luigi.Event.PROCESSING_TIME)
def _dump_processing_time(self, processing_time):
self.dump(processing_time, self._get_processing_time_target())
@classmethod
def restore(cls, unique_id):
params = TaskOnKart().make_target(f'log/task_params/{cls.__name__}_{unique_id}.pkl', use_unique_id=False).load()
return cls.from_str_params(params)
@luigi.Task.event_handler(luigi.Event.FAILURE)
def _log_unique_id(self, exception):
logger.info(f'FAILURE:\n task name={type(self).__name__}\n unique id={self.make_unique_id()}')
@luigi.Task.event_handler(luigi.Event.START)
def _dump_module_versions(self):
self.dump(self._get_module_versions(), self._get_module_versions_target())
def _get_module_versions_target(self):
return self.make_target(f'log/module_versions/{type(self).__name__}.txt')
def _get_module_versions(self) -> str:
module_versions = []
for x in set([x.split('.')[0] for x in sys.modules.keys() if '_' not in x]):
module = import_module(x)
if '__version__' in dir(module):
if type(module.__version__) == str:
version = module.__version__.split(" ")[0]
else:
version = '.'.join([str(v) for v in module.__version__])
module_versions.append(f'{x}=={version}')
return '\n'.join(module_versions)
def __repr__(self):
"""
Build a task representation like `MyTask(param1=1.5, param2='5', data_task=DataTask(id=35tyi))`
"""
params = self.get_params()
param_values = self.get_param_values(params, [], self.param_kwargs)
        # Build up the task representation
repr_parts = []
param_objs = dict(params)
for param_name, param_value in param_values:
param_obj = param_objs[param_name]
if param_obj.significant:
repr_parts.append(f'{param_name}={self._make_representation(param_obj, param_value)}')
task_str = f'{self.get_task_family()}({", ".join(repr_parts)})'
return task_str
def _make_representation(self, param_obj: luigi.Parameter, param_value):
if isinstance(param_obj, TaskInstanceParameter):
return f'{param_value.get_task_family()}({param_value.make_unique_id()})'
if isinstance(param_obj, ListTaskInstanceParameter):
return f"[{', '.join(f'{v.get_task_family()}({v.make_unique_id()})' for v in param_value)}]"
return param_obj.serialize(param_value)
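# Hedged usage sketch (not part of gokart itself): two minimal TaskOnKart subclasses showing the
# load()/dump() flow described in the class docstring. All names below are illustrative only.
class _ExampleMakeTextTask(TaskOnKart):
    def run(self):
        self.dump('hello gokart world')
class _ExampleWordCountTask(TaskOnKart):
    def requires(self):
        return _ExampleMakeTextTask()
    def run(self):
        text = self.load()            # loads the pickled output of _ExampleMakeTextTask
        self.dump(len(text.split()))  # written to this task's default .pkl output target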
| 47.007792 | 159 | 0.635761 |
8855d0b64aa770a180601c992b3ac029e14c582a | 138 | py | Python | tests/spam.py | pji/imgeaser | e8062ec792479eb18accb2936e73cf42dded1498 | [
"MIT"
] | null | null | null | tests/spam.py | pji/imgeaser | e8062ec792479eb18accb2936e73cf42dded1498 | [
"MIT"
] | null | null | null | tests/spam.py | pji/imgeaser | e8062ec792479eb18accb2936e73cf42dded1498 | [
"MIT"
] | null | null | null | """
spam
~~~~
Toy functions for testing.
"""
def spam_eggs():
pass
def spam_bacon():
pass
def spam_baked_beans():
pass
| 7.263158 | 26 | 0.594203 |
2b6287d867cb35414567ee9a78e50c41c4a1340b | 16,703 | py | Python | gs_api_client/swagger/models/template_update.py | gridscale/gridscale_api_client_python | 755b8e8a017784a4f5c6b3a577338ff988c41a9a | [
"MIT"
] | 7 | 2019-07-12T13:59:45.000Z | 2021-03-16T08:46:20.000Z | gs_api_client/swagger/models/template_update.py | gridscale/gridscale_api_client_python | 755b8e8a017784a4f5c6b3a577338ff988c41a9a | [
"MIT"
] | 13 | 2020-01-23T07:50:29.000Z | 2022-03-21T14:32:40.000Z | gs_api_client/swagger/models/template_update.py | gridscale/gridscale_api_client_python | 755b8e8a017784a4f5c6b3a577338ff988c41a9a | [
"MIT"
] | null | null | null | # coding: utf-8
"""
API Specification
# Introduction Welcome to gridscales API documentation. A REST API is a programming interface that allows you to access and send data directly to our systems using HTTPS requests, without the need to use a web GUI. All the functionality you are already familiar with in your control panel is accessible through the API, including expert methods that are only available through the API. Allowing you to script any actions you require, regardless of their complexity. First we will start with a general overview about how the API works, followed by an extensive list of each endpoint, describing them in great detail. ## Requests For security, gridscale requires all API requests are made through the HTTPS protocol so that traffic is encrypted. The following table displays the different type of requests that the interface responds to, depending on the action you require. | Method | Description | | --- | --- | | GET | A simple search of information. The response is a JSON object. Requests using GET are always read-only. | | POST | Adds new objects and object relations. The POST request must contain all the required parameters in the form of a JSON object. | | PATCH | Changes an object or an object relation. The parameters in PATCH requests are usually optional, so only the changed parameters must be specified in a JSON object. | | DELETE | Deletes an object or object relation. The object is deleted if it exists. | | OPTIONS | Get an extensive list of the servers support methods and characteristics. We will not give example OPTION requests on each endpoint, as they are extensive and self-descriptive. | <aside class=\"notice\"> The methods PATCH and DELETE are idempotent - that is, a request with identical parameters can be sent several times, and it doesn't change the result. </aside> ## Status Codes | HTTP Status | `Message` | Description | | --- | --- | --- | | 200 | `OK` | The request has been successfully processed and the result of the request is transmitted in the response. | | 202 | `Accepted` | The request has been accepted, but will run at a later date. Meaning we can not guarantee the success of the request. You should poll the request to be notified once the resource has been provisioned - see the requests endpoint on how to poll. | | 204 | `No Content` | The request was successful, but the answer deliberately contains no data. | | 400 | `Bad Request` | The request message was built incorrectly. | | 401 | `Unauthorised` | The request can not be performed without a valid authentication. X-Auth UserId or X-Auth token HTTP header is not set or the userID / token is invalid. | | 402 | `Payment Required` | Action can not be executed - not provided any or invalid payment methods. | | 403 | `Forbidden` | The request was not carried out due to lack of authorization of the user or because an impossible action was requested. | | 404 | `Not Found` | The requested resource was not found. Will also be used if you do a resource exists, but the user does not have permission for it. | | 405 | `Method Not Allowed` | The request may be made only with other HTTP methods (eg GET rather than POST). | | 409 | `Conflict` | The request was made under false assumptions. For example, a user can not be created twice with the same email. | | 415 | `Unsupported Media Type` | The contents of the request have been submitted with an invalid media type. All POST or PATCH requests must have \"Content-Type : application / json\" as a header, and send a JSON object as a payload. 
| | 416 | `Requested Range Not Satisfiable` | The request could not be fulfilled. It is possible that a resource limit was reached or an IPv4 address pool is exhausted. | | 424 | `Failed Dependency` | The request could not be performed because the object is in the wrong status. | | 429 | `Too Many Requests` | The request has been rejected because rate limits have been exceeded. | <aside class=\"success\"> Status 200-204 indicates that the request has been accepted and is processed. </aside> <aside class=\"notice\"> Status 400-429 indicates that there was a problem with the request that originated on the client. You will find more information about the problem in the body of 4xx response. </aside> <aside class=\"warning\"> A status 500 means that there was a server-side problem and your request can not be processed now. </aside> ## Request Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Auth-userId | The user UUID. This can be found in the panel under \"API\" and will never change ( even after the change of user e-mail). | | X-Auth-Token | Is generated from the API hash and must be sent with all API requests. Both the token and its permissions can be configured in the panel.| ## Response Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Time-Provisioning | The time taken to process the request (in ms). | | X-Api-Identity | The currently active Provisioning API version. Useful when reporting bugs to us. | | X-Request-Id | The unique identifier of the request, be sure to include it when referring to a request. | | RateLimit-Limit | The number of requests that can be made per minute. | | RateLimit-Remaining | The number of requests that still remain before you hit your request limit. | | RateLimit-Reset | A [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in milliseconds of when the rate limit will reset, or the time at which a request no longer will return 429 - Too Many Requests. | ## Timestamp Format All timestamps follow <a href=\"https://de.wikipedia.org/wiki/ISO_8601\" target=\"_blank_\">ISO 8601</a> and issued in <a href=\"https://www.timeanddate.de/zeitzonen/utc-gmt\" target=\"_blank_\">UTC</a> ## CORS ### Cross Origin Resource Sharing To allow API access from other domains that supports the API CORS (Cross Origin Resource Sharing). See: enable-cors.org/ . This allows direct use the API in the browser running a JavaScript web control panel. All this is done in the background by the browser. The following HTTP headers are set by the API: Header | Parameter | Description --- | --- | --- Access-Control-Allow-Methods | GET, POST, PUT, PATCH, DELETE, OPTIONS | Contains all available methods that may be used for queries. Access-Control-Allow-Credentials | true | Is set to \"true\". Allows the browser to send the authentication data via X-Auth HTTP header. Access-Control-Allow-Headers | Origin, X-Requested-With, Content-Type, Accept, X-Auth-UserId, X-Auth-Token, X-Exec-Time, X-API-Version, X-Api-Client | The HTTP headers available for requests. Access-Control-Allow-Origin | * | The domain sent by the browser as a source of demand. Access-Control-Expose-Headers | X-Exec-Time, X-Api-Version | The HTTP headers that can be used by a browser application. ## Rate Limits The number of requests that can be made through our API is currently limited to 210 requests per 60 seconds. The current state of rate limiting is returned within the response headers of each request. 
The relevant response headers are - RateLimit-Limit - RateLimit-Remaining - RateLimit-Reset See the Response Headers section for details. As long as the `RateLimit-Remaining` count is above zero, you will be able to make further requests. As soon as the `RateLimit-Remaining` header value is zero, subsequent requests will return the 429 status code. This will stay until the timestamp given in `RateLimit-Reset` has been reached. ### Example rate limiting response ```shell HTTP/1.0 429 TOO MANY REQUESTS Content-Length: 66 Content-Type: application/json; charset=utf-8 Date: Mon, 11 Nov 2019 11:11:33 GMT RateLimit-Limit: 210 RateLimit-Remaining: 0 RateLimit-Reset: 1573468299256 { \"id\": \"too_many_requests\", \"message\": \"API Rate limit exceeded.\" } ``` It is important to understand how rate limits are reset in order to use the API efficiently. Rate limits are reset for all counted requests at once. This means that that once the timestamp `RateLimit-Remaining` has arrived all counted request are reset and you can again start sending requests to the API. This allows for short burst of traffic. The downside is once you have hit the request limit no more requests are allowed until the rate limit duration is reset. ## Object Relations Relationships describe resource objects (storages, networks, IPs, etc.) that are connected to a server. These relationships are treated like objects themselves and can have properties specific to this relation. One example would be, that the MAC address of a private network connected to a server (Server-to-Network relation) can be found as property of the relation itself - the relation is the _network interface_ in the server. Another example is storage, where the SCSI LUN is also part of the Server-to-Storage relation object. This information is especially interesting if some kind of network boot is used on the servers, where the properties of the server need to be known beforehand. ## Deleted Objects Objects that are deleted are no longer visible on their *regular* endpoints. For historical reasons these objects are still available read-only on a special endpoint named /deleted. If objects have been deleted but have not yet been billed in the current period, the yet-to-be-billed price is still shown. <!-- #strip_js --> ## Node.js / Javascript Library We have a JavaScript library for you to use our API with ease. <a href=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js\"><img src=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js.svg\" alt=\"npm version\" height=\"18\"></a> <aside class=\"success\"> We want to make it even easier for you to manage your Infrastructure via our API - so feel free to contact us with any ideas, or languages you would like to see included. </aside> Requests with our Node.js lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://www.npmjs.com/package/@gridscale/gsclient-js\" target=\"_blank\">click here</a> . <!-- #strip_js_end --> <!-- #strip_go --> ## Golang Library We also have a Golang library for Gophers. Requests with our Golang lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://github.com/gridscale/gsclient-go\" target=\"_blank\">click here</a> . 
<!-- #strip_go_end --> <!-- #strip_python --> ## Python Library We have a Python library that optionally also simplifies the handling of asynchronous requests by mimicking synchronous blocking behaviour. To get started <a href=\"https://pypi.org/project/gs-api-client/\" target=\"_blank\">click here</a> . <!-- #strip_python_end --> # Authentication In order to use the API, the User-UUID and an API_Token are required. Both are available via the web GUI, which can be found on <a href=\"https://my.gridscale.io/APIs/\" target=\"_blank\">Your Account</a> <aside class=\"success\"> If you are logged in, your UUID and Token will be pulled dynamically from your account, so you can copy request examples straight into your code. </aside> The User-UUID remains the same, even if the user's e-mail address is changed. The API_Token is a randomly generated hash that allows read/write access. ## API_Token <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-Token </td></tr></tbody></table> ## User_UUID <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-UserId </td></tr></tbody></table> ## Examples <!-- #strip_js --> > Node.js ``` // to get started // read the docs @ https://www.npmjs.com/package/@gs_js_auth/api var gs_js_auth = require('@gs_js_auth/api').gs_js_auth; var client = new gs_js_auth.Client(\"##API_TOKEN##\",\"##USER_UUID##\"); ``` <!-- #strip_js_end --> <!-- #strip_go --> > Golang ``` // to get started // read the docs @ https://github.com/gridscale/gsclient-go config := gsclient.NewConfiguration( \"https://api.gridscale.io\", \"##USER_UUID##\", \"##API_TOKEN##\", false, //set debug mode ) client := gsclient.NewClient(config) ``` <!-- #strip_go_end --> > Shell Authentication Headers ``` -H \"X-Auth-UserId: ##USER_UUID##\" \\ -H \"X-Auth-Token: ##API_TOKEN##\" \\ ``` > Setting Authentication in your Environment variables ``` export API_TOKEN=\"##API_TOKEN##\" USER_UUID=\"##USER_UUID##\" ``` <aside class=\"notice\"> You must replace <code>USER_UUID</code> and <code>API_Token</code> with your personal UUID and API key respectively. </aside> # noqa: E501
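> Python (plain HTTP) A minimal, illustrative sketch - not one of the official client libraries - that combines the authentication headers with the 429/`RateLimit-Reset` handling described in the Rate Limits section above. It assumes the standard `requests` package; the request path is a placeholder to be replaced with the endpoint you need. ```
import os
import time

import requests

BASE_URL = 'https://api.gridscale.io'
HEADERS = {
    'Content-Type': 'application/json',
    'X-Auth-UserId': os.environ['USER_UUID'],
    'X-Auth-Token': os.environ['API_TOKEN'],
}

def get_with_backoff(path):
    # Retry automatically once the RateLimit-Reset timestamp (milliseconds) has passed.
    while True:
        response = requests.get(BASE_URL + path, headers=HEADERS)
        if response.status_code != 429:
            response.raise_for_status()
            return response.json()
        reset_ms = int(response.headers['RateLimit-Reset'])
        time.sleep(max(reset_ms / 1000.0 - time.time(), 0) + 1)
```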
OpenAPI spec version: 1.0.50
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TemplateUpdate(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'labels': 'list[str]'
}
attribute_map = {
'name': 'name',
'labels': 'labels'
}
def __init__(self, name=None, labels=None): # noqa: E501
"""TemplateUpdate - a model defined in Swagger""" # noqa: E501
self._name = None
self._labels = None
self.discriminator = None
if name is not None:
self.name = name
if labels is not None:
self.labels = labels
@property
def name(self):
"""Gets the name of this TemplateUpdate. # noqa: E501
The human-readable name of the object. It supports the full UTF-8 character set, with a maximum of 64 characters. # noqa: E501
:return: The name of this TemplateUpdate. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TemplateUpdate.
The human-readable name of the object. It supports the full UTF-8 character set, with a maximum of 64 characters. # noqa: E501
:param name: The name of this TemplateUpdate. # noqa: E501
:type: str
"""
self._name = name
@property
def labels(self):
"""Gets the labels of this TemplateUpdate. # noqa: E501
List of labels. # noqa: E501
:return: The labels of this TemplateUpdate. # noqa: E501
:rtype: list[str]
"""
return self._labels
@labels.setter
def labels(self, labels):
"""Sets the labels of this TemplateUpdate.
List of labels. # noqa: E501
:param labels: The labels of this TemplateUpdate. # noqa: E501
:type: list[str]
"""
self._labels = labels
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TemplateUpdate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateUpdate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
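# --- Illustrative usage (not part of the generated module) ---
# A minimal sketch showing how the model serialises; the name and labels below
# are made-up example values.
if __name__ == "__main__":
    update = TemplateUpdate(name="my-renamed-template", labels=["example-label"])
    print(update.to_dict())  # {'name': 'my-renamed-template', 'labels': ['example-label']}
    print(update)            # pretty-printed dict via to_str()/__repr__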
| 114.40411 | 12,787 | 0.699994 |
269ddc21d1774042d00fa8fcd2803ede4b31bfdf | 544 | py | Python | plotly/validators/scatterpolargl/marker/line/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/scatterpolargl/marker/line/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/scatterpolargl/marker/line/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scatterpolargl.marker.line',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=True,
edit_type='calc',
role='style',
colorscale_path='scatterpolargl.marker.line.colorscale',
**kwargs
)
| 25.904762 | 68 | 0.604779 |
3f164d5256cda3e67d573142dcd0e9421fd54664 | 3,427 | py | Python | dxm/lib/DxAsyncTask/DxAsyncTask.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 5 | 2018-08-23T15:47:05.000Z | 2022-01-19T23:38:18.000Z | dxm/lib/DxAsyncTask/DxAsyncTask.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 59 | 2018-10-15T10:37:00.000Z | 2022-03-22T20:49:25.000Z | dxm/lib/DxAsyncTask/DxAsyncTask.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 12 | 2019-03-08T19:59:13.000Z | 2021-12-16T03:28:04.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2019 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : September 2019
import logging
from time import sleep
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
from dxm.lib.DxEngine.DxMaskingEngine import DxMaskingEngine
from dxm.lib.masking_api.api.async_task_api import AsyncTaskApi
from dxm.lib.masking_api.rest import ApiException
class DxAsyncTask(object):
swagger_types = {
'async_task_id': 'int',
'operation': 'str',
'reference': 'str',
'status': 'str',
'start_time': 'datetime',
'end_time': 'datetime',
'cancellable': 'bool'
}
swagger_map = {
'async_task_id': 'asyncTaskId',
'operation': 'operation',
'reference': 'reference',
'status': 'status',
'start_time': 'startTime',
'end_time': 'endTime',
'cancellable': 'cancellable'
}
def __init__(self):
"""
Constructor. Takes no arguments; the shared DxMaskingEngine class and the
AsyncTaskApi client are referenced internally.
"""
#AsyncTask.__init__(self)
self.__engine = DxMaskingEngine
self.__logger = logging.getLogger()
self.__logger.debug("creating DxAsyncTask object")
self.__api = AsyncTaskApi
self.__obj = None
self.__apiexc = ApiException
@property
def obj(self):
if self.__obj is not None:
return self.__obj
else:
return None
def from_asynctask(self, task):
"""
Set the obj property using an AsyncTask
:param task: AsyncTask object returned by the masking API
"""
self.__obj = task
self.__obj.swagger_map = self.swagger_map
self.__obj.swagger_types = self.swagger_types
def wait_for_task(self):
"""
Poll the engine until the asynchronous task leaves the RUNNING state.
:return: 0 if the task SUCCEEDED, 1 otherwise
"""
api_instance = self.__api(self.__engine.api_client)
try:
running = True
while(running):
self.__logger.debug("wait async input %s" % str(self))
response = api_instance.get_async_task(
self.obj.async_task_id,
_request_timeout=self.__engine.get_timeout())
self.__logger.debug("wait async response %s"
% str(response))
if response.status != "RUNNING":
running = False
sleep(1)
print_message("Waiting for task %s to complete " % self.obj.async_task_id)
if response.status == "SUCCEEDED":
print_message("Task finished successfully")
return 0
else:
print_error("Task finished with status %s" % response.status)
return 1
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
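# Illustrative usage sketch (the `start_response` object below is an assumption:
# any masking API response that carries an AsyncTask in its `async_task` field):
#
#     task = DxAsyncTask()
#     task.from_asynctask(start_response.async_task)
#     rc = task.wait_for_task()  # 0 when the engine reports SUCCEEDED, 1 otherwise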
| 29.290598 | 90 | 0.600233 |
5ca0ec76f16c6abe6a86cccdf1ff3375f4fcfef7 | 33,571 | py | Python | tests/functional/test_lambda_trigger_events.py | igorlg/aws-lambda-powertools-python | 7addeab6fcc57924a64ccd867caf312d1153b55a | [
"MIT-0"
] | null | null | null | tests/functional/test_lambda_trigger_events.py | igorlg/aws-lambda-powertools-python | 7addeab6fcc57924a64ccd867caf312d1153b55a | [
"MIT-0"
] | null | null | null | tests/functional/test_lambda_trigger_events.py | igorlg/aws-lambda-powertools-python | 7addeab6fcc57924a64ccd867caf312d1153b55a | [
"MIT-0"
] | null | null | null | import base64
import json
import os
from secrets import compare_digest
from urllib.parse import quote_plus
from aws_lambda_powertools.utilities.data_classes import (
ALBEvent,
APIGatewayProxyEvent,
APIGatewayProxyEventV2,
CloudWatchLogsEvent,
EventBridgeEvent,
KinesisStreamEvent,
S3Event,
SESEvent,
SNSEvent,
SQSEvent,
)
from aws_lambda_powertools.utilities.data_classes.cognito_user_pool_event import (
CreateAuthChallengeTriggerEvent,
CustomMessageTriggerEvent,
DefineAuthChallengeTriggerEvent,
PostAuthenticationTriggerEvent,
PostConfirmationTriggerEvent,
PreAuthenticationTriggerEvent,
PreSignUpTriggerEvent,
PreTokenGenerationTriggerEvent,
UserMigrationTriggerEvent,
VerifyAuthChallengeResponseTriggerEvent,
)
from aws_lambda_powertools.utilities.data_classes.common import BaseProxyEvent
from aws_lambda_powertools.utilities.data_classes.dynamo_db_stream_event import (
AttributeValue,
DynamoDBRecordEventName,
DynamoDBStreamEvent,
StreamViewType,
)
def load_event(file_name: str) -> dict:
full_file_name = os.path.dirname(os.path.realpath(__file__)) + "/../events/" + file_name
with open(full_file_name) as fp:
return json.load(fp)
def test_cloud_watch_trigger_event():
event = CloudWatchLogsEvent(load_event("cloudWatchLogEvent.json"))
decompressed_logs_data = event.decompress_logs_data
assert event.decompress_logs_data == decompressed_logs_data
json_logs_data = event.parse_logs_data()
assert event.parse_logs_data()._data == json_logs_data._data
log_events = json_logs_data.log_events
log_event = log_events[0]
assert json_logs_data.owner == "123456789123"
assert json_logs_data.log_group == "testLogGroup"
assert json_logs_data.log_stream == "testLogStream"
assert json_logs_data.subscription_filters == ["testFilter"]
assert json_logs_data.message_type == "DATA_MESSAGE"
assert log_event.get_id == "eventId1"
assert log_event.timestamp == 1440442987000
assert log_event.message == "[ERROR] First test message"
assert log_event.extracted_fields is None
event2 = CloudWatchLogsEvent(load_event("cloudWatchLogEvent.json"))
assert event._data == event2._data
def test_cognito_pre_signup_trigger_event():
event = PreSignUpTriggerEvent(load_event("cognitoPreSignUpEvent.json"))
# Verify BaseTriggerEvent properties
assert event.version == "string"
assert event.trigger_source == "PreSignUp_SignUp"
assert event.region == "us-east-1"
assert event.user_pool_id == "string"
assert event.user_name == "userName"
caller_context = event.caller_context
assert caller_context.aws_sdk_version == "awsSdkVersion"
assert caller_context.client_id == "clientId"
# Verify properties
user_attributes = event.request.user_attributes
assert user_attributes["email"] == "user@example.com"
assert event.request.validation_data is None
assert event.request.client_metadata is None
# Verify setters
event.response.auto_confirm_user = True
assert event.response.auto_confirm_user is True
event.response.auto_verify_phone = True
assert event.response.auto_verify_phone is True
event.response.auto_verify_email = True
assert event.response.auto_verify_email is True
assert event["response"]["autoVerifyEmail"] is True
def test_cognito_post_confirmation_trigger_event():
event = PostConfirmationTriggerEvent(load_event("cognitoPostConfirmationEvent.json"))
assert event.trigger_source == "PostConfirmation_ConfirmSignUp"
user_attributes = event.request.user_attributes
assert user_attributes["email"] == "user@example.com"
assert event.request.client_metadata is None
def test_cognito_user_migration_trigger_event():
event = UserMigrationTriggerEvent(load_event("cognitoUserMigrationEvent.json"))
assert event.trigger_source == "UserMigration_Authentication"
assert compare_digest(event.request.password, event["request"]["password"])
assert event.request.validation_data is None
assert event.request.client_metadata is None
event.response.user_attributes = {"username": "username"}
assert event.response.user_attributes == event["response"]["userAttributes"]
assert event.response.user_attributes == {"username": "username"}
assert event.response.final_user_status is None
assert event.response.message_action is None
assert event.response.force_alias_creation is None
assert event.response.desired_delivery_mediums is None
event.response.final_user_status = "CONFIRMED"
assert event.response.final_user_status == "CONFIRMED"
event.response.message_action = "SUPPRESS"
assert event.response.message_action == "SUPPRESS"
event.response.force_alias_creation = True
assert event.response.force_alias_creation is True
event.response.desired_delivery_mediums = ["EMAIL"]
assert event.response.desired_delivery_mediums == ["EMAIL"]
def test_cognito_custom_message_trigger_event():
event = CustomMessageTriggerEvent(load_event("cognitoCustomMessageEvent.json"))
assert event.trigger_source == "CustomMessage_AdminCreateUser"
assert event.request.code_parameter == "####"
assert event.request.username_parameter == "username"
assert event.request.user_attributes["phone_number_verified"] is False
assert event.request.client_metadata is None
event.response.sms_message = "sms"
assert event.response.sms_message == event["response"]["smsMessage"]
event.response.email_message = "email"
assert event.response.email_message == event["response"]["emailMessage"]
event.response.email_subject = "subject"
assert event.response.email_subject == event["response"]["emailSubject"]
def test_cognito_pre_authentication_trigger_event():
event = PreAuthenticationTriggerEvent(load_event("cognitoPreAuthenticationEvent.json"))
assert event.trigger_source == "PreAuthentication_Authentication"
assert event.request.user_not_found is None
event["request"]["userNotFound"] = True
assert event.request.user_not_found is True
assert event.request.user_attributes["email"] == "pre-auth@mail.com"
assert event.request.validation_data is None
def test_cognito_post_authentication_trigger_event():
event = PostAuthenticationTriggerEvent(load_event("cognitoPostAuthenticationEvent.json"))
assert event.trigger_source == "PostAuthentication_Authentication"
assert event.request.new_device_used is True
assert event.request.user_attributes["email"] == "post-auth@mail.com"
assert event.request.client_metadata is None
def test_cognito_pre_token_generation_trigger_event():
event = PreTokenGenerationTriggerEvent(load_event("cognitoPreTokenGenerationEvent.json"))
assert event.trigger_source == "TokenGeneration_Authentication"
group_configuration = event.request.group_configuration
assert group_configuration.groups_to_override == []
assert group_configuration.iam_roles_to_override == []
assert group_configuration.preferred_role is None
assert event.request.user_attributes["email"] == "test@mail.com"
assert event.request.client_metadata is None
event["request"]["groupConfiguration"]["preferredRole"] = "temp"
group_configuration = event.request.group_configuration
assert group_configuration.preferred_role == "temp"
assert event["response"].get("claimsOverrideDetails") is None
claims_override_details = event.response.claims_override_details
assert event["response"]["claimsOverrideDetails"] == {}
assert claims_override_details.claims_to_add_or_override is None
assert claims_override_details.claims_to_suppress is None
assert claims_override_details.group_configuration is None
claims_override_details.group_configuration = {}
assert claims_override_details.group_configuration._data == {}
assert event["response"]["claimsOverrideDetails"]["groupOverrideDetails"] == {}
expected_claims = {"test": "value"}
claims_override_details.claims_to_add_or_override = expected_claims
assert claims_override_details.claims_to_add_or_override["test"] == "value"
assert event["response"]["claimsOverrideDetails"]["claimsToAddOrOverride"] == expected_claims
claims_override_details.claims_to_suppress = ["email"]
assert claims_override_details.claims_to_suppress[0] == "email"
assert event["response"]["claimsOverrideDetails"]["claimsToSuppress"] == ["email"]
expected_groups = ["group-A", "group-B"]
claims_override_details.set_group_configuration_groups_to_override(expected_groups)
assert claims_override_details.group_configuration.groups_to_override == expected_groups
assert event["response"]["claimsOverrideDetails"]["groupOverrideDetails"]["groupsToOverride"] == expected_groups
claims_override_details.set_group_configuration_iam_roles_to_override(["role"])
assert claims_override_details.group_configuration.iam_roles_to_override == ["role"]
assert event["response"]["claimsOverrideDetails"]["groupOverrideDetails"]["iamRolesToOverride"] == ["role"]
claims_override_details.set_group_configuration_preferred_role("role_name")
assert claims_override_details.group_configuration.preferred_role == "role_name"
assert event["response"]["claimsOverrideDetails"]["groupOverrideDetails"]["preferredRole"] == "role_name"
def test_cognito_define_auth_challenge_trigger_event():
event = DefineAuthChallengeTriggerEvent(load_event("cognitoDefineAuthChallengeEvent.json"))
assert event.trigger_source == "DefineAuthChallenge_Authentication"
# Verify properties
assert event.request.user_attributes["email"] == "define-auth@mail.com"
assert event.request.user_not_found is True
session = event.request.session
assert len(session) == 2
assert session[0].challenge_name == "PASSWORD_VERIFIER"
assert session[0].challenge_result is True
assert session[0].challenge_metadata is None
assert session[1].challenge_metadata == "CAPTCHA_CHALLENGE"
assert event.request.client_metadata is None
# Verify setters
event.response.challenge_name = "CUSTOM_CHALLENGE"
assert event.response.challenge_name == event["response"]["challengeName"]
assert event.response.challenge_name == "CUSTOM_CHALLENGE"
event.response.fail_authentication = True
assert event.response.fail_authentication is True
assert event.response.fail_authentication == event["response"]["failAuthentication"]
event.response.issue_tokens = True
assert event.response.issue_tokens is True
assert event.response.issue_tokens == event["response"]["issueTokens"]
def test_create_auth_challenge_trigger_event():
event = CreateAuthChallengeTriggerEvent(load_event("cognitoCreateAuthChallengeEvent.json"))
assert event.trigger_source == "CreateAuthChallenge_Authentication"
# Verify properties
assert event.request.user_attributes["email"] == "create-auth@mail.com"
assert event.request.user_not_found is False
assert event.request.challenge_name == "PASSWORD_VERIFIER"
session = event.request.session
assert len(session) == 1
assert session[0].challenge_name == "CUSTOM_CHALLENGE"
assert session[0].challenge_metadata == "CAPTCHA_CHALLENGE"
assert event.request.client_metadata is None
# Verify setters
event.response.public_challenge_parameters = {"test": "value"}
assert event.response.public_challenge_parameters == event["response"]["publicChallengeParameters"]
assert event.response.public_challenge_parameters["test"] == "value"
event.response.private_challenge_parameters = {"private": "value"}
assert event.response.private_challenge_parameters == event["response"]["privateChallengeParameters"]
assert event.response.private_challenge_parameters["private"] == "value"
event.response.challenge_metadata = "meta"
assert event.response.challenge_metadata == event["response"]["challengeMetadata"]
assert event.response.challenge_metadata == "meta"
def test_verify_auth_challenge_response_trigger_event():
event = VerifyAuthChallengeResponseTriggerEvent(load_event("cognitoVerifyAuthChallengeResponseEvent.json"))
assert event.trigger_source == "VerifyAuthChallengeResponse_Authentication"
# Verify properties
assert event.request.user_attributes["email"] == "verify-auth@mail.com"
assert event.request.private_challenge_parameters["answer"] == "challengeAnswer"
assert event.request.challenge_answer == "challengeAnswer"
assert event.request.client_metadata is not None
assert event.request.client_metadata["foo"] == "value"
assert event.request.user_not_found is True
# Verify setters
event.response.answer_correct = True
assert event.response.answer_correct == event["response"]["answerCorrect"]
assert event.response.answer_correct is True
def test_dynamo_db_stream_trigger_event():
event = DynamoDBStreamEvent(load_event("dynamoStreamEvent.json"))
records = list(event.records)
record = records[0]
assert record.aws_region == "us-west-2"
dynamodb = record.dynamodb
assert dynamodb is not None
assert dynamodb.approximate_creation_date_time is None
keys = dynamodb.keys
assert keys is not None
id_key = keys["Id"]
assert id_key.b_value is None
assert id_key.bs_value is None
assert id_key.bool_value is None
assert id_key.list_value is None
assert id_key.map_value is None
assert id_key.n_value == "101"
assert id_key.ns_value is None
assert id_key.null_value is None
assert id_key.s_value is None
assert id_key.ss_value is None
message_key = dynamodb.new_image["Message"]
assert message_key is not None
assert message_key.s_value == "New item!"
assert dynamodb.old_image is None
assert dynamodb.sequence_number == "111"
assert dynamodb.size_bytes == 26
assert dynamodb.stream_view_type == StreamViewType.NEW_AND_OLD_IMAGES
assert record.event_id == "1"
assert record.event_name is DynamoDBRecordEventName.INSERT
assert record.event_source == "aws:dynamodb"
assert record.event_source_arn == "eventsource_arn"
assert record.event_version == "1.0"
assert record.user_identity is None
def test_dynamo_attribute_value_list_value():
example_attribute_value = {"L": [{"S": "Cookies"}, {"S": "Coffee"}, {"N": "3.14159"}]}
attribute_value = AttributeValue(example_attribute_value)
list_value = attribute_value.list_value
assert list_value is not None
item = list_value[0]
assert item.s_value == "Cookies"
def test_dynamo_attribute_value_map_value():
example_attribute_value = {"M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}}
attribute_value = AttributeValue(example_attribute_value)
map_value = attribute_value.map_value
assert map_value is not None
item = map_value["Name"]
assert item.s_value == "Joe"
def test_event_bridge_event():
event = EventBridgeEvent(load_event("eventBridgeEvent.json"))
assert event.get_id == event["id"]
assert event.version == event["version"]
assert event.account == event["account"]
assert event.time == event["time"]
assert event.region == event["region"]
assert event.resources == event["resources"]
assert event.source == event["source"]
assert event.detail_type == event["detail-type"]
assert event.detail == event["detail"]
assert event.replay_name == "replay_archive"
def test_s3_trigger_event():
event = S3Event(load_event("s3Event.json"))
records = list(event.records)
assert len(records) == 1
record = records[0]
assert record.event_version == "2.1"
assert record.event_source == "aws:s3"
assert record.aws_region == "us-east-2"
assert record.event_time == "2019-09-03T19:37:27.192Z"
assert record.event_name == "ObjectCreated:Put"
user_identity = record.user_identity
assert user_identity.principal_id == "AWS:AIDAINPONIXQXHT3IKHL2"
request_parameters = record.request_parameters
assert request_parameters.source_ip_address == "205.255.255.255"
assert record.response_elements["x-amz-request-id"] == "D82B88E5F771F645"
s3 = record.s3
assert s3.s3_schema_version == "1.0"
assert s3.configuration_id == "828aa6fc-f7b5-4305-8584-487c791949c1"
bucket = s3.bucket
assert bucket.name == "lambda-artifacts-deafc19498e3f2df"
assert bucket.owner_identity.principal_id == "A3I5XTEXAMAI3E"
assert bucket.arn == "arn:aws:s3:::lambda-artifacts-deafc19498e3f2df"
assert s3.get_object.key == "b21b84d653bb07b05b1e6b33684dc11b"
assert s3.get_object.size == 1305107
assert s3.get_object.etag == "b21b84d653bb07b05b1e6b33684dc11b"
assert s3.get_object.version_id is None
assert s3.get_object.sequencer == "0C0F6F405D6ED209E1"
assert record.glacier_event_data is None
assert event.record._data == event["Records"][0]
assert event.bucket_name == "lambda-artifacts-deafc19498e3f2df"
assert event.object_key == "b21b84d653bb07b05b1e6b33684dc11b"
def test_s3_key_unquote_plus():
tricky_name = "foo name+value"
event_dict = {"Records": [{"s3": {"object": {"key": quote_plus(tricky_name)}}}]}
event = S3Event(event_dict)
assert event.object_key == tricky_name
def test_s3_glacier_event():
example_event = {
"Records": [
{
"glacierEventData": {
"restoreEventData": {
"lifecycleRestorationExpiryTime": "1970-01-01T00:01:00.000Z",
"lifecycleRestoreStorageClass": "standard",
}
}
}
]
}
event = S3Event(example_event)
record = next(event.records)
glacier_event_data = record.glacier_event_data
assert glacier_event_data is not None
assert glacier_event_data.restore_event_data.lifecycle_restoration_expiry_time == "1970-01-01T00:01:00.000Z"
assert glacier_event_data.restore_event_data.lifecycle_restore_storage_class == "standard"
def test_ses_trigger_event():
event = SESEvent(load_event("sesEvent.json"))
expected_address = "johndoe@example.com"
records = list(event.records)
record = records[0]
assert record.event_source == "aws:ses"
assert record.event_version == "1.0"
mail = record.ses.mail
assert mail.timestamp == "1970-01-01T00:00:00.000Z"
assert mail.source == "janedoe@example.com"
assert mail.message_id == "o3vrnil0e2ic28tr"
assert mail.destination == [expected_address]
assert mail.headers_truncated is False
headers = list(mail.headers)
assert len(headers) == 10
assert headers[0].name == "Return-Path"
assert headers[0].value == "<janedoe@example.com>"
common_headers = mail.common_headers
assert common_headers.return_path == "janedoe@example.com"
assert common_headers.get_from == common_headers._data["from"]
assert common_headers.date == "Wed, 7 Oct 2015 12:34:56 -0700"
assert common_headers.to == [expected_address]
assert common_headers.message_id == "<0123456789example.com>"
assert common_headers.subject == "Test Subject"
receipt = record.ses.receipt
assert receipt.timestamp == "1970-01-01T00:00:00.000Z"
assert receipt.processing_time_millis == 574
assert receipt.recipients == [expected_address]
assert receipt.spam_verdict.status == "PASS"
assert receipt.virus_verdict.status == "PASS"
assert receipt.spf_verdict.status == "PASS"
assert receipt.dmarc_verdict.status == "PASS"
action = receipt.action
assert action.get_type == action._data["type"]
assert action.function_arn == action._data["functionArn"]
assert action.invocation_type == action._data["invocationType"]
assert event.record._data == event["Records"][0]
assert event.mail._data == event["Records"][0]["ses"]["mail"]
assert event.receipt._data == event["Records"][0]["ses"]["receipt"]
def test_sns_trigger_event():
event = SNSEvent(load_event("snsEvent.json"))
records = list(event.records)
assert len(records) == 1
record = records[0]
assert record.event_version == "1.0"
assert record.event_subscription_arn == "arn:aws:sns:us-east-2:123456789012:sns-la ..."
assert record.event_source == "aws:sns"
sns = record.sns
assert sns.signature_version == "1"
assert sns.timestamp == "2019-01-02T12:45:07.000Z"
assert sns.signature == "tcc6faL2yUC6dgZdmrwh1Y4cGa/ebXEkAi6RibDsvpi+tE/1+82j...65r=="
assert sns.signing_cert_url == "https://sns.us-east-2.amazonaws.com/SimpleNotification"
assert sns.message_id == "95df01b4-ee98-5cb9-9903-4c221d41eb5e"
assert sns.message == "Hello from SNS!"
message_attributes = sns.message_attributes
test_message_attribute = message_attributes["Test"]
assert test_message_attribute.get_type == "String"
assert test_message_attribute.value == "TestString"
assert sns.get_type == "Notification"
assert sns.unsubscribe_url == "https://sns.us-east-2.amazonaws.com/?Action=Unsubscribe"
assert sns.topic_arn == "arn:aws:sns:us-east-2:123456789012:sns-lambda"
assert sns.subject == "TestInvoke"
assert event.record._data == event["Records"][0]
assert event.sns_message == "Hello from SNS!"
def test_sqs_trigger_event():
event = SQSEvent(load_event("sqsEvent.json"))
records = list(event.records)
record = records[0]
attributes = record.attributes
message_attributes = record.message_attributes
test_attr = message_attributes["testAttr"]
assert len(records) == 2
assert record.message_id == "059f36b4-87a3-44ab-83d2-661975830a7d"
assert record.receipt_handle == "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a..."
assert record.body == "Test message."
assert attributes.aws_trace_header is None
assert attributes.approximate_receive_count == "1"
assert attributes.sent_timestamp == "1545082649183"
assert attributes.sender_id == "AIDAIENQZJOLO23YVJ4VO"
assert attributes.approximate_first_receive_timestamp == "1545082649185"
assert attributes.sequence_number is None
assert attributes.message_group_id is None
assert attributes.message_deduplication_id is None
assert message_attributes["NotFound"] is None
assert message_attributes.get("NotFound") is None
assert test_attr.string_value == "100"
assert test_attr.binary_value == "base64Str"
assert test_attr.data_type == "Number"
assert record.md5_of_body == "e4e68fb7bd0e697a0ae8f1bb342846b3"
assert record.event_source == "aws:sqs"
assert record.event_source_arn == "arn:aws:sqs:us-east-2:123456789012:my-queue"
assert record.aws_region == "us-east-2"
def test_api_gateway_proxy_event():
event = APIGatewayProxyEvent(load_event("apiGatewayProxyEvent.json"))
assert event.version == event["version"]
assert event.resource == event["resource"]
assert event.path == event["path"]
assert event.http_method == event["httpMethod"]
assert event.headers == event["headers"]
assert event.multi_value_headers == event["multiValueHeaders"]
assert event.query_string_parameters == event["queryStringParameters"]
assert event.multi_value_query_string_parameters == event["multiValueQueryStringParameters"]
request_context = event.request_context
assert request_context.account_id == event["requestContext"]["accountId"]
assert request_context.api_id == event["requestContext"]["apiId"]
authorizer = request_context.authorizer
assert authorizer.claims is None
assert authorizer.scopes is None
assert request_context.domain_name == event["requestContext"]["domainName"]
assert request_context.domain_prefix == event["requestContext"]["domainPrefix"]
assert request_context.extended_request_id == event["requestContext"]["extendedRequestId"]
assert request_context.http_method == event["requestContext"]["httpMethod"]
identity = request_context.identity
assert identity.access_key == event["requestContext"]["identity"]["accessKey"]
assert identity.account_id == event["requestContext"]["identity"]["accountId"]
assert identity.caller == event["requestContext"]["identity"]["caller"]
assert (
identity.cognito_authentication_provider == event["requestContext"]["identity"]["cognitoAuthenticationProvider"]
)
assert identity.cognito_authentication_type == event["requestContext"]["identity"]["cognitoAuthenticationType"]
assert identity.cognito_identity_id == event["requestContext"]["identity"]["cognitoIdentityId"]
assert identity.cognito_identity_pool_id == event["requestContext"]["identity"]["cognitoIdentityPoolId"]
assert identity.principal_org_id == event["requestContext"]["identity"]["principalOrgId"]
assert identity.source_ip == event["requestContext"]["identity"]["sourceIp"]
assert identity.user == event["requestContext"]["identity"]["user"]
assert identity.user_agent == event["requestContext"]["identity"]["userAgent"]
assert identity.user_arn == event["requestContext"]["identity"]["userArn"]
assert request_context.path == event["requestContext"]["path"]
assert request_context.protocol == event["requestContext"]["protocol"]
assert request_context.request_id == event["requestContext"]["requestId"]
assert request_context.request_time == event["requestContext"]["requestTime"]
assert request_context.request_time_epoch == event["requestContext"]["requestTimeEpoch"]
assert request_context.resource_id == event["requestContext"]["resourceId"]
assert request_context.resource_path == event["requestContext"]["resourcePath"]
assert request_context.stage == event["requestContext"]["stage"]
assert event.path_parameters == event["pathParameters"]
assert event.stage_variables == event["stageVariables"]
assert event.body == event["body"]
assert event.is_base64_encoded == event["isBase64Encoded"]
assert request_context.connected_at is None
assert request_context.connection_id is None
assert request_context.event_type is None
assert request_context.message_direction is None
assert request_context.message_id is None
assert request_context.route_key is None
assert identity.api_key is None
assert identity.api_key_id is None
def test_api_gateway_proxy_v2_event():
event = APIGatewayProxyEventV2(load_event("apiGatewayProxyV2Event.json"))
assert event.version == event["version"]
assert event.route_key == event["routeKey"]
assert event.raw_path == event["rawPath"]
assert event.raw_query_string == event["rawQueryString"]
assert event.cookies == event["cookies"]
assert event.cookies[0] == "cookie1"
assert event.headers == event["headers"]
assert event.query_string_parameters == event["queryStringParameters"]
assert event.query_string_parameters["parameter2"] == "value"
request_context = event.request_context
assert request_context.account_id == event["requestContext"]["accountId"]
assert request_context.api_id == event["requestContext"]["apiId"]
assert request_context.authorizer.jwt_claim == event["requestContext"]["authorizer"]["jwt"]["claims"]
assert request_context.authorizer.jwt_scopes == event["requestContext"]["authorizer"]["jwt"]["scopes"]
assert request_context.domain_name == event["requestContext"]["domainName"]
assert request_context.domain_prefix == event["requestContext"]["domainPrefix"]
http = request_context.http
assert http.method == "POST"
assert http.path == "/my/path"
assert http.protocol == "HTTP/1.1"
assert http.source_ip == "IP"
assert http.user_agent == "agent"
assert request_context.request_id == event["requestContext"]["requestId"]
assert request_context.route_key == event["requestContext"]["routeKey"]
assert request_context.stage == event["requestContext"]["stage"]
assert request_context.time == event["requestContext"]["time"]
assert request_context.time_epoch == event["requestContext"]["timeEpoch"]
assert event.body == event["body"]
assert event.path_parameters == event["pathParameters"]
assert event.is_base64_encoded == event["isBase64Encoded"]
assert event.stage_variables == event["stageVariables"]
def test_api_gateway_proxy_v2_lambda_authorizer_event():
event = APIGatewayProxyEventV2(load_event("apiGatewayProxyV2LambdaAuthorizerEvent.json"))
request_context = event.request_context
assert request_context is not None
lambda_props = request_context.authorizer.get_lambda
assert lambda_props is not None
assert lambda_props["key"] == "value"
def test_api_gateway_proxy_v2_iam_event():
event = APIGatewayProxyEventV2(load_event("apiGatewayProxyV2IamEvent.json"))
iam = event.request_context.authorizer.iam
assert iam is not None
assert iam.access_key == "ARIA2ZJZYVUEREEIHAKY"
assert iam.account_id == "1234567890"
assert iam.caller_id == "AROA7ZJZYVRE7C3DUXHH6:CognitoIdentityCredentials"
assert iam.cognito_amr == ["foo"]
assert iam.cognito_identity_id == "us-east-1:3f291106-8703-466b-8f2b-3ecee1ca56ce"
assert iam.cognito_identity_pool_id == "us-east-1:4f291106-8703-466b-8f2b-3ecee1ca56ce"
assert iam.principal_org_id == "AwsOrgId"
assert iam.user_arn == "arn:aws:iam::1234567890:user/Admin"
assert iam.user_id == "AROA2ZJZYVRE7Y3TUXHH6"
def test_base_proxy_event_get_query_string_value():
default_value = "default"
set_value = "value"
event = BaseProxyEvent({})
value = event.get_query_string_value("test", default_value)
assert value == default_value
event._data["queryStringParameters"] = {"test": set_value}
value = event.get_query_string_value("test", default_value)
assert value == set_value
value = event.get_query_string_value("unknown", default_value)
assert value == default_value
value = event.get_query_string_value("unknown")
assert value is None
def test_base_proxy_event_get_header_value():
default_value = "default"
set_value = "value"
event = BaseProxyEvent({"headers": {}})
value = event.get_header_value("test", default_value)
assert value == default_value
event._data["headers"] = {"test": set_value}
value = event.get_header_value("test", default_value)
assert value == set_value
# Verify that the default lookup is case insensitive
value = event.get_header_value("Test")
assert value == set_value
value = event.get_header_value("unknown", default_value)
assert value == default_value
value = event.get_header_value("unknown")
assert value is None
def test_base_proxy_event_get_header_value_case_sensitive():
default_value = "default"
set_value = "value"
event = BaseProxyEvent({"headers": {}})
event._data["headers"] = {"Test": set_value}
value = event.get_header_value("test", case_sensitive=True)
assert value is None
value = event.get_header_value("test", default_value=default_value, case_sensitive=True)
assert value == default_value
value = event.get_header_value("Test", case_sensitive=True)
assert value == set_value
value = event.get_header_value("unknown", default_value, case_sensitive=True)
assert value == default_value
value = event.get_header_value("unknown", case_sensitive=True)
assert value is None
def test_kinesis_stream_event():
event = KinesisStreamEvent(load_event("kinesisStreamEvent.json"))
records = list(event.records)
assert len(records) == 2
record = records[0]
assert record.aws_region == "us-east-2"
assert record.event_id == "shardId-000000000006:49590338271490256608559692538361571095921575989136588898"
assert record.event_name == "aws:kinesis:record"
assert record.event_source == "aws:kinesis"
assert record.event_source_arn == "arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream"
assert record.event_version == "1.0"
assert record.invoke_identity_arn == "arn:aws:iam::123456789012:role/lambda-role"
kinesis = record.kinesis
assert kinesis._data["kinesis"] == event["Records"][0]["kinesis"]
assert kinesis.approximate_arrival_timestamp == 1545084650.987
assert kinesis.data == event["Records"][0]["kinesis"]["data"]
assert kinesis.kinesis_schema_version == "1.0"
assert kinesis.partition_key == "1"
assert kinesis.sequence_number == "49590338271490256608559692538361571095921575989136588898"
assert kinesis.data_as_text() == "Hello, this is a test."
def test_kinesis_stream_event_json_data():
json_value = {"test": "value"}
data = base64.b64encode(bytes(json.dumps(json_value), "utf-8")).decode("utf-8")
event = KinesisStreamEvent({"Records": [{"kinesis": {"data": data}}]})
assert next(event.records).kinesis.data_as_json() == json_value
def test_alb_event():
event = ALBEvent(load_event("albEvent.json"))
assert event.request_context.elb_target_group_arn == event["requestContext"]["elb"]["targetGroupArn"]
assert event.http_method == event["httpMethod"]
assert event.path == event["path"]
assert event.query_string_parameters == event["queryStringParameters"]
assert event.headers == event["headers"]
assert event.multi_value_query_string_parameters == event.get("multiValueQueryStringParameters")
assert event.multi_value_headers == event.get("multiValueHeaders")
assert event.body == event["body"]
assert event.is_base64_encoded == event["isBase64Encoded"]
| 43.094994 | 120 | 0.74612 |
79c585de7cc78ce12173048f8e6c56889b38f3b9 | 143 | py | Python | tests/conftest.py | kringen/wingnut | 73be4f8393720ff0932ab069543e5f2d2308296d | [
"MIT"
] | null | null | null | tests/conftest.py | kringen/wingnut | 73be4f8393720ff0932ab069543e5f2d2308296d | [
"MIT"
] | null | null | null | tests/conftest.py | kringen/wingnut | 73be4f8393720ff0932ab069543e5f2d2308296d | [
"MIT"
] | null | null | null | import pytest
from ui import ui
@pytest.fixture
def app():
yield ui.app
@pytest.fixture
def client(app):
return app.test_client()
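# Illustrative usage in a test module (the '/' route is an assumption about the
# Flask app defined in ui.py):
#
#     def test_index_returns_ok(client):
#         response = client.get('/')
#         assert response.status_code == 200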
| 10.214286 | 28 | 0.699301 |
8d29967cfc6f4539b5704abf1f9fe67ec40f3e96 | 3,149 | py | Python | sonarqube/measures.py | alexcfpho/python-sonarqube-api | 97a637e3d8083d1d72af0f1ba2e5ab7ec88a7d75 | [
"MIT"
] | null | null | null | sonarqube/measures.py | alexcfpho/python-sonarqube-api | 97a637e3d8083d1d72af0f1ba2e5ab7ec88a7d75 | [
"MIT"
] | null | null | null | sonarqube/measures.py | alexcfpho/python-sonarqube-api | 97a637e3d8083d1d72af0f1ba2e5ab7ec88a7d75 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from sonarqube.config import (
API_MEASURES_COMPONENT_ENDPOINT,
API_MEASURES_SEARCH_HISTORY_ENDPOINT
)
class SonarQubeMeasure:
metricKeys = 'code_smells,bugs,vulnerabilities,new_bugs,new_vulnerabilities,new_code_smells,coverage,\
new_reliability_rating,reliability_rating,new_security_rating,security_rating,new_maintainability_rating,\
sqale_rating,tests,test_failures,test_errors,skipped_tests,test_success_density,ncloc,duplicated_lines_density,\
comment_lines_density'
def __init__(self, sonarqube):
self.sonarqube = sonarqube
def get_measures_component(self, component, branch):
"""
Return component with specified measures.
:param component:
:param branch:
:return:
"""
params = {
'additionalFields': 'metrics,periods',
'metricKeys': self.metricKeys,
'component': component,
'branch': branch
}
resp = self.sonarqube.make_call('get', API_MEASURES_COMPONENT_ENDPOINT, **params)
data = resp.json()
return data
def get_measures_history(self, component, branch, **kwargs):
"""
Search measures history of a component
:param component:
:param branch:
:return:
"""
params = {
'metrics': 'code_smells,bugs,vulnerabilities,new_bugs,new_vulnerabilities,\
new_code_smells,coverage,new_coverage',
'component': component,
'branch': branch
}
if kwargs:
self.sonarqube.copy_dict(params, kwargs)
page_num = 1
page_size = 1
total = 2
while page_num * page_size < total:
resp = self.sonarqube.make_call('get', API_MEASURES_SEARCH_HISTORY_ENDPOINT, **params)
response = resp.json()
page_num = response['paging']['pageIndex']
page_size = response['paging']['pageSize']
total = response['paging']['total']
params['p'] = page_num + 1
for measure in response['measures']:
yield measure
def get_project_measures_component(self, project_key, branch, fc=None):
"""
Get project measures (unwanted metrics can be filtered out).
:param project_key: key of the SonarQube project
:param branch: branch name
:param fc: metric keys to drop from the result (comma-separated string or list)
:return: dict mapping metric key to its value
"""
if fc:
if isinstance(fc, str):
fc = fc.split(',')
measures_component = self.get_measures_component(project_key, branch)['component']['measures']
measures = {}
for metric in measures_component:
metric_key = metric['metric']
if metric_key in self.metricKeys.split(','):
measures[metric_key] = metric.get('value', None) or metric.get('periods', None)[0]['value']
# try:
# measures[metric_key] = metric['value']
# except:
# measures[metric_key] = metric['periods'][0]['value']
for item in list(measures.keys()):
if fc and item in fc:
del measures[item]
return measures
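# Illustrative usage sketch (assumes `sonar` is an authenticated client object from
# this package that exposes `make_call`; project key and branch are example values):
#
#     measure_api = SonarQubeMeasure(sonar)
#     summary = measure_api.get_project_measures_component('my-project', 'master', fc='tests,ncloc')
#     for item in measure_api.get_measures_history('my-project', 'master'):
#         print(item['metric'], item['history'])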
| 32.463918 | 112 | 0.599238 |
106ec863b06ddf2321cd98737b8d3049bac53f8d | 13,688 | py | Python | src/generate.py | mgwein/DeepTreeAttention | 22bd0beb836a570b556baa82b747f69caded05c7 | [
"MIT"
] | null | null | null | src/generate.py | mgwein/DeepTreeAttention | 22bd0beb836a570b556baa82b747f69caded05c7 | [
"MIT"
] | null | null | null | src/generate.py | mgwein/DeepTreeAttention | 22bd0beb836a570b556baa82b747f69caded05c7 | [
"MIT"
] | 1 | 2022-02-28T19:46:03.000Z | 2022-02-28T19:46:03.000Z | #Convert NEON field sample points into bounding boxes of cropped image data for model training
import glob
import geopandas as gpd
import rasterio
import numpy as np
import shapely
import os
import pandas as pd
from src.neon_paths import find_sensor_path, lookup_and_convert, bounds_to_geoindex
from src import patches
from distributed import wait
from deepforest import main
import traceback
import warnings
warnings.filterwarnings('ignore')
def predict_trees(deepforest_model, rgb_path, bounds, expand=40):
"""Predict an rgb path at specific utm bounds
Args:
deepforest_model: a deepforest model object used for prediction
rgb_path: full path to image
bounds: utm extent given by geopandas.total_bounds
expand: numeric meters to add to edges to reduce edge effects
"""
#DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points
left, bottom, right, top = bounds
expand_width = (expand - (right - left))/2
left = left - expand_width
right = right + expand_width
expand_height = (expand - (top - bottom))/2
bottom = bottom - expand_height
top = top + expand_height
src = rasterio.open(rgb_path)
pixelSizeX, pixelSizeY = src.res
img = src.read(window=rasterio.windows.from_bounds(left, bottom, right, top, transform=src.transform))
src.close()
#roll to channels last
img = np.rollaxis(img, 0,3)
boxes = deepforest_model.predict_image(image = img, return_plot=False)
if boxes is None:
return boxes
#subtract origin. Recall that numpy origin is top left! Not bottom left.
boxes["xmin"] = (boxes["xmin"] *pixelSizeX) + left
boxes["xmax"] = (boxes["xmax"] * pixelSizeX) + left
boxes["ymin"] = top - (boxes["ymin"] * pixelSizeY)
boxes["ymax"] = top - (boxes["ymax"] * pixelSizeY)
# combine column to a shapely Box() object, save shapefile
boxes['geometry'] = boxes.apply(lambda x: shapely.geometry.box(x.xmin,x.ymin,x.xmax,x.ymax), axis=1)
boxes = gpd.GeoDataFrame(boxes, geometry='geometry')
#Give an id field
boxes["box_id"] = np.arange(boxes.shape[0])
return boxes
def choose_box(group, plot_data):
"""Given a set of overlapping bounding boxes and predictions, just choose the closest to stem box by centroid if there are multiples"""
if group.shape[0] == 1:
return group
else:
#Find centroid
individual_id = group.individual.unique()[0]
stem_location = plot_data[plot_data["individual"]==individual_id].geometry.iloc[0]
closest_stem = group.centroid.distance(stem_location).sort_values().index[0]
return group.loc[[closest_stem]]
def create_boxes(plot_data, size=1):
"""If there are no deepforest boxes, fall back on selecting a fixed area around stem point"""
fixed_boxes = plot_data.buffer(size).envelope
fixed_boxes = gpd.GeoDataFrame(geometry=fixed_boxes)
#Mimic the existing structure
fixed_boxes = gpd.sjoin(fixed_boxes, plot_data)
fixed_boxes["score"] = None
fixed_boxes["label"] = "Tree"
fixed_boxes["xmin"] = None
fixed_boxes["xmax"] = None
fixed_boxes["ymax"] = None
fixed_boxes["ymin"] = None
fixed_boxes["box_id"] = fixed_boxes.index.to_series().apply(lambda x: "fixed_box_{}".format(x))
return fixed_boxes
def process_plot(plot_data, rgb_pool, deepforest_model=None):
"""For a given NEON plot, find the correct sensor data, predict trees and associate bounding boxes with field data
Args:
plot_data: geopandas dataframe in a utm projection
deepforest_model: deepforest model used for prediction
Returns:
merged_boxes: geodataframe of bounding box predictions with species labels
"""
#DeepForest prediction
try:
rgb_sensor_path = find_sensor_path(bounds=plot_data.total_bounds, lookup_pool=rgb_pool)
except Exception as e:
raise ValueError("cannot find RGB sensor for {}".format(plot_data.plotID.unique()))
boxes = predict_trees(deepforest_model=deepforest_model, rgb_path=rgb_sensor_path, bounds=plot_data.total_bounds)
if boxes is None:
raise ValueError("No trees predicted in plot: {}, skipping.".format(plot_data.plotID.unique()[0]))
#Merge results with field data, buffer on edge
merged_boxes = gpd.sjoin(boxes, plot_data)
##If no remaining boxes just take a box around center
missing_ids = plot_data[~plot_data.individual.isin(merged_boxes.individual)]
if not missing_ids.empty:
created_boxes= create_boxes(missing_ids)
merged_boxes = merged_boxes.append(created_boxes)
#If there are multiple boxes per point, take the center box
grouped = merged_boxes.groupby("individual")
cleaned_boxes = []
for value, group in grouped:
choosen_box = choose_box(group, plot_data)
cleaned_boxes.append(choosen_box)
merged_boxes = gpd.GeoDataFrame(pd.concat(cleaned_boxes),crs=merged_boxes.crs)
merged_boxes = merged_boxes.drop(columns=["xmin","xmax","ymin","ymax"])
##if there are multiple points per box, take the tallest point.
cleaned_points = []
for value, group in merged_boxes.groupby("box_id"):
if group.shape[0] > 1:
print("removing {} points from {} within a deepforest box {}".format(group.shape[0]-1, group.plotID.unique(),group.box_id.unique()))
selected_point = group[group.height == group.height.max()]
if selected_point.shape[0] > 1:
try:
selected_point = selected_point[selected_point.CHM_height == selected_point.CHM_height.max()]
except:
selected_point.head(1)
cleaned_points.append(selected_point)
else:
cleaned_points.append(group)
merged_boxes = gpd.GeoDataFrame(pd.concat(cleaned_points),crs=merged_boxes.crs)
#Add tile information
boxes["RGB_tile"] = rgb_sensor_path
merged_boxes["RGB_tile"] = rgb_sensor_path
return merged_boxes, boxes
def run(plot, df, savedir, raw_box_savedir, rgb_pool=None, saved_model=None, deepforest_model=None):
"""wrapper function for dask, see main.py"""
if deepforest_model is None:
from deepforest import main
deepforest_model = main.deepforest()
deepforest_model.use_release(check_release=False)
#Filter data and process
plot_data = df[df.plotID == plot]
try:
predicted_trees, raw_boxes = process_plot(plot_data, rgb_pool, deepforest_model)
except ValueError as e:
print(e)
return None
if predicted_trees.empty:
return None
#Write merged boxes to file as an interim piece of data to inspect.
if savedir is not None:
predicted_trees.to_file("{}/{}_boxes.shp".format(savedir, plot))
if raw_box_savedir is not None:
raw_boxes.to_file("{}/{}_boxes.shp".format(raw_box_savedir, plot))
return predicted_trees
def points_to_crowns(
field_data,
rgb_dir,
savedir,
raw_box_savedir,
client=None):
"""Convert NEON field sample points into predicted tree crown boxes
Args:
field_data: shp file with location and class of each field collected point
rgb_dir: glob to search RGB images
savedir: directory to save predicted bounding boxes
raw_box_savedir: directory save all bounding boxes in the image
client: dask client object to use
Returns:
None: .shp bounding boxes are written to savedir
"""
df = gpd.read_file(field_data)
plot_names = df.plotID.unique()
rgb_pool = glob.glob(rgb_dir, recursive=True)
results = []
if client:
futures = []
for plot in plot_names:
future = client.submit(
run,
plot=plot,
df=df,
rgb_pool=rgb_pool,
savedir=savedir,
raw_box_savedir=raw_box_savedir
)
futures.append(future)
wait(futures)
for x in futures:
try:
result = x.result()
results.append(result)
except Exception as e:
print(e)
continue
else:
# No dask client supplied: build a single DeepForest model and process plots serially.
deepforest_model = main.deepforest()
deepforest_model.use_release(check_release=False)
for plot in plot_names:
try:
result = run(plot=plot, df=df, savedir=savedir, raw_box_savedir=raw_box_savedir, rgb_pool=rgb_pool, deepforest_model=deepforest_model)
results.append(result)
except Exception as e:
print("{} failed with {}".format(plot, e))
results = pd.concat(results)
#In case any contrib data has the same CHM and height and sitting in the same deepforest box.Should be rare.
results = results.groupby(["plotID","box_id"]).apply(lambda x: x.head(1)).reset_index(drop=True)
return results
def write_crop(row, img_path, savedir, replace=True):
"""Wrapper to write a crop based on size and savedir"""
if replace == False:
filename = "{}/{}.tif".format(savedir, row["individual"])
file_exists = os.path.exists(filename)
if file_exists:
annotation = pd.DataFrame({"image_path":[os.path.basename(filename)], "taxonID":[row["taxonID"]], "plotID":[row["plotID"]], "individualID":[row["individual"]], "RGB_tile":[row["RGB_tile"]], "siteID":[row["siteID"]],"box_id":[row["box_id"]]})
return annotation
else:
filename = patches.crop(bounds=row["geometry"].bounds, sensor_path=img_path, savedir=savedir, basename=row["individual"])
else:
filename = patches.crop(bounds=row["geometry"].bounds, sensor_path=img_path, savedir=savedir, basename=row["individual"])
annotation = pd.DataFrame({"image_path":[os.path.basename(filename)], "taxonID":[row["taxonID"]], "plotID":[row["plotID"]], "individualID":[row["individual"]], "RGB_tile":[row["RGB_tile"]], "siteID":[row["siteID"]],"box_id":[row["box_id"]]})
return annotation
def generate_crops(gdf, sensor_glob, savedir, rgb_glob, client=None, convert_h5=False, HSI_tif_dir=None, replace=True):
"""
Given a GeoDataFrame of crowns in a plot, create pixel crops and a dataframe of unique names and labels
Args:
gdf: a GeoDataFrame with crown geometry objects and a taxonID column
savedir: path to save image crops
sensor_glob: glob to search remote sensing files. This can be either RGB or .tif hyperspectral data, as long as it can be read by rasterio
client: optional dask client
convert_h5: If HSI data is passed, make sure .tif conversion is complete
rgb_glob: glob to search images to match when converting h5s -> tif.
HSI_tif_dir: if converting H5 -> tif, where to save .tif files. Only needed if convert_h5 is True
Returns:
annotations: pandas dataframe of filenames and individual IDs to link with data
"""
annotations = []
img_pool = glob.glob(sensor_glob, recursive=True)
rgb_pool = glob.glob(rgb_glob, recursive=True)
# Drop erroneous point cloud .tif files from the sensor pools
img_pool = [x for x in img_pool if not "point_cloud" in x]
rgb_pool = [x for x in rgb_pool if not "point_cloud" in x]
#Looking up the rgb -> HSI tile naming is expensive and repetitive. Create a dictionary first.
gdf["geo_index"] = gdf.geometry.apply(lambda x: bounds_to_geoindex(x.bounds))
tiles = gdf["geo_index"].unique()
tile_to_path = {}
for geo_index in tiles:
try:
#Check if h5 -> tif conversion is complete
if convert_h5:
if rgb_glob is None:
raise ValueError("rgb_glob is None, but convert_h5 is True, please supply glob to search for rgb images")
else:
img_path = lookup_and_convert(rgb_pool=rgb_pool, hyperspectral_pool=img_pool, savedir=HSI_tif_dir, geo_index = geo_index)
else:
img_path = find_sensor_path(lookup_pool = img_pool, geo_index = geo_index)
except:
print("{} failed to find sensor path with traceback {}".format(geo_index, traceback.print_exc()))
continue
tile_to_path[geo_index] = img_path
if client:
futures = []
for index, row in gdf.iterrows():
try:
img_path = tile_to_path[row["geo_index"]]
except:
continue
future = client.submit(write_crop,row=row,img_path=img_path, savedir=savedir, replace=replace)
futures.append(future)
wait(futures)
for x in futures:
try:
annotation = x.result()
annotations.append(annotation)
except:
print("Future failed with {}".format(traceback.print_exc()))
else:
for index, row in gdf.iterrows():
try:
img_path = tile_to_path[row["geo_index"]]
except:
continue
try:
annotation = write_crop(row=row, img_path=img_path, savedir=savedir, replace=replace)
except Exception as e:
print("index {} failed with {}".format(index,e))
continue
annotations.append(annotation)
annotations = pd.concat(annotations)
return annotations
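# Illustrative end-to-end sketch (all paths and globs below are placeholders):
#
#     crowns = points_to_crowns(
#         field_data="field_points.shp",
#         rgb_dir="/data/NEON/**/Camera/**/*.tif",
#         savedir="crowns/",
#         raw_box_savedir="raw_boxes/")
#     annotations = generate_crops(
#         crowns,
#         sensor_glob="/data/NEON/**/Reflectance/*.h5",
#         rgb_glob="/data/NEON/**/Camera/**/*.tif",
#         savedir="crops/",
#         convert_h5=True,
#         HSI_tif_dir="hsi_tif/")
#     annotations.to_csv("annotations.csv")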
| 40.859701 | 253 | 0.646698 |
829e207fd7e9b0907a37d0c0a23af8d02281c4e6 | 6,960 | py | Python | exp/train_mask.py | ddz16/query-selector | f90f3003f5027f1d00e8f9494dcfd8e32e253927 | [
"Apache-2.0"
] | null | null | null | exp/train_mask.py | ddz16/query-selector | f90f3003f5027f1d00e8f9494dcfd8e32e253927 | [
"Apache-2.0"
] | null | null | null | exp/train_mask.py | ddz16/query-selector | f90f3003f5027f1d00e8f9494dcfd8e32e253927 | [
"Apache-2.0"
] | null | null | null | # coding=UTF-8
from data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom
from model import Transformer, Transformer_Mask
from torch.utils.tensorboard import SummaryWriter
from utils.tools import EarlyStopping
from metrics import metric
from metrics import AverageMeter
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
import os
import warnings
warnings.filterwarnings('ignore')
def acquire_device(args):
if args.use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) if not args.use_multi_gpu else args.devices
device = torch.device('cuda:{}'.format(args.gpu))
print('Use GPU: cuda:{}'.format(args.gpu))
else:
device = torch.device('cpu')
print('Use CPU')
return device
def build_model(args, device):
model = Transformer_Mask(args.embedding_size,
args.hidden_size,
args.input_dim,
args.dec_seq_len,
args.pred_len,
args.n_decoder_layers,
args.n_encoder_layers,
args.encoder_attention,
args.decoder_attention,
args.n_heads,
args.dropout,
args.output_dim,
).float()
if args.use_gpu and args.use_multi_gpu:
        model = nn.DataParallel(model, device_ids=args.device_ids)  # multi-GPU
    model = model.to(device)  # single GPU or CPU
return model
def get_data(args, flag):
data_dict = {
'ETTh1': Dataset_ETT_hour,
'ETTh2': Dataset_ETT_hour,
'ETTm1': Dataset_ETT_minute,
'WTH': Dataset_Custom,
'ECL': Dataset_Custom
}
Data = data_dict[args.data]
if flag == 'test':
shuffle_flag = False
drop_last = True
batch_size = args.batch_size
elif flag == 'pred':
shuffle_flag = False
drop_last = False
batch_size = 1
else:
shuffle_flag = True
drop_last = True
batch_size = args.batch_size
data_set = Data(
root_path='data',
data_path=args.data + '.csv',
flag=flag,
size=[args.seq_len, 0, args.pred_len],
features=args.features,
target=args.target,
inverse=args.inverse,
)
print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last
)
return data_set, data_loader
def run_metrics(preds, trues):
preds = np.array(preds)
trues = np.array(trues)
preds = preds.reshape((-1, preds.shape[-2], preds.shape[-1]))
trues = trues.reshape((-1, trues.shape[-2], trues.shape[-1]))
mae, mse, rmse, mape, mspe = metric(preds, trues)
return mse, mae
def save_result(preds, trues, args):
    true_save_path = './result/true'  # directory to save ground-truth results
    pred_save_path = './result/pred'  # directory to save predicted results
if not os.path.exists(true_save_path):
os.makedirs(true_save_path)
if not os.path.exists(pred_save_path):
os.makedirs(pred_save_path)
save_name = args.features+"_" + args.data + "_" + str(args.pred_len) + ".npy"
preds = np.array(preds)
trues = np.array(trues)
preds = preds.reshape((-1, preds.shape[-2], preds.shape[-1]))
trues = trues.reshape((-1, trues.shape[-2], trues.shape[-1]))
np.save(os.path.join(true_save_path, save_name), trues)
np.save(os.path.join(pred_save_path, save_name), preds)
return
def random_mask(x_in, mask_rate, device):
batch_size = x_in.size(0)
length = x_in.size(1)
mask_len = int(length*mask_rate)
mask = torch.ones((batch_size, length, length), device=device)
begin_index = torch.randint(0, length-mask_len, (batch_size,))
end_index = begin_index + mask_len
for ii in range(batch_size):
mask[ii, :, begin_index[ii]:end_index[ii]] = 0
return mask
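# Shape sketch for random_mask (illustrative values, not from the original code): with
# length=6 and mask_rate=0.5, each batch element receives a (6, 6) matrix of ones in which
# a random contiguous block of 3 columns is zeroed, e.g. columns 2..4, hiding those time
# steps from attention for every query position of that sequence.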
def run_one_epoch_mask(model, device, data_loader, args, training=True, epoch=1):
preds = []
trues = []
total_loss = 0
iter_count = 0
for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(data_loader):
iter_count += 1
model.optim.zero_grad()
batch = batch_x.float().to(device)
target = batch_y.float().to(device)
mask = random_mask(batch, 0.2, device)
result, reconstruct = model(batch, mask)
if training:
            loss = F.mse_loss(result.squeeze(2), target.squeeze(2), reduction='mean')  # squeeze drops dim 2 only when its size is 1; otherwise it is a no-op
loss += F.mse_loss(reconstruct.squeeze(2), batch.squeeze(2), reduction='mean')
total_loss += loss.item()
if iter_count % 10 == 0:
print("\tEpoch: {0}, iters: {1} | loss at iter: {2:.7f}, mean loss for epoch: {3:.7f}".format(
epoch, iter_count, loss.item(), total_loss / iter_count))
loss.backward()
model.optim.step()
pred = result.detach().cpu().numpy()
true = target.detach().cpu().numpy()
preds.append(pred)
trues.append(true)
return preds, trues
def run_mask(args):
device = acquire_device(args)
model = build_model(args, device)
model.optim = optim.Adam(model.parameters(), lr=args.lr)
# tb = SummaryWriter(log_dir='runs/')
train_data, train_loader = get_data(args, flag='train')
test_data, test_loader = get_data(args, flag='test')
train_iters = len(train_loader)
all_test_mse = AverageMeter()
all_test_mae = AverageMeter()
for epoch in range(1, args.epochs+1):
model.train()
train_preds, train_trues = run_one_epoch_mask(model, device, train_loader, args, True, epoch)
train_mse, train_mae = run_metrics(train_preds, train_trues)
# tb.add_scalars("MSE Loss", {'Train': train_mse}, epoch)
# tb.add_scalars("MAE Loss", {'Train': train_mae}, epoch)
model.eval()
test_preds, test_trues = run_one_epoch_mask(model, device, test_loader, args, False)
test_mse, test_mae = run_metrics(test_preds, test_trues)
all_test_mse.update(test_mse)
all_test_mae.update(test_mae)
if all_test_mse.update_min_flag:
best_test_preds = test_preds
# tb.add_scalars("MSE Loss", {'Test': test_mse}, epoch)
# tb.add_scalars("MAE Loss", {'Test': test_mae}, epoch)
print("Epoch: {0}, total iters: {1} | Train MSE: {2:.3f} Train MAE: {3:.3f} Test MSE: {4:.3f} Test MAE: {5:.3f}"
.format(epoch, train_iters, train_mse, train_mae, test_mse, test_mae))
save_result(best_test_preds, test_trues, args)
print("Min Test MSE: {0:.3f}, Min Test MAE: {1:.3f}".format(all_test_mse.min, all_test_mae.min))
| 33.623188 | 120 | 0.618103 |
660765aca50d2161480def6500a6eafd59c5755b | 1,525 | py | Python | setup.py | jairhenrique/dataclasses-avroschema | 4aceed130765a273ff42a38bf7a800cb6b50e005 | [
"MIT"
] | null | null | null | setup.py | jairhenrique/dataclasses-avroschema | 4aceed130765a273ff42a38bf7a800cb6b50e005 | [
"MIT"
] | 2 | 2022-03-22T20:24:30.000Z | 2022-03-22T20:24:50.000Z | setup.py | jairhenrique/dataclasses-avroschema | 4aceed130765a273ff42a38bf7a800cb6b50e005 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" setup.py for dataclasses-avroschema."""
from setuptools import find_packages, setup
__version__ = "0.27.2"
with open("README.md") as readme_file:
long_description = readme_file.read()
setup(
name="dataclasses-avroschema",
version=__version__,
description="Generate Avro Schemas from a Python class",
long_description=long_description,
long_description_content_type="text/markdown",
author="Marcos Schroh",
install_requires=[
"inflect>=5.3.0",
"fastavro>=1.4.0",
"pytz",
"dacite>=1.6.0",
"faker>=8.1.1",
'typing_extensions>=3.7.4;python_version<"3.9"',
"stringcase>=1.2.0",
],
author_email="schrohm@gmail.com",
url="https://github.com/marcosschroh/dataclasses-avroschema",
download_url="",
packages=find_packages(exclude=("tests",)),
include_package_data=True,
license="MIT",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development",
],
keywords=(
"""
Python, Data Classes, Avro Schema, Avro, Apache, Data Streaming
"""
),
)
| 28.773585 | 71 | 0.60918 |
b74cc0e620bba2a639b14bb6adc3276741c0d0a4 | 649 | py | Python | utils/datasets/__init__.py | voldemortX/DeeplabV3_PyTorch1.3_Codebase | d22d23e74800fafb58eeb61d6649008745c1a287 | [
"BSD-3-Clause"
] | 1 | 2020-09-17T06:21:39.000Z | 2020-09-17T06:21:39.000Z | utils/datasets/__init__.py | voldemortX/pytorch-segmentation | 9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda | [
"BSD-3-Clause"
] | null | null | null | utils/datasets/__init__.py | voldemortX/pytorch-segmentation | 9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda | [
"BSD-3-Clause"
] | null | null | null | from .segmentation import PASCAL_VOC_Segmentation, CityscapesSegmentation, SYNTHIA_Segmentation, GTAV_Segmentation
from .lane_as_segmentation import TuSimpleAsSegmentation, CULaneAsSegmentation, LLAMAS_AsSegmentation
from .lane_as_bezier import TuSimpleAsBezier, CULaneAsBezier, LLAMAS_AsBezier, Curvelanes_AsBezier
from .tusimple import TuSimple
from .tusimple_vis import TuSimpleVis
from .culane import CULane
from .culane_vis import CULaneVis
from .llamas import LLAMAS
from .llamas_vis import LLAMAS_Vis
from .image_folder import ImageFolderDataset
from .video import VideoLoader
from .utils import dict_collate_fn
from .builder import DATASETS
| 46.357143 | 114 | 0.875193 |
d45119bd67ee7f0b89a441fd1b9e20dd77ddc42b | 26,568 | py | Python | comet_core/app.py | efilipsson/comet-core | 2c6477e52aabc350af942b8571e9412720210eec | [
"Apache-2.0"
] | 17 | 2018-11-05T17:00:53.000Z | 2021-12-16T15:56:58.000Z | comet_core/app.py | efilipsson/comet-core | 2c6477e52aabc350af942b8571e9412720210eec | [
"Apache-2.0"
] | 17 | 2018-12-27T16:13:13.000Z | 2021-08-05T12:04:45.000Z | comet_core/app.py | efilipsson/comet-core | 2c6477e52aabc350af942b8571e9412720210eec | [
"Apache-2.0"
] | 17 | 2018-11-13T17:10:13.000Z | 2021-06-03T19:50:51.000Z | # Copyright 2018 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Comet app"""
import logging
import signal
import time
from datetime import datetime, timedelta
from comet_core.data_store import DataStore
from comet_core.exceptions import CometCouldNotSendException
from comet_core.fingerprint import comet_event_fingerprint
from comet_core.model import EventRecord
LOG = logging.getLogger(__name__)
class EventContainer:
"""This is the container of an event that is passed to the hydrator functions.
Args:
source_type (str): the source type of the message
message (dict): the message data
"""
def __init__(self, source_type, message):
self.source_type = source_type
self.message = message
self.owner = None
self.fingerprint = comet_event_fingerprint(data_dict=message, prefix=source_type + "_")
self.event_metadata = dict()
def get_record(self):
"""Make the event container into a database record.
Returns:
EventRecord: the database record for this event
"""
return EventRecord(
source_type=self.source_type,
fingerprint=self.fingerprint,
owner=self.owner,
event_metadata=self.event_metadata,
data=self.message,
)
def set_owner(self, owner):
"""Set the owner of the event.
Args:
owner (str): the owner of the event
"""
self.owner = owner
def set_fingerprint(self, fingerprint):
"""Set the fingerprint of the event.
Args:
fingerprint (str): the fingerprint that makes this event uniquely identifiable
"""
self.fingerprint = fingerprint
def set_metadata(self, metadata):
"""Set optional metadata for the event.
Args:
metadata (dict): arbitrary metadata for the event
"""
self.event_metadata = metadata
class SourceTypeFunction:
"""This is a collection that can register a function for one, many or all source_types."""
def __init__(self):
self.specific_collection = dict()
self.global_collection = list()
def add(self, source_types, func):
"""Adds a function for the specified source_types, or all if not specified.
Args:
source_types (Union[str,list,None]): which source types to register for as a str, list or None.
Given a string, the function is registered for that source_type only.
Given a list, the function is registered for all source_types in that list.
Given none, the function is registered for all source_types.
func (function): the function to register
"""
if source_types:
if isinstance(source_types, str):
self.specific_collection.setdefault(source_types, []).append(func)
elif isinstance(source_types, list):
for source_type in source_types:
self.specific_collection.setdefault(source_type, []).append(func)
else:
self.global_collection.append(func)
def for_source_type(self, source_type):
"""Get all applicable functions for a given source_type.
Args:
source_type (str): the source_type to get the registered functions for
Yields:
function: functions registered to the specified source_type
"""
for func in self.specific_collection.get(source_type, []):
yield func
for func in self.global_collection:
yield func
def func_count(self):
"""Returns the amount of functions registered in total, useful for testing.
Returns:
int: the total amount of registered functions
"""
res = 0
for val in self.specific_collection.values():
res += len(val)
return res + len(self.global_collection)
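# Usage sketch for SourceTypeFunction (illustrative only; the function names are made up):
#
#     routers = SourceTypeFunction()
#     routers.add("my_scanner", specific_router)   # registered for one source type
#     routers.add(None, catch_all_router)          # registered for every source type
#     for func in routers.for_source_type("my_scanner"):
#         ...  # yields specific_router, then catch_all_router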
# pylint: disable=too-many-instance-attributes
class Comet:
"""The main Comet class
Args:
database_uri (str): the database to connect to as an URI
"""
def __init__(self, database_uri="sqlite://"):
self.running = False
self.data_store = DataStore(database_uri)
self.inputs = list()
self.instantiated_inputs = list()
self.hydrators = dict()
self.filters = dict()
self.parsers = dict()
self.routers = SourceTypeFunction()
self.escalators = SourceTypeFunction()
self.real_time_sources = list()
self.real_time_config_providers = dict()
self.database_uri = database_uri
self.batch_config = {
"communication_digest_mode": True,
            # By default (communication_digest_mode=True), all batch events are grouped by owner and source_type,
            # and the email reads like:
            # "here are your X new issues, and by the way, you still have these Y old ones."
            # In non-digest mode, the router receives only the events that are new or need a reminder.
"escalation_reminder_cadence": timedelta(days=7),
# `escalation_reminder_cadence` defines how often to send escalation reminders
"escalation_time": timedelta(seconds=10),
# `escalation_time` defines how soon event should be escalated (it takes ignore_fingerprints into account)
"max_wait": timedelta(seconds=4),
# `max_wait` defines the amount of time to wait since the earliest event in an attempt to catch whole batch
"new_threshold": timedelta(days=7),
# `new_threshold` defines amount of time to wait since the latest report of the given fingerprint to assume
# it as a regression of the detected issue
"owner_reminder_cadence": timedelta(days=7),
# `owner_reminder_cadence` defines how often to send reminders
"wait_for_more": timedelta(seconds=3),
# `wait_for_more` defines the amount of time to wait since the latest event
}
self.specific_configs = {}
def message_callback(self, source_type, message):
"""This is the callback that inputs should call when they receive new messages
Args:
source_type (str): the source type of the message
message (str): the message as a string
Return:
boolean: True if parsing was successful, False otherwise
"""
LOG.info("received a message", extra={"source_type": source_type})
parse = self.parsers.get(source_type)
if not parse:
LOG.warning("no parser found", extra={"source_type": source_type})
return False
try:
message_dict = parse(message)
except ValueError as err:
LOG.warning("invalid message", extra={"source_type": source_type, "error": str(err)})
return False
# Prepare an event container
event = EventContainer(source_type, message_dict)
# Hydrate
hydrate = self.hydrators.get(source_type)
if hydrate:
hydrate(event)
# Filter event
filter_event = self.filters.get(source_type)
if filter_event:
event = filter_event(event)
# Add to datastore
if event:
self.data_store.add_record(event.get_record())
return True
def set_config(self, source_type, config):
"""Call to override default batching and batch escalation logic.
Args:
source_type (str): the source type to override the configuration for
config (dict): the config values to override
"""
self.specific_configs[source_type] = config
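    # Illustrative (hypothetical) override of the batching defaults documented in
    # __init__; only keys that appear in self.batch_config are meaningful here:
    #
    #     app = Comet(database_uri="sqlite://")
    #     app.set_config("my_scanner", {
    #         "wait_for_more": timedelta(minutes=5),
    #         "max_wait": timedelta(minutes=30),
    #         "owner_reminder_cadence": timedelta(days=3),
    #     })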
def register_input(self, clazz=None, **kwargs):
"""Register an input, with optional configuration.
This method can be used either as a decorator or with a class passed in.
The input will be registered but will not be instantiated until `run` is called.
This is to ensure that we do not get messages into the pipeline before the rest of the logic, parsers,
hydrators etc, is registered.
Args:
clazz (class): a class or None if used as a decorator
            kwargs (dict): optional configuration values to pass to the constructor of clazz
Return:
function or None: if no clazz is given returns a decorator function, otherwise None
"""
if not clazz:
# pylint: disable=missing-return-doc, missing-return-type-doc
def decorator(clazz):
self.inputs.append((clazz, kwargs))
return clazz
return decorator
self.inputs.append((clazz, kwargs))
def register_parser(self, source_type, func=None):
"""Register a parser function.
This method can be used either as a decorator or with a parser function passed in.
Args:
source_type (str): the source type to register the parser for
func (Optional[function]): a function that parse a message of type source_type, or None if used as a
decorator
Return:
            function or None: if no func is given returns a decorator function, otherwise None
"""
if not func:
# pylint: disable=missing-return-doc, missing-return-type-doc
def decorator(func):
self.parsers[source_type] = func
return func
return decorator
self.parsers[source_type] = func
def register_config_provider(self, source_type, func=None):
"""Register, per source type, a function that return config given a real time event.
This method can be used either as a decorator or with a parser function passed in.
Args:
source_type (str): the source type to register the config provider for
            func (Optional[function]): a function that accepts an event and returns a dictionary with configuration
        Return:
            function or None: if no func is given returns a decorator function, otherwise None
"""
if not func:
# pylint: disable=missing-return-doc, missing-return-type-doc
def decorator(func):
self.real_time_config_providers[source_type] = func
return func
return decorator
self.real_time_config_providers[source_type] = func
def register_real_time_source(self, source_type):
"""Register real time source type
Args:
source_type (str): the source type to register the parser for
"""
self.real_time_sources.append(source_type)
def register_hydrator(self, source_type, func=None):
"""Register a hydrator.
This method can be used either as a decorator or with a hydrator function passed in.
Args:
source_type (str): the source type to register the parser for
func (Optional[function]): a function that hydrates a message of type source_type, or None if used as a
decorator
Return:
function or None: if no func is given returns a decorator function, otherwise None
"""
if not func:
# pylint: disable=missing-return-doc, missing-return-type-doc
def decorator(func):
self.hydrators[source_type] = func
return func
return decorator
self.hydrators[source_type] = func
def register_filter(self, source_type, func=None):
"""Register a filter function to filter events before saving them to the db.
This method can be used either as a decorator or with a filter function passed in.
Args:
source_type (str): the source type to register the filter for
func (Optional[function]): a function that filter a message of type source_type, or None if used as a
decorator
Return:
function or None: if no func is given returns a decorator function, otherwise None
"""
if not func:
# pylint: disable=missing-return-doc, missing-return-type-doc
def decorator(func):
self.filters[source_type] = func
return func
return decorator
self.filters[source_type] = func
def register_router(self, source_types=None, func=None):
"""Register a router.
This method can be used either as a decorator or with a routing function passed in.
Args:
source_types (Optional[Union[str,list]]): a source type or multiple source types (in a list) to route, or
None to route all source types
func (Optional[function]): a function that routes batched messages, or None if used as a decorator
Return:
function or None: if no func is given returns a decorator function, otherwise None
"""
if not func:
# pylint: disable=missing-return-doc, missing-return-type-doc
def decorator(func):
self.routers.add(source_types, func)
return func
return decorator
self.routers.add(source_types, func)
def register_escalator(self, source_types=None, func=None):
"""Register a escalator.
This method can be used either as a decorator or with a escalator function passed in.
Args:
source_types (Optional[Union[str,list]]): a source type or multiple source types (in a list) to escalate,
or None to route all source types
func (Optional[function]): a function that escalates messages, or None if used as a decorator
Return:
function or None: if no func is given returns a decorator function, otherwise None
"""
# pylint: disable=missing-return-doc, missing-return-type-doc
if not func:
def decorator(func):
self.escalators.add(source_types, func)
return func
return decorator
self.escalators.add(source_types, func)
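    # Hypothetical end-to-end registration sketch using the decorators above (the source
    # type name, payload fields and delivery helper are made up for illustration):
    #
    #     app = Comet()
    #
    #     @app.register_parser("my_scanner")
    #     def parse(message):
    #         return json.loads(message)
    #
    #     @app.register_hydrator("my_scanner")
    #     def hydrate(event):
    #         event.set_owner(event.message.get("owner"))
    #
    #     @app.register_router("my_scanner")
    #     def route(source_type, owner, events):
    #         send_email(owner, events)  # hypothetical delivery helper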
# pylint: disable=too-many-branches, too-many-locals, too-many-nested-blocks, too-many-statements
def process_unprocessed_events(self):
"""Checks the database for unprocessed events and processes them.
        Processing means: group events by source-type and owner, check whether each source-type/owner set contains at
        least one new event or one old event that the owner needs to be reminded about, and send a notification if
        that is the case. It also checks, for all events, whether escalation is needed and sends it if no escalation
        was sent to the same escalation recipient recently. Ignored events are skipped for the above, but marked as
        processed.
Config options we care about:
source_type_config['communication_digest_mode'],
source_type_config['escalation_reminder_cadence'],
source_type_config['escalation_time'],
source_type_config['max_wait'],
source_type_config['new_threshold'],
source_type_config['owner_reminder_cadence'],
source_type_config['wait_for_more']
"""
LOG.debug("Processing unprocessed events")
# pylint: disable=consider-iterating-dictionary
for source_type in self.parsers.keys():
source_type_config = self.batch_config.copy()
if source_type in self.specific_configs:
source_type_config.update(self.specific_configs[source_type])
batch_events = self.data_store.get_unprocessed_events_batch(
source_type_config["wait_for_more"], source_type_config["max_wait"], source_type
)
events_by_owner = {}
ignored_events = []
need_escalation_events = []
if source_type in self.real_time_sources:
real_time_events_by_owner = {}
for event in batch_events:
if self.data_store.fingerprint_is_ignored(event.fingerprint):
ignored_events.append(event)
else:
real_time_events_by_owner.setdefault(event.owner, []).append(event)
# handle unprocessed real_time alerts
self._handle_real_time_alerts(real_time_events_by_owner, source_type)
# check if real time alerts need escalation
events_to_escalate = self.data_store.get_events_need_escalation(source_type)
self._handle_events_need_escalation(source_type, events_to_escalate)
else:
# Group events by owner and mark them as new or seen before
for event in batch_events:
if self.data_store.fingerprint_is_ignored(event.fingerprint):
ignored_events.append(event)
else:
event.new = self.data_store.check_if_new(event.fingerprint, source_type_config["new_threshold"])
event.needs_escalation = False
if self.data_store.check_needs_escalation(source_type_config["escalation_time"], event):
event.needs_escalation = True
event.first_escalation = not self.data_store.check_if_previously_escalated(event)
need_escalation_events.append(event)
events_by_owner.setdefault(event.owner, []).append(event)
if ignored_events:
self.data_store.update_processed_at_timestamp_to_now(ignored_events)
LOG.info("events-ignored", extra={"events": len(ignored_events)})
# Determine if we should send an email to the system owner
            # This happens if there are events that..
            # * ..have not been seen before
            # * ..were last sent to the owner X days ago
            #   (where X is `owner_reminder_cadence`, default 7 days)
for owner, events in events_by_owner.items():
owner_reminder_cadence = source_type_config["owner_reminder_cadence"]
events_to_remind = []
if source_type_config["communication_digest_mode"]:
if any(event.new for event in events) or self.data_store.check_any_issue_needs_reminder(
owner_reminder_cadence, events
):
events_to_remind = events
else:
fingerprints_to_remind = self.data_store.get_any_issues_need_reminder(
owner_reminder_cadence, events
)
if fingerprints_to_remind:
for e in events:
if e.fingerprint in fingerprints_to_remind:
e.reminder = True
events_to_remind.append(e)
for e in events:
if e.new and not e.fingerprint in fingerprints_to_remind:
events_to_remind.append(e)
if events_to_remind:
try:
self._route_events(owner, events_to_remind, source_type)
self.data_store.update_processed_at_timestamp_to_now(events_to_remind)
except CometCouldNotSendException:
LOG.error(f"Could not send alert to {owner}: {events_to_remind}")
self.data_store.update_processed_at_timestamp_to_now([e for e in events if e not in events_to_remind])
LOG.info("events-processed", extra={"events": len(events), "source-type": source_type, "owner": owner})
# Check if any of the events for this source_type needs
# escalation and if we may send an escalation
if need_escalation_events and self.data_store.may_send_escalation(
source_type, source_type_config["escalation_reminder_cadence"]
):
self._handle_events_need_escalation(source_type, need_escalation_events)
def handle_non_addressed_events(self):
"""Check if there are real time events sent to a user that were not addressed.
        Each event has an escalate_cadence parameter which defines the earliest time to escalate if the user did
        not address the alert.
"""
for source_type in self.real_time_sources:
non_addressed_events = self.data_store.get_events_did_not_addressed(source_type)
events_needs_escalation = []
for event in non_addressed_events:
# load configuration for event, using batch settings as default
event_config = {}
if source_type in self.real_time_config_providers:
event_config = self.real_time_config_providers[source_type](event)
escalate_cadence = event_config.get("escalate_cadence", timedelta(hours=36))
if escalate_cadence:
event_sent_at = event.sent_at
# when is earliest time to escalate the specific event
if event_sent_at <= datetime.utcnow() - escalate_cadence:
events_needs_escalation.append(event)
self._handle_events_need_escalation(source_type, events_needs_escalation)
def _route_events(self, owner, events, source_type):
"""route events need routing by getting the route function
function from the source type and route the events.
Args:
owner (str): the owner of the events
events (list(EventRecord)): events to route
            source_type (str): source type used to look up the router functions.
"""
routers = list(self.routers.for_source_type(source_type))
if not routers:
LOG.warning("no-router", extra={"source-type": source_type})
for route_func in routers:
route_func(source_type, owner, events)
self.data_store.update_sent_at_timestamp_to_now(events)
LOG.info("event-notification-sent", extra={"events": len(events), "source-type": source_type, "owner": owner})
def _handle_real_time_alerts(self, real_time_events_by_owner, source_type):
"""Handle real time alerts by sending the alerts to the owner
without any checks
Args:
real_time_events_by_owner (dict): events by owner
source_type (str): source type to get the specific router
"""
if real_time_events_by_owner:
for owner, events in real_time_events_by_owner.items():
try:
self._route_events(owner, events, source_type)
self.data_store.update_processed_at_timestamp_to_now(events)
except CometCouldNotSendException:
LOG.error(f"Could not send alert to {owner}: {events}")
def _handle_events_need_escalation(self, source_type, needs_escalation_events):
"""Handle events need escalation by getting the escalate
function from the source type and escalate.
Args:
source_type (str): source type to get escalator functions.
needs_escalation_events (list(EventRecord)): events need escalation
"""
if needs_escalation_events:
did_escalate = False
for escalator_func in self.escalators.for_source_type(source_type):
did_escalate = True
escalator_func(source_type, needs_escalation_events)
LOG.info("event-escalated", extra={"events": len(needs_escalation_events), "source_type": source_type})
if not did_escalate:
LOG.warning(
"event-not-escalated", extra={"events": len(needs_escalation_events), "source_type": source_type}
)
self.data_store.update_event_escalated_at_to_now(needs_escalation_events)
def stop(self, signum=0, frame=None): # pylint: disable=unused-argument
"""Stops all inputs.
Args:
signum (int): Signal that is stopping the function.
frame (frame or None): See Signal module documentation.
"""
LOG.info(f"Received stop signal {signum}")
for instance in self.instantiated_inputs:
LOG.info(f"Stopping instance {instance}")
instance.stop()
self.running = False
def validate_config(self):
"""Validates that every parser has a router"""
for source_type in list(self.parsers):
if not list(self.routers.for_source_type(source_type)):
LOG.warning("no router found", extra={"source_type": source_type})
del self.parsers[source_type]
def start_inputs(self):
"""Helper used to instantiate all registered inputs"""
self.instantiated_inputs = [clazz(self.message_callback, **kwargs) for clazz, kwargs in self.inputs]
def prepare_run(self):
"""Prepare the run for both normal running and staging"""
self.validate_config()
self.start_inputs()
# Run
self.running = True
signal.signal(signal.SIGTERM, self.stop)
signal.signal(signal.SIGINT, self.stop)
def staging(self):
"""For starting staging env"""
self.prepare_run()
timeout = time.time() + 60 # this is to wait 1 minute
while self.running:
self.process_unprocessed_events()
self.handle_non_addressed_events()
time.sleep(0.1)
if time.time() > timeout:
self.stop()
def run(self):
"""Start the Comet app"""
self.prepare_run()
while self.running:
self.process_unprocessed_events()
self.handle_non_addressed_events()
time.sleep(0.1)
| 41.773585 | 120 | 0.628124 |
7a17aadf39c4b8dedfda8e78d698d2c3967620ea | 917 | py | Python | main_compiling.py | lleonart1984/VAE-modeling | 9e46a758af77bafc603f43aeafc5cd0b7816bbca | [
"MIT"
] | null | null | null | main_compiling.py | lleonart1984/VAE-modeling | 9e46a758af77bafc603f43aeafc5cd0b7816bbca | [
"MIT"
] | null | null | null | main_compiling.py | lleonart1984/VAE-modeling | 9e46a758af77bafc603f43aeafc5cd0b7816bbca | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
import vae.training
import vae.cvae_model
import vae.compiling
def cvae_factory(epochs, batch_size):
print('Creating CVAE model...')
model = vae.cvae_model.CVAEModel(5, 4, 8, 16, 6, activation=lambda: nn.LeakyReLU(negative_slope=0.01))
optimizer = torch.optim.AdamW(model.parameters(), lr=0.002)
# optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.0000001)
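    # gamma is chosen so that after `epochs` steps of ExponentialLR the learning rate has
    # decayed by an overall factor of 0.001 (gamma**epochs == 0.001)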
gamma = np.exp(np.log(0.001) / epochs)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma)
return model, optimizer, scheduler
# path = "./Running/linear_sphere_cvae"
path = "./Running/linear_sphere_cvae_na"
model: vae.cvae_model.CVAEModel = vae.training.get_last_model(path, cvae_factory, top_epoch=None)
compiler = vae.compiling.HLSLCompiler()
code = compiler.compile({
'naPathModel': model.decoder.model.model
})
print(code) | 32.75 | 106 | 0.745911 |
d0e802503da8c82ae8edda9c993e6974b4368ccd | 1,173 | py | Python | catalyst/rl/__main__.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | 1 | 2019-11-26T06:41:33.000Z | 2019-11-26T06:41:33.000Z | catalyst/rl/__main__.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | null | null | null | catalyst/rl/__main__.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | 1 | 2021-12-20T07:32:25.000Z | 2021-12-20T07:32:25.000Z | from argparse import ArgumentParser, RawTextHelpFormatter
from collections import OrderedDict
from catalyst.__version__ import __version__
from .scripts import dump_db, load_db, run_samplers, run_trainer
COMMANDS = OrderedDict(
[
("run-trainer", run_trainer),
("run-samplers", run_samplers),
("dump-db", dump_db),
("load-db", load_db),
]
)
def build_parser() -> ArgumentParser:
parser = ArgumentParser(
"catalyst-rl", formatter_class=RawTextHelpFormatter
)
parser.add_argument(
"-v", "--version", action="version", version=f"%(prog)s {__version__}"
)
all_commands = ", \n".join(map(lambda x: f" {x}", COMMANDS.keys()))
subparsers = parser.add_subparsers(
metavar="{command}",
dest="command",
help=f"available commands: \n{all_commands}",
)
subparsers.required = True
for key, value in COMMANDS.items():
value.build_args(subparsers.add_parser(key))
return parser
def main():
parser = build_parser()
args, uargs = parser.parse_known_args()
COMMANDS[args.command].main(args, uargs)
if __name__ == "__main__":
main()
| 23.938776 | 78 | 0.650469 |
01ebb22df2fca25f495fd67a56d1b2ce9a749480 | 315 | py | Python | tests/type_test/statement_test.py | pniedzwiedzinski/pseudo | b27570bd8400b6a51a2958454b31f1ce2e25c4f9 | [
"MIT"
] | 5 | 2019-04-02T07:01:34.000Z | 2019-11-24T02:08:03.000Z | tests/type_test/statement_test.py | pniedzwiedzinski/pseudo | b27570bd8400b6a51a2958454b31f1ce2e25c4f9 | [
"MIT"
] | 11 | 2019-03-20T08:29:30.000Z | 2019-05-21T11:57:03.000Z | tests/type_test/statement_test.py | pniedzwiedzinski/pseudo | b27570bd8400b6a51a2958454b31f1ce2e25c4f9 | [
"MIT"
] | 1 | 2019-04-02T15:24:40.000Z | 2019-04-02T15:24:40.000Z | """This module contains tests for pseudo.type.Statement"""
import pytest
from pseudo.runtime import RunTime
from pseudo.type import Statement
@pytest.mark.timeout(2)
def test_exit():
try:
Statement("koniec").eval(RunTime())
except SystemExit:
pass
else:
raise AssertionError
| 18.529412 | 58 | 0.692063 |
0a8828838f9b5e06aebb5115fd31ea19dfcbd11d | 1,047 | py | Python | benchmark/analyzer/others.py | anyoptimization/pymoo-benchmark | 37460f3bf0159c1113cd48d5698af6493f26ed62 | [
"Apache-2.0"
] | null | null | null | benchmark/analyzer/others.py | anyoptimization/pymoo-benchmark | 37460f3bf0159c1113cd48d5698af6493f26ed62 | [
"Apache-2.0"
] | null | null | null | benchmark/analyzer/others.py | anyoptimization/pymoo-benchmark | 37460f3bf0159c1113cd48d5698af6493f26ed62 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from benchmark.analyzer.analyzer import Analyzer
from benchmark.core.util import fill_forward_if_nan
from pymoo.indicators.igd import IGD
from pymoo.util.misc import from_dict
class MultiObjectiveConvergenceAnalyzer(Analyzer):
def __init__(self, nan_if_not_available=True, **kwargs) -> None:
super().__init__(**kwargs)
self.nan_if_not_available = nan_if_not_available
def do(self, data, **kwargs):
t = []
for entry in data:
t.extend([e['n_evals'] for e in entry["callback"]])
t = sorted(list(set(t)))
hash = {}
for k, v in enumerate(t):
hash[v] = k
for i, entry in enumerate(data):
igd = IGD(entry["pf"], zero_to_one=True)
vals = np.full(len(t), np.nan)
for v in entry["callback"]:
_t, F = from_dict(v, "n_evals", "opt")
vals[hash[_t]] = igd.do(F)
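            # carry the last computed IGD value forward over evaluation counts where this
            # run logged no population (vals was initialised to NaN above)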
fill_forward_if_nan(vals)
entry["n_evals"] = t
entry["conv"] = vals
| 26.846154 | 68 | 0.585482 |
dad767b45b0b33234808f018b11e38f864935588 | 656 | py | Python | tests/snippets/ast_snippet.py | ZapAnton/RustPython | 61e752e2b014239865c74c6997429a80433ee38a | [
"MIT"
] | null | null | null | tests/snippets/ast_snippet.py | ZapAnton/RustPython | 61e752e2b014239865c74c6997429a80433ee38a | [
"MIT"
] | null | null | null | tests/snippets/ast_snippet.py | ZapAnton/RustPython | 61e752e2b014239865c74c6997429a80433ee38a | [
"MIT"
] | null | null | null |
import ast
print(ast)
source = """
def foo():
print('bar')
pass
"""
n = ast.parse(source)
print(n)
print(n.body)
print(n.body[0].name)
assert n.body[0].name == 'foo'
foo = n.body[0]
assert foo.lineno == 2
print(foo.body)
assert len(foo.body) == 2
print(foo.body[0])
print(foo.body[0].value.func.id)
assert foo.body[0].value.func.id == 'print'
assert foo.body[0].lineno == 3
assert foo.body[1].lineno == 4
n = ast.parse("3 < 4 > 5\n")
assert n.body[0].value.left.n == 3
assert 'Lt' in str(n.body[0].value.ops[0])
assert 'Gt' in str(n.body[0].value.ops[1])
assert n.body[0].value.comparators[0].n == 4
assert n.body[0].value.comparators[1].n == 5
| 21.16129 | 44 | 0.644817 |
34cec6ef3597d65ad3feedc10c8c2ba6abd02cbf | 1,086 | py | Python | btrfs/datadog_checks/btrfs/config_models/defaults.py | kjmadscience/integrations-core | 663bdf44730dd6c9f3565c121318b320bfcb4988 | [
"BSD-3-Clause"
] | null | null | null | btrfs/datadog_checks/btrfs/config_models/defaults.py | kjmadscience/integrations-core | 663bdf44730dd6c9f3565c121318b320bfcb4988 | [
"BSD-3-Clause"
] | null | null | null | btrfs/datadog_checks/btrfs/config_models/defaults.py | kjmadscience/integrations-core | 663bdf44730dd6c9f3565c121318b320bfcb4988 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_service(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_excluded_devices(field, value):
return get_default_field_value(field, value)
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_tags(field, value):
return get_default_field_value(field, value)
| 25.255814 | 105 | 0.779926 |
ea09ec02e8ea7b7e01112c03cbc5e1456734108a | 974 | py | Python | src/dispatch/tag/models.py | ZPerling/dispatch | 163ed1d2ae9dec90fdd42bca1d28ae88bf36abb2 | [
"Apache-2.0"
] | null | null | null | src/dispatch/tag/models.py | ZPerling/dispatch | 163ed1d2ae9dec90fdd42bca1d28ae88bf36abb2 | [
"Apache-2.0"
] | null | null | null | src/dispatch/tag/models.py | ZPerling/dispatch | 163ed1d2ae9dec90fdd42bca1d28ae88bf36abb2 | [
"Apache-2.0"
] | null | null | null | from typing import Optional, List
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy_utils import TSVectorType
from dispatch.database import Base
from dispatch.models import DispatchBase, tags_incidents, TimeStampMixin
class Tag(Base, TimeStampMixin):
id = Column(Integer, primary_key=True)
name = Column(String, unique=True)
description = Column(String)
uri = Column(String)
source = Column(String)
type = Column(String)
incidents = relationship("Incident", secondary=tags_incidents, backref="tags")
search_vector = Column(TSVectorType("name"))
# Pydantic models
class TagBase(DispatchBase):
name: str
source: str
type: str
uri: Optional[str]
description: Optional[str]
class TagCreate(TagBase):
pass
class TagUpdate(TagBase):
id: int
class TagRead(TagBase):
id: int
class TagPagination(DispatchBase):
items: List[TagRead]
total: int
| 20.723404 | 82 | 0.731006 |
d76a6b6662a4d5ed40c22fc67c83224b1434bf3c | 2,959 | py | Python | train_rnn_gan.py | nicksawhney/world-models | 1f443a50728c9312f7c977afd4deb3ae32aed95f | [
"MIT"
] | null | null | null | train_rnn_gan.py | nicksawhney/world-models | 1f443a50728c9312f7c977afd4deb3ae32aed95f | [
"MIT"
] | null | null | null | train_rnn_gan.py | nicksawhney/world-models | 1f443a50728c9312f7c977afd4deb3ae32aed95f | [
"MIT"
] | null | null | null | #python 04_train_rnn.py --new_model --batch_size 200
# python 04_train_rnn.py --new_model --batch_size 200
# python 04_train_rnn.py --new_model --batch_size 100
import argparse
import numpy as np
import os
ROOT_DIR_NAME = './data/'
SERIES_DIR_NAME = './data/series_gan/'
def get_filelist(N):
filelist = os.listdir(SERIES_DIR_NAME)
filelist = [x for x in filelist if (x != '.DS_Store' and x!='.gitignore')]
filelist.sort()
length_filelist = len(filelist)
if length_filelist > N:
filelist = filelist[:N]
if length_filelist < N:
N = length_filelist
return filelist, N
def random_batch(filelist, batch_size):
N_data = len(filelist)
indices = np.random.permutation(N_data)[0:batch_size]
z_list = []
action_list = []
rew_list = []
done_list = []
for i in indices:
try:
new_data = np.load(SERIES_DIR_NAME + filelist[i], allow_pickle=True)
mu = new_data['mu']
log_var = new_data['log_var']
action = new_data['action']
reward = new_data['reward']
done = new_data['done']
reward = np.expand_dims(reward, axis=1)
done = np.expand_dims(done, axis=1)
s = log_var.shape
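            # reparameterization: sample z ~ N(mu, exp(log_var)) from the stored VAE statistics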
z = mu + np.exp(log_var/2.0) * np.random.randn(*s)
z_list.append(z)
action_list.append(action)
rew_list.append(reward)
done_list.append(done)
except Exception as e:
print(e)
z_list = np.array(z_list)
action_list = np.array(action_list)
rew_list = np.array(rew_list)
done_list = np.array(done_list)
return z_list, action_list, rew_list, done_list
def main(args):
new_model = args.new_model
N = int(args.N)
steps = int(args.steps)
batch_size = int(args.batch_size)
rnn = RNN() #learning_rate = LEARNING_RATE
if not new_model:
try:
rnn.set_weights('./rnn/weights_gan.h5')
except:
print("Either set --new_model or ensure ./rnn/weights_gan.h5 exists")
raise
filelist, N = get_filelist(N)
for step in range(steps):
print('STEP ' + str(step))
z, action, rew ,done = random_batch(filelist, batch_size)
rnn_input = np.concatenate([z[:, :-1, :], action[:, :-1, :], rew[:, :-1, :]], axis = 2)
rnn_output = np.concatenate([z[:, 1:, :], rew[:, 1:, :]], axis = 2) #, done[:, 1:, :]
if step == 0:
np.savez_compressed(ROOT_DIR_NAME + 'rnn_files_gan.npz', rnn_input = rnn_input, rnn_output = rnn_output)
rnn.train(rnn_input, rnn_output)
if step % 10 == 0:
rnn.model.save_weights('./rnn/weights_gan.h5')
rnn.model.save_weights('./rnn/weights_gan.h5')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=('Train RNN'))
parser.add_argument('--N',default = 10000, help='number of episodes to use to train')
parser.add_argument('--new_model', action='store_true', help='start a new model from scratch?')
parser.add_argument('--steps', default = 4000, help='how many rnn batches to train over')
parser.add_argument('--batch_size', default = 100, help='how many episodes in a batch?')
args = parser.parse_args()
main(args)
| 23.117188 | 107 | 0.673876 |
ab3ea14a96fb8c546499af9b334654ad2b390a28 | 26,244 | py | Python | torch/_appdirs.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | torch/_appdirs.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | torch/_appdirs.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
# flake8: noqa
"""
This file is directly from
https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
The license of https://github.com/ActiveState/appdirs copied below:
# This is the MIT license
Copyright (c) 2010 ActiveState Software Inc.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""Utilities for determining application-specific dirs.
See <https://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
import sys
import os
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
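# Usage sketch (values are the typical results listed in the docstring, not computed here):
# on Linux with XDG defaults, user_data_dir("SuperApp", "Acme", version="1.0") resolves to
# "~/.local/share/SuperApp/1.0"; on Windows 7 with roaming=True it resolves to
# "C:\\Users\\<username>\\AppData\\Roaming\\Acme\\SuperApp\\1.0".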
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: ~/Library/Preferences/<AppName>
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system == "win32":
path = user_data_dir(appname, appauthor, None, roaming)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Preferences/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == 'win32':
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
elif system == 'darwin':
path = os.path.expanduser('/Library/Preferences')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
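def _example_user_state_dir():
    # Hedged usage sketch, not part of the original module: "MyApp" is an
    # illustrative assumption. On Unix this honours $XDG_STATE_HOME and
    # otherwise defaults to ~/.local/state/MyApp/1.0 for the given version.
    return user_state_dir("MyApp", version="1.0")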
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
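def _example_user_log_dir():
    # Hedged usage sketch, not part of the original module: "MyApp" and
    # "MyCompany" are illustrative assumptions. With opinion=True (the default)
    # "Logs" (Windows) or "log" (Unix) is appended; opinion=False suppresses it.
    return (user_log_dir("MyApp", "MyCompany"),
            user_log_dir("MyApp", "MyCompany", opinion=False))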
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
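def _example_appdirs_wrapper():
    # Hedged usage sketch, not part of the original module: "SuperApp" and
    # "Acme" are illustrative names taken from the docstrings above. Each
    # property simply forwards to the matching module-level function with the
    # options stored on the wrapper.
    dirs = AppDirs("SuperApp", "Acme", version="1.0")
    return {
        "data": dirs.user_data_dir,
        "config": dirs.user_config_dir,
        "cache": dirs.user_cache_dir,
        "state": dirs.user_state_dir,
        "logs": dirs.user_log_dir,
    }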
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import winreg as _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| 40.751553 | 122 | 0.638279 |
095a88927a9ac0498b5c8132e7e64a752e521ed4 | 1,292 | py | Python | tests/unit/test_wpl.py | blairconrad/LibraryHippo | ca74853485c15b06e138f713f6f47cbbfa655127 | [
"MIT"
] | null | null | null | tests/unit/test_wpl.py | blairconrad/LibraryHippo | ca74853485c15b06e138f713f6f47cbbfa655127 | [
"MIT"
] | null | null | null | tests/unit/test_wpl.py | blairconrad/LibraryHippo | ca74853485c15b06e138f713f6f47cbbfa655127 | [
"MIT"
] | null | null | null | from app.libraries.wpl import WPL
from app.models import Card
def test_check_card_finds_holds(requests_mock):
login_url = (
"https://books.kpl.org/iii/cas/login?service="
+ "https://books.kpl.org/patroninfo~S3/"
+ "j_acegi_cas_security_check&lang=eng&scope=3"
)
requests_mock.get(login_url, text="")
requests_mock.post(login_url, text="<a href='/holds'>holds</a>")
requests_mock.get(
"/holds",
text="""
<table class="patFunc">
<tr class="patFuncHeaders"><th> TITLE </th><th>STATUS</th></tr>
<tr class="patFuncEntry">
<td class="patFuncTitle">Blood heir / Amélie Wen Zhao</td>
<td class="patFuncStatus"> 9 of 83 holds </td>
</tr>
</table>
""",
)
card = make_card()
target = WPL()
check_result = target.check_card(card)
assert check_result
assert check_result["holds"]
assert check_result["holds"][0] == {
"Status": " 9 of 83 holds ",
"Title": "Blood heir / Amélie Wen Zhao",
}
def make_card():
card = Card()
card.patron_name = "Blair Conrad"
card.number = "123456789"
card.pin = "9876"
return card
| 27.489362 | 77 | 0.551084 |
805aca45232a2b42ebd5d161a91cc2941fcd0dd2 | 454 | py | Python | cride/circles/admin.py | dmontoya1/cride | 0667e41edbd237c6ba62657c44034f2a8a7c3754 | [
"MIT"
] | null | null | null | cride/circles/admin.py | dmontoya1/cride | 0667e41edbd237c6ba62657c44034f2a8a7c3754 | [
"MIT"
] | null | null | null | cride/circles/admin.py | dmontoya1/cride | 0667e41edbd237c6ba62657c44034f2a8a7c3754 | [
"MIT"
] | null | null | null | """Circles Admin"""
# Django
from django.contrib import admin
# models
from cride.circles.models import Circle
@admin.register(Circle)
class CircleAdmin(admin.ModelAdmin):
""" Circle admin."""
list_display = (
'name',
'slug_name',
'is_public',
'verified',
'is_limited',
'members_limited'
)
search_fields = ('slug_name', 'name')
list_filter = ('is_public', 'is_limited', 'verified')
| 19.73913 | 57 | 0.612335 |
4ec34d4ccaeb19e32137cb122eb95a769e1a1953 | 2,481 | py | Python | speakers/forms.py | andyzsf/ConMan | e8d4aa9eeda7a85b39d8d897dbdba43de3cee9c1 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 7 | 2015-05-28T19:18:57.000Z | 2021-04-16T04:13:26.000Z | speakers/forms.py | andyzsf/ConMan | e8d4aa9eeda7a85b39d8d897dbdba43de3cee9c1 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | speakers/forms.py | andyzsf/ConMan | e8d4aa9eeda7a85b39d8d897dbdba43de3cee9c1 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2018-11-05T09:33:45.000Z | 2022-02-18T14:27:22.000Z | from common.models import UserProfile
from speakers.models import *
from django import forms
from django.forms import ValidationError
class PresentationForm(forms.ModelForm):
    cat = forms.ModelChoiceField(Category.objects.all(), label=u'Category', help_text='Choose the closest category')
audiences = forms.ModelMultipleChoiceField(AudienceType.objects.all(), widget=forms.SelectMultiple(attrs={'size': 4}), help_text='Choose one or more audience')
title = forms.CharField()
short_abstract = forms.CharField(widget=forms.Textarea,
min_length=1,
max_length=5000,
help_text="An abstract less than 5000 characters")
slides = forms.FileField(required=False, widget=forms.HiddenInput, label='')
# long_abstract = forms.CharField(widget=forms.Textarea,min_length=1,required=False,max_length=3000)
# presenter = forms.ModelMultipleChoiceField(UserProfile.objects.none(), widget=forms.SelectMultiple(attrs={'size': 4}), label=u'Presenter(s)', help_text='Multiple presenters can be added to a presentation')
class Meta:
model = Presentation
fields = ('cat', 'audiences', 'title', 'short_abstract')
# def clean(self):
# if self.cleaned_data.get('password') and self.cleaned_data.get('confirm_password') and self.cleaned_data['password'] != self.cleaned_data['confirm_password']:
# raise ValidationError(u'Please make sure your passwords match.')
# return self.cleaned_data
def __init__(self, *args, **kwargs):
super(PresentationForm, self).__init__(*args, **kwargs)
if self.instance and self.instance.status.name == 'Approved':
self['cat'].field.widget.attrs.update({'disabled': True})
self['cat'].field.required = False
self['audiences'].field.widget.attrs.update({'disabled': True})
self['audiences'].field.required = False
self['title'].field.widget.attrs.update({'disabled': 'disabled'})
self['title'].field.required = False
self['short_abstract'].field.widget.attrs.update({'disabled': 'disabled'})
self['short_abstract'].field.required = False
self['slides'].field.widget = forms.FileInput()
self['slides'].field.label = 'Slide Deck'
class PresentationSlidesForm(forms.ModelForm):
class Meta:
model = Presentation
fields = ('slides',)
| 52.787234 | 210 | 0.664248 |
602294c6d90ffc3ca2e688ee25b4e7cd350913c9 | 8,594 | py | Python | magnumclient/osc/v1/quotas.py | gyliu513/python-magnumclient | c0d368360831cd29affa47e77eba4e81e4c6f0f8 | [
"Apache-2.0"
] | 29 | 2015-04-17T12:06:04.000Z | 2019-05-03T06:27:17.000Z | magnumclient/osc/v1/quotas.py | gyliu513/python-magnumclient | c0d368360831cd29affa47e77eba4e81e4c6f0f8 | [
"Apache-2.0"
] | 2 | 2016-01-19T14:45:58.000Z | 2016-11-17T16:30:18.000Z | magnumclient/osc/v1/quotas.py | gyliu513/python-magnumclient | c0d368360831cd29affa47e77eba4e81e4c6f0f8 | [
"Apache-2.0"
] | 26 | 2015-08-17T19:28:56.000Z | 2020-07-10T08:12:07.000Z | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnumclient.common import cliutils as utils
from magnumclient.common import utils as magnum_utils
from magnumclient.i18n import _
from osc_lib.command import command
QUOTA_ATTRIBUTES = [
'resource',
'created_at',
'updated_at',
'hard_limit',
'project_id',
'id'
]
def _show_quota(quota):
utils.print_dict(quota._info)
class CreateQuotas(command.Command):
_description = _("Create a quota.")
def get_parser(self, prog_name):
parser = super(CreateQuotas, self).get_parser(prog_name)
parser.add_argument('--project-id',
required=True,
metavar='<project-id>',
help='Project ID')
parser.add_argument('--resource',
required=True,
metavar='<resource>',
help='Resource name.')
parser.add_argument('--hard-limit',
metavar='<hard-limit>',
type=int,
default=1,
help='Max resource limit (default: hard-limit=1)')
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
mag_client = self.app.client_manager.container_infra
opts = {
'project_id': parsed_args.project_id,
'resource': parsed_args.resource,
'hard_limit': parsed_args.hard_limit
}
try:
quota = mag_client.quotas.create(**opts)
_show_quota(quota)
except Exception as e:
print("Create quota for project_id %(id)s resource %(res)s failed:"
" %(e)s" % {'id': parsed_args.project_id,
'res': parsed_args.resource,
'e': e})
class DeleteQuotas(command.Command):
_description = _("Delete specified resource quota.")
def get_parser(self, prog_name):
parser = super(DeleteQuotas, self).get_parser(prog_name)
parser.add_argument('--project-id',
required=True,
metavar='<project-id>',
help='Project ID')
parser.add_argument('--resource',
required=True,
metavar='<resource>',
help='Resource name.')
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
mag_client = self.app.client_manager.container_infra
try:
mag_client.quotas.delete(parsed_args.project_id,
parsed_args.resource)
print("Request to delete quota for project id %(id)s and resource "
"%(res)s has been accepted." % {'id': parsed_args.project_id,
'res': parsed_args.resource})
except Exception as e:
print("Quota delete failed for project id %(id)s and resource "
"%(res)s :%(e)s" % {'id': parsed_args.project_id,
'res': parsed_args.resource,
'e': e})
class ShowQuotas(command.Command):
_description = _("Show details about the given project resource quota.")
def get_parser(self, prog_name):
parser = super(ShowQuotas, self).get_parser(prog_name)
parser.add_argument('--project-id',
required=True,
metavar='<project-id>',
help='Project ID')
parser.add_argument('--resource',
required=True,
metavar='<resource>',
help='Resource name.')
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
mag_client = self.app.client_manager.container_infra
project_id = parsed_args.project_id
resource = parsed_args.resource
quota = mag_client.quotas.get(project_id, resource)
_show_quota(quota)
class UpdateQuotas(command.Command):
_description = _("Update information about the given "
"project resource quota.")
def get_parser(self, prog_name):
parser = super(UpdateQuotas, self).get_parser(prog_name)
parser.add_argument('--project-id',
required=True,
metavar='<project-id>',
help='Project ID')
parser.add_argument('--resource',
required=True,
metavar='<resource>',
help='Resource name.')
parser.add_argument('--hard-limit',
metavar='<hard-limit>',
type=int,
default=1,
help='Max resource limit (default: hard-limit=1)')
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
mag_client = self.app.client_manager.container_infra
opts = {
'project_id': parsed_args.project_id,
'resource': parsed_args.resource,
'hard_limit': parsed_args.hard_limit
}
try:
quota = mag_client.quotas.update(parsed_args.project_id,
parsed_args.resource, opts)
_show_quota(quota)
except Exception as e:
print("Update quota for project_id %(id)s resource %(res)s failed:"
" %(e)s" % {'id': parsed_args.project_id,
'res': parsed_args.resource,
'e': e})
class ListQuotas(command.Command):
_description = _("Print a list of available quotas.")
def get_parser(self, prog_name):
parser = super(ListQuotas, self).get_parser(prog_name)
parser.add_argument('--marker',
metavar='<marker>',
default=None,
help=_('The last quota UUID of the previous page; '
'displays list of quotas after "marker".'))
parser.add_argument('--limit',
metavar='<limit>',
type=int,
help='Maximum number of quotas to return.')
parser.add_argument('--sort-key',
metavar='<sort-key>',
help='Column to sort results by.')
parser.add_argument('--sort-dir',
metavar='<sort-dir>',
choices=['desc', 'asc'],
help='Direction to sort. "asc" or "desc".')
parser.add_argument('--all-tenants',
action='store_true',
default=False,
help='Flag to indicate list all tenant quotas.')
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
mag_client = self.app.client_manager.container_infra
quotas = mag_client.quotas.list(marker=parsed_args.marker,
limit=parsed_args.limit,
sort_key=parsed_args.sort_key,
sort_dir=parsed_args.sort_dir,
all_tenants=parsed_args.all_tenants)
columns = ['project_id', 'resource', 'hard_limit']
utils.print_list(quotas, columns,
{'versions':
magnum_utils.print_list_field('versions')},
sortby_index=None)
| 39.787037 | 79 | 0.521759 |
0484f519c83af5cd2cbb6faa693f19dc014dd419 | 25,555 | py | Python | zoomrec.py | v1nc/zoomrec | 5b31e25f57697f18c92f698bcf7a20207575bfb0 | [
"MIT"
] | null | null | null | zoomrec.py | v1nc/zoomrec | 5b31e25f57697f18c92f698bcf7a20207575bfb0 | [
"MIT"
] | null | null | null | zoomrec.py | v1nc/zoomrec | 5b31e25f57697f18c92f698bcf7a20207575bfb0 | [
"MIT"
] | null | null | null | import csv
import logging
import os
import psutil
import pyautogui
import random
import schedule
import signal
import subprocess
import threading
import time
import atexit
from datetime import datetime, timedelta
import requests
global ONGOING_MEETING
global VIDEO_PANEL_HIDED
global TELEGRAM_TOKEN
global TELEGRAM_RETRIES
global TELEGRAM_CHAT_ID
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
# Turn DEBUG on:
# - screenshot on error
# - record joining
# - do not exit container on error
DEBUG = True if os.getenv('DEBUG') == 'True' else False
# Disable failsafe
pyautogui.FAILSAFE = False
# Get vars
BASE_PATH = os.getenv('HOME')
CSV_PATH = os.path.join(BASE_PATH, "meetings.csv")
IMG_PATH = os.path.join(BASE_PATH, "img")
REC_PATH = os.path.join(BASE_PATH, "recordings")
DEBUG_PATH = os.path.join(REC_PATH, "screenshots")
# Telegram bot token and chat id are read from environment variables
TELEGRAM_TOKEN = os.getenv('TELEGRAM_BOT_TOKEN')
TELEGRAM_CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')
TELEGRAM_RETRIES = 5
# Change name that is displayed inside Zoom meeting
DISPLAY_NAME = "Dieter Gaber"
TIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
CSV_DELIMITER = ';'
ONGOING_MEETING = False
VIDEO_PANEL_HIDED = False
class BackgroundThread:
def __init__(self, interval=10):
# Sleep interval between
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global ONGOING_MEETING
ONGOING_MEETING = True
logging.debug("Check continuously if meeting has ended..")
while ONGOING_MEETING:
# Check if recording
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'warn_meeting_recording.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'accept_recording.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if ended
if (pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'meeting_ended_by_host_1.png'),
confidence=0.9) is not None or pyautogui.locateOnScreen(
os.path.join(IMG_PATH, 'meeting_ended_by_host_2.png'), confidence=0.9) is not None):
ONGOING_MEETING = False
logging.info("Meeting ended by host..")
time.sleep(self.interval)
class HideViewOptionsThread:
def __init__(self, interval=10):
# Sleep interval between
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global VIDEO_PANEL_HIDED
logging.debug("Check continuously if screensharing is active..")
while ONGOING_MEETING:
# Check if host is sharing poll results
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'host_is_sharing_poll_results.png'),
confidence=0.9,
minSearchTime=2) is not None):
logging.info("Host is sharing poll results..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9)
pyautogui.click(x, y)
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'exit.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Closed poll results window..")
except TypeError:
logging.error("Could not exit poll results window!")
if DEBUG:
                            # 'description' is not defined in this thread; keep the timestamp only
                            pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
                                TIME_FORMAT)) + "_close_poll_results_error.png")
except TypeError:
logging.error("Could not find poll results window anymore!")
if DEBUG:
                        # 'description' is not defined in this thread; keep the timestamp only
                        pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
                            TIME_FORMAT)) + "_find_poll_results_error.png")
# Check if view options available
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'view_options.png'), confidence=0.9) is not None:
if not VIDEO_PANEL_HIDED:
logging.info("Screensharing active..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
time.sleep(1)
# Hide video panel
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'show_video_panel.png'),
confidence=0.9) is not None:
# Leave 'Show video panel' and move mouse from screen
pyautogui.moveTo(0, 0)
pyautogui.click(0, 0)
VIDEO_PANEL_HIDED = True
else:
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'hide_video_panel.png'), confidence=0.9)
pyautogui.click(x, y)
# Move mouse from screen
pyautogui.moveTo(0, 0)
VIDEO_PANEL_HIDED = True
except TypeError:
logging.error("Could not hide video panel!")
except TypeError:
logging.error("Could not find view options!")
else:
VIDEO_PANEL_HIDED = False
time.sleep(self.interval)
def send_telegram_message(text):
global TELEGRAM_TOKEN
global TELEGRAM_CHAT_ID
global TELEGRAM_RETRIES
url_req = "https://api.telegram.org/bot" + TELEGRAM_TOKEN + "/sendMessage" + "?chat_id=" + TELEGRAM_CHAT_ID + "&text=" + text
tries = 0
done = False
while not done:
results = requests.get(url_req)
results = results.json()
done = 'ok' in results and results['ok']
        tries += 1
        if not done and tries < TELEGRAM_RETRIES:
            print("Sending Telegram message failed, retrying in 5 seconds...")
time.sleep(5)
if not done and tries >= TELEGRAM_RETRIES:
print("Sending Telegram message failed {} times, please check your credentials!".format(tries))
done = True
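# Hedged note, not part of the original code: the Bot API expects the message
# text to be URL-encoded. Letting requests build the query string would handle
# that automatically, e.g.:
#   requests.get("https://api.telegram.org/bot" + TELEGRAM_TOKEN + "/sendMessage",
#                params={"chat_id": TELEGRAM_CHAT_ID, "text": text})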
def check_connecting(zoom_pid, start_date, duration):
# Check if connecting
check_periods = 0
connecting = False
# Check if connecting
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is not None:
connecting = True
logging.info("Connecting..")
# Wait while connecting
# Exit when meeting ends after time
while connecting:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom_pid), signal.SIGQUIT)
return
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is None:
logging.info("Maybe not connecting anymore..")
check_periods += 1
if check_periods >= 2:
connecting = False
logging.info("Not connecting anymore..")
return
time.sleep(2)
def join_meeting(meet_id):
logging.info("Join a meeting..")
found_join_meeting = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), minSearchTime=2, confidence=0.9)
pyautogui.click(x, y)
found_join_meeting = True
except TypeError:
pass
if not found_join_meeting:
logging.error("Could not find 'Join Meeting' on screen!")
return False
time.sleep(2)
# Insert meeting id
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.write(meet_id, interval=0.1)
# Insert name
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.hotkey('ctrl', 'a')
pyautogui.write(DISPLAY_NAME, interval=0.1)
# Configure
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
time.sleep(2)
# Sometimes invalid id error is displayed
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'invalid_meeting_id.png'), confidence=0.9) is not None:
logging.error("Maybe a invalid meeting id was inserted..")
left = False
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'leave.png'), confidence=0.9)
pyautogui.click(x, y)
left = True
except TypeError:
pass
# Valid id
if left:
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), confidence=0.9) is not None:
logging.error("Invalid meeting id!")
return False
else:
return True
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'authorized_attendees_only.png'), confidence=0.9) is not None:
logging.error("This meeting is for authorized attendees only!")
return False
return True
def find_process_id_by_name(process_name):
list_of_process_objects = []
# Iterate over the all the running process
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'name'])
# Check if process name contains the given name string.
if process_name.lower() in pinfo['name'].lower():
list_of_process_objects.append(pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return list_of_process_objects
def show_toolbars():
# Mouse move to show toolbar
width, height = pyautogui.size()
y = (height / 2)
pyautogui.moveTo(0, y, duration=0.5)
pyautogui.moveTo(width - 1, y, duration=0.5)
def join_audio(description):
audio_joined = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_with_computer_audio.png'), confidence=0.9)
logging.info("Join with computer audio..")
pyautogui.click(x, y)
audio_joined = True
return True
except TypeError:
logging.error("Could not join with computer audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_with_computer_audio_error.png")
time.sleep(1)
if not audio_joined:
try:
show_toolbars()
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_audio.png'), confidence=0.9)
pyautogui.click(x, y)
            return join_audio(description)  # propagate the retry's result to the caller
except TypeError:
logging.error("Could not join audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_audio_error.png")
return False
def join(meet_id, meet_pw, duration, description):
global VIDEO_PANEL_HIDED
ffmpeg_debug = None
logging.info("Join meeting: " + description)
if DEBUG:
# Start recording
width, height = pyautogui.size()
resolution = str(width) + 'x' + str(height)
disp = os.getenv('DISPLAY')
logging.info("Start recording..")
filename = os.path.join(
REC_PATH, time.strftime(TIME_FORMAT)) + "-" + description + "-JOIN.mkv"
command = "ffmpeg -nostats -loglevel quiet -f pulse -ac 2 -i 1 -f x11grab -r 30 -s " + resolution + " -i " + \
disp + " -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 " + filename
ffmpeg_debug = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
atexit.register(os.killpg, os.getpgid(
ffmpeg_debug.pid), signal.SIGQUIT)
# Exit Zoom if running
exit_process_by_name("zoom")
# Start Zoom
zoom = subprocess.Popen("zoom", stdout=subprocess.PIPE,
shell=True, preexec_fn=os.setsid)
# Wait while zoom process is there
list_of_process_ids = find_process_id_by_name('zoom')
while len(list_of_process_ids) <= 0:
logging.info("No Running Zoom Process found!")
list_of_process_ids = find_process_id_by_name('zoom')
time.sleep(1)
# Wait for zoom is started
while pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'join_meeting.png'), confidence=0.9) is None:
logging.info("Zoom not ready yet!")
time.sleep(1)
logging.info("Zoom started!")
start_date = datetime.now()
joined = join_meeting(meet_id)
if not joined:
send_telegram_message("Failed to join meeting {}!".format(description))
logging.error("Failed to join meeting!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG and ffmpeg_debug is not None:
# closing ffmpeg
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
pyautogui.write(meet_pw, interval=0.2)
pyautogui.press('tab')
pyautogui.press('space')
# Joined meeting
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if meeting is started by host
check_periods = 0
meeting_started = True
time.sleep(2)
# Check if waiting for host
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9, minSearchTime=3) is not None:
meeting_started = False
logging.info("Please wait for the host to start this meeting.")
# Wait for the host to start this meeting
# Exit when meeting ends after time
while not meeting_started:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9) is None:
logging.info("Maybe meeting was started now.")
check_periods += 1
if check_periods >= 2:
meeting_started = True
logging.info("Meeting started by host.")
break
time.sleep(2)
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if in waiting room
check_periods = 0
in_waitingroom = False
time.sleep(2)
# Check if joined into waiting room
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'waiting_room.png'), confidence=0.9,
minSearchTime=3) is not None:
in_waitingroom = True
logging.info("Please wait, the meeting host will let you in soon..")
# Wait while host will let you in
# Exit when meeting ends after time
while in_waitingroom:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'waiting_room.png'), confidence=0.9) is None:
logging.info("Maybe no longer in the waiting room..")
check_periods += 1
if check_periods == 2:
logging.info("No longer in the waiting room..")
break
time.sleep(2)
# Meeting joined
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
logging.info("Joined meeting..")
# Check if recording warning is shown at the beginning
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'warn_meeting_recording.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'accept_recording.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if host is sharing poll results at the beginning
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("Host is sharing poll results..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9)
pyautogui.click(x, y)
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'exit.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Closed poll results window..")
except TypeError:
logging.error("Could not exit poll results window!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_close_poll_results_error.png")
except TypeError:
logging.error("Could not find poll results window anymore!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_find_poll_results_error.png")
# Start BackgroundThread
BackgroundThread()
# Set computer audio
time.sleep(2)
if not join_audio(description):
logging.info("Exit!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
time.sleep(2)
join(meet_id, meet_pw, duration, description)
time.sleep(2)
logging.info("Enter fullscreen..")
show_toolbars()
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_error.png")
time.sleep(2)
fullscreen = False
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'fullscreen.png'), confidence=0.9)
pyautogui.click(x, y)
fullscreen = True
except TypeError:
logging.error("Could not find fullscreen!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_fullscreen_error.png")
# TODO: Check for 'Exit Full Screen': already fullscreen -> fullscreen = True
# Screensharing already active
if not fullscreen:
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view options!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_options_error.png")
# Switch to fullscreen
time.sleep(2)
show_toolbars()
logging.info("Enter fullscreen..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'enter_fullscreen.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not enter fullscreen by image!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_enter_fullscreen_error.png")
return
time.sleep(2)
# Screensharing not active
screensharing_active = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
screensharing_active = True
except TypeError:
logging.error("Could not find view options!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_options_error.png")
time.sleep(2)
if screensharing_active:
# hide video panel
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'hide_video_panel.png'), confidence=0.9)
pyautogui.click(x, y)
VIDEO_PANEL_HIDED = True
except TypeError:
logging.error("Could not hide video panel!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_hide_video_panel_error.png")
else:
# switch to speaker view
show_toolbars()
logging.info("Switch view..")
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_error.png")
time.sleep(2)
try:
# speaker view
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'speaker_view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not switch speaker view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_speaker_view_error.png")
try:
# minimize panel
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'minimize.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not minimize panel!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_minimize_error.png")
# Move mouse from screen
pyautogui.moveTo(0, 0)
pyautogui.click(0, 0)
if DEBUG and ffmpeg_debug is not None:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
# Audio
# Start recording
logging.info("Start recording..")
filename = os.path.join(REC_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + ".mkv"
width, height = pyautogui.size()
resolution = str(width) + 'x' + str(height)
disp = os.getenv('DISPLAY')
command = "ffmpeg -nostats -loglevel error -f pulse -ac 2 -i 1 -f x11grab -r 30 -s " + resolution + " -i " + \
disp + " -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 " + filename
ffmpeg = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
atexit.register(os.killpg, os.getpgid(
ffmpeg.pid), signal.SIGQUIT)
start_date = datetime.now()
end_date = start_date + timedelta(seconds=duration + 300) # Add 5 minutes
# Start thread to check active screensharing
HideViewOptionsThread()
# Send Telegram notification
send_telegram_message("Joined Meeting '{}' and started recording.".format(description))
meeting_running = True
while meeting_running:
time_remaining = end_date - datetime.now()
if time_remaining.total_seconds() < 0 or not ONGOING_MEETING:
meeting_running = False
else:
print(f"Meeting ends in {time_remaining}", end="\r", flush=True)
time.sleep(5)
logging.info("Meeting ends at %s" % datetime.now())
# Close everything
if DEBUG and ffmpeg_debug is not None:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
os.killpg(os.getpgid(ffmpeg.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
if not ONGOING_MEETING:
try:
# Press OK after meeting ended by host
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'ok.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_ok_error.png")
send_telegram_message("Meeting '{}' ended.".format(description))
def exit_process_by_name(name):
list_of_process_ids = find_process_id_by_name(name)
if len(list_of_process_ids) > 0:
logging.info(name + " process exists | killing..")
for elem in list_of_process_ids:
process_id = elem['pid']
try:
os.kill(process_id, signal.SIGKILL)
except Exception as ex:
logging.error("Could not terminate " + name +
"[" + str(process_id) + "]: " + str(ex))
def join_ongoing_meeting():
with open(CSV_PATH, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=CSV_DELIMITER)
for row in csv_reader:
# Check and join ongoing meeting
curr_date = datetime.now()
# Monday, tuesday, ..
if row["weekday"].lower() == curr_date.strftime('%A').lower():
curr_time = curr_date.time()
start_time_csv = datetime.strptime(row["time"], '%H:%M')
start_date = curr_date.replace(
hour=start_time_csv.hour, minute=start_time_csv.minute)
start_time = start_date.time()
end_date = start_date + \
timedelta(seconds=int(row["duration"]) * 60 + 300) # Add 5 minutes
end_time = end_date.time()
recent_duration = (end_date - curr_date).total_seconds()
if start_time < end_time:
if start_time <= curr_time <= end_time:
if str(row["record"]) == 'true':
logging.info(
"Join meeting that is currently running..")
join(meet_id=row["id"], meet_pw=row["password"],
duration=recent_duration, description=row["description"])
else: # crosses midnight
if curr_time >= start_time or curr_time <= end_time:
if str(row["record"]) == 'true':
logging.info(
"Join meeting that is currently running..")
join(meet_id=row["id"], meet_pw=row["password"],
duration=recent_duration, description=row["description"])
def setup_schedule():
with open(CSV_PATH, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=CSV_DELIMITER)
line_count = 0
for row in csv_reader:
if str(row["record"]) == 'true':
cmd_string = "schedule.every()." + row["weekday"] \
+ ".at(\"" \
+ (datetime.strptime(row["time"], '%H:%M') - timedelta(minutes=1)).strftime('%H:%M') \
+ "\").do(join, meet_id=\"" + row["id"] \
+ "\", meet_pw=\"" + row["password"] \
+ "\", duration=" + str(int(row["duration"]) * 60) \
+ ", description=\"" + row["description"] + "\")"
cmd = compile(cmd_string, "<string>", "eval")
eval(cmd)
line_count += 1
logging.info("Added %s meetings to schedule." % line_count)
def main():
try:
if DEBUG and not os.path.exists(DEBUG_PATH):
os.makedirs(DEBUG_PATH)
except Exception:
logging.error("Failed to create screenshot folder!")
raise
setup_schedule()
join_ongoing_meeting()
if __name__ == '__main__':
main()
while True:
schedule.run_pending()
time.sleep(1)
time_of_next_run = schedule.next_run()
time_now = datetime.now()
remaining = time_of_next_run - time_now
print(f"Next meeting in {remaining}", end="\r", flush=True)
| 31.164634 | 127 | 0.70632 |
e9cd06d0bca370980037028ed734bf787b37bdf2 | 2,646 | py | Python | tests/test_settings.py | faultyserver/donation-tracker | efd21c1b80c4065eb40cddd7da928727d016cc48 | [
"Apache-2.0"
] | null | null | null | tests/test_settings.py | faultyserver/donation-tracker | efd21c1b80c4065eb40cddd7da928727d016cc48 | [
"Apache-2.0"
] | null | null | null | tests/test_settings.py | faultyserver/donation-tracker | efd21c1b80c4065eb40cddd7da928727d016cc48 | [
"Apache-2.0"
] | null | null | null | import os
from tracker import ajax_lookup_channels
DOMAIN = 'testserver'
SECRET_KEY = 'ForTestingPurposesOnly'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.staticfiles',
'channels',
'post_office',
'paypal.standard.ipn',
'tracker',
'timezone_field',
'ajax_select',
'mptt',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'testdb.sqlite',
'OPTIONS': {'timeout': 5},
},
}
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
USE_TZ = True
TIME_ZONE = 'America/Denver'
ROOT_URLCONF = 'tests.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'debug': True,
'string_if_invalid': 'Invalid Variable: %s',
},
},
]
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.common.CommonMiddleware',
)
AJAX_LOOKUP_CHANNELS = ajax_lookup_channels.AJAX_LOOKUP_CHANNELS
CACHES = {'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache',}}
ASGI_APPLICATION = 'tests.routing.application'
CHANNEL_LAYERS = {'default': {'BACKEND': 'channels.layers.InMemoryChannelLayer'}}
SWEEPSTAKES_URL = 'https://example.com/'
TEST_OUTPUT_DIR = 'test-results'
# uncomment this for some additional logging during testing
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {'console': {'level': 'DEBUG', 'class': 'logging.StreamHandler',},},
# 'loggers': {'django': {'handlers': ['console'],},},
# 'root': {'level': 'INFO'},
# }
| 33.923077 | 86 | 0.65155 |
5614e314da77588614e83d6e53286d8b34dc4cef | 1,599 | py | Python | elastico/search.py | klorenz/python-elastico | 9a39e6cfe33d3081cc52424284c19e9698343006 | [
"MIT"
] | null | null | null | elastico/search.py | klorenz/python-elastico | 9a39e6cfe33d3081cc52424284c19e9698343006 | [
"MIT"
] | null | null | null | elastico/search.py | klorenz/python-elastico | 9a39e6cfe33d3081cc52424284c19e9698343006 | [
"MIT"
] | null | null | null | from .util import to_dt, dt_isoformat
from .util import string
from datetime import datetime, timedelta
def search(es, query=None, index=None):
return es.search(index=index or '*', body=build_query_body(query))
def build_query_body(query):
return {'query': {'query_string': {'query': query}}}
def build_search_body(config, name):
'''build a search body from given key `name` in config.
'''
body = None
# list of filters
if isinstance(config[name], list):
filters = config[name]
# lucene query string
if isinstance(config[name], string):
filters = [{'query_string': {'query': config[name]}}]
# complete search body (including timerange, if any)
if isinstance(config[name], dict):
return config[name]
add_range = False
query = {
'query': {'bool': {'must': filters}},
}
if add_range:
timestamp_field = config.get('timestamp_field', '@timestamp')
timeframe = config.get('timeframe_minutes', 60)
if 'endtime' in config:
endtime = to_dt(config['endtime'])
else:
endtime = datetime.utcnow() #.isoformat('T', 'seconds')+"Z"
if 'starttime' in config:
starttime = to_dt(config['starttime'])
else:
starttime = endtime - timedelta(minutes=timeframe)
starttime = dt_isoformat(starttime)
endtime = dt_isoformat(endtime)
filters += [
{'range': {timestamp_field: {'gte': starttime, 'lte': endtime}}}
]
query['sort'] = {timestamp_field: 'desc'}
return query
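def _example_build_search_body():
    # Hedged usage sketch, not part of the original module: shows the three
    # accepted shapes of config[name] handled above. The 'alert' key and the
    # filter values are illustrative assumptions.
    from_string = build_search_body({'alert': 'status:error'}, 'alert')
    from_filters = build_search_body(
        {'alert': [{'term': {'status': 'error'}}]}, 'alert')
    from_body = build_search_body(
        {'alert': {'query': {'match_all': {}}}}, 'alert')
    return from_string, from_filters, from_body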
| 29.072727 | 76 | 0.610381 |
e07d88c4953cd053365e0ca7976c43a23ee8c0aa | 1,876 | py | Python | src/compas_rhino/geometry/vector.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | null | null | null | src/compas_rhino/geometry/vector.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | null | null | null | src/compas_rhino/geometry/vector.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compas
from compas.geometry import Vector
from compas_rhino.geometry.point import RhinoPoint
if compas.RHINO:
import Rhino
__all__ = ['RhinoVector']
class RhinoVector(RhinoPoint):
"""Wrapper for a Rhino vector objects.
Attributes
----------
x (read-only) : float
The X coordinate.
y (read-only) : float
The Y coordinate.
z (read-only) : float
The Z coordinate.
xyz (read-only) : list
The XYZ coordinates.
"""
def __init__(self):
super(RhinoVector, self).__init__()
@classmethod
def from_geometry(cls, geometry):
"""Construct a vector wrapper from an existing geometry object.
Parameters
----------
geometry : vector or :class:`Rhino.Geometry.Point3d` or :class:`Rhino.Geometry.Vector3d`
The input geometry.
Returns
-------
:class:`compas_rhino.geometry.RhinoVector`
The wrapped vector.
"""
if not isinstance(geometry, (Rhino.Geometry.Vector3d, Rhino.Geometry.Point3d)):
geometry = Rhino.Geometry.Vector3d(geometry[0], geometry[1], geometry[2])
vector = cls()
vector.geometry = geometry
return vector
@classmethod
def from_selection(cls):
raise NotImplementedError
def to_compas(self):
"""Convert the wrapper to a COMPAS object.
Returns
-------
:class:`compas.geometry.Vector`
The COMPAS vector.
"""
return Vector(self.x, self.y, self.z)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| 24.684211 | 96 | 0.564499 |
cc6e21544c6aab286b0951dcbf4f7e007d35b4f7 | 372 | py | Python | stockbuddy/migrations/0004_auto_20200612_1330.py | rejuls/stockbuddy | f4a52110510d6e6c745d881620524d2281765d15 | [
"MIT"
] | 1 | 2020-09-08T02:11:10.000Z | 2020-09-08T02:11:10.000Z | stockbuddy/migrations/0004_auto_20200612_1330.py | rejuls/stockbuddy | f4a52110510d6e6c745d881620524d2281765d15 | [
"MIT"
] | 3 | 2020-09-08T13:09:25.000Z | 2021-12-13T20:45:36.000Z | stockbuddy/migrations/0004_auto_20200612_1330.py | rejuls/stockbuddy | f4a52110510d6e6c745d881620524d2281765d15 | [
"MIT"
] | 1 | 2020-11-21T23:24:12.000Z | 2020-11-21T23:24:12.000Z | # Generated by Django 3.0.5 on 2020-06-12 13:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stockbuddy', '0003_auto_20200612_1325'),
]
operations = [
migrations.RenameField(
model_name='stockdata',
old_name='test',
new_name='prediction3',
),
]
| 19.578947 | 50 | 0.594086 |
080c6ce75d7ef888fd1ee1edaea204a82382fd32 | 2,587 | py | Python | arcade/geometry.py | KommentatorForAll/arcade | 88343fe87efc1bd619412261a725a8f04de08fc7 | [
"MIT"
] | null | null | null | arcade/geometry.py | KommentatorForAll/arcade | 88343fe87efc1bd619412261a725a8f04de08fc7 | [
"MIT"
] | null | null | null | arcade/geometry.py | KommentatorForAll/arcade | 88343fe87efc1bd619412261a725a8f04de08fc7 | [
"MIT"
] | null | null | null | """
Functions for calculating geometry.
"""
import math
from shapely import speedups # type: ignore
from shapely.geometry import Polygon, Point # type: ignore
from typing import List
from arcade import PointList
_PRECISION = 2
speedups.enable()
def are_polygons_intersecting(poly_a: PointList,
poly_b: PointList) -> bool:
"""
Return True if two polygons intersect.
:param PointList poly_a: List of points that define the first polygon.
:param PointList poly_b: List of points that define the second polygon.
:Returns: True or false depending if polygons intersect
:rtype bool:
"""
shapely_polygon_a = Polygon(poly_a)
shapely_polygon_b = Polygon(poly_b)
r2 = False
r1 = shapely_polygon_a.intersects(shapely_polygon_b)
if r1:
r2 = shapely_polygon_a.touches(shapely_polygon_b)
return r1 and not r2
def is_point_in_polygon(x: float, y: float, polygon_point_list):
"""
    Use Shapely to check whether a point lies inside a polygon
Args:
x:
y:
polygon_point_list:
Returns: bool
"""
shapely_point = Point(x, y)
shapely_polygon = Polygon(polygon_point_list)
return shapely_polygon.contains(shapely_point)
def get_distance(x1: float, y1: float, x2: float, y2: float):
""" Get the distance between two points. """
return math.hypot(x1 - x2, y1 - y2)
def clamp(a, low, high):
""" Clamp a number between a range. """
if a > high:
return high
elif a < low:
return low
else:
return a
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
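def _example_rotate_point():
    # Hedged usage sketch, not part of the original module: rotating the point
    # (1, 0) by 90 degrees around the origin yields approximately (0, 1).
    return rotate_point(1, 0, 0, 0, 90)  # -> [0.0, 1.0]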
| 25.362745 | 75 | 0.660611 |
1d5e880f7c6b8c2ca476af8c65823fab6ed164bd | 1,190 | py | Python | gamesnstuff/scanner/background/send_message.py | LionelNoussi/Games-n-Stuff | 91c4af67d0534a048e324b258eddbb2dc3963c36 | [
"MIT"
] | null | null | null | gamesnstuff/scanner/background/send_message.py | LionelNoussi/Games-n-Stuff | 91c4af67d0534a048e324b258eddbb2dc3963c36 | [
"MIT"
] | null | null | null | gamesnstuff/scanner/background/send_message.py | LionelNoussi/Games-n-Stuff | 91c4af67d0534a048e324b258eddbb2dc3963c36 | [
"MIT"
] | null | null | null | from django.contrib import messages
successfulMessages = ["Success"]
neutralMessages = ["Returned"]
badMessages = ["Invalid student ID", "Invalid item"]
warningMessages = ["Already borrowed something"]
def make_toast(request, message, student_name, item):
message_texts = {"Success": f"{student_name} successfully borrowed item: {item}",
"Returned": f"{student_name} successfully returned item: {item}",
"Invalid student ID": "Your student ID was not recognized!",
"Invalid item": "The item you tried to borrow doesn't exist!",
"Already borrowed something": f"WARNING: Please {student_name }, remember to also return the "
f"things you have already borrowed!"}
if message in successfulMessages:
messages.success(request, f"{message_texts[message]}")
if message in badMessages:
messages.error(request, f"{message_texts[message]}")
if message in neutralMessages:
messages.info(request, f"{message_texts[message]}")
if message in warningMessages:
messages.warning(request, f"{message_texts[message]}")
| 47.6 | 115 | 0.647899 |
d245175981d44ba43d247335377641db12ed9ce3 | 3,143 | py | Python | cmake/external/onnxruntime-extensions/test/test_onnxprocess.py | fushwLZU/onnxruntime_test | 7ee82dde9150dc0d3014c06a82eabdecb989f2f3 | [
"MIT"
] | null | null | null | cmake/external/onnxruntime-extensions/test/test_onnxprocess.py | fushwLZU/onnxruntime_test | 7ee82dde9150dc0d3014c06a82eabdecb989f2f3 | [
"MIT"
] | null | null | null | cmake/external/onnxruntime-extensions/test/test_onnxprocess.py | fushwLZU/onnxruntime_test | 7ee82dde9150dc0d3014c06a82eabdecb989f2f3 | [
"MIT"
] | 1 | 2021-09-17T04:50:02.000Z | 2021-09-17T04:50:02.000Z | import io
import onnx
import unittest
import torchvision
import numpy as np
from onnxruntime_extensions import PyOrtFunction, hook_model_op, PyOp
from onnxruntime_extensions.onnxprocess import torch_wrapper as torch
from onnxruntime_extensions.onnxprocess import trace_for_onnx, pyfunc_from_model
class TestTorchE2E(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.mobilenet = torchvision.models.mobilenet_v2(pretrained=True)
cls.argmax_input = None
@staticmethod
def on_hook(*x):
TestTorchE2E.argmax_input = x[0]
return x
def test_range(self):
num = 10
f = io.BytesIO()
with trace_for_onnx(num, names=['count']) as tc_sess:
num_in = tc_sess.get_inputs()[0]
done = torch.tensor(True)
st_0 = torch.tensor(0)
cfg = torch.control_flow()
for _ in cfg.loop(num_in, done, st_0):
iter_num, *v = _
cfg.flow_output(done, st_0, iter_num + 0)
*_, rout = cfg.finalize()
tc_sess.save_as_onnx(f, rout)
m = onnx.load_model_from_string(f.getvalue())
onnx.save_model(m, 'temp_range.onnx')
fu_m = PyOrtFunction.from_model(m)
result = fu_m(num)
np.testing.assert_array_equal(result, np.array(range(num)))
def test_sequence(self):
input_text = ['test sentence', 'sentence 2']
f = io.BytesIO()
with trace_for_onnx(input_text, names=['in_text']) as tc_sess:
tc_inputs = tc_sess.get_inputs()[0]
batchsize = tc_inputs.size()[0]
shape = [batchsize, 2]
fuse_output = torch.zeros(*shape).size()
tc_sess.save_as_onnx(f, fuse_output)
m = onnx.load_model_from_string(f.getvalue())
onnx.save_model(m, 'temp_test00.onnx')
fu_m = PyOrtFunction.from_model(m)
result = fu_m(input_text)
np.testing.assert_array_equal(result, [2, 2])
def test_imagenet_postprocess(self):
mb_core_path = 'temp_mobilev2.onnx'
mb_full_path = 'temp_mobilev2_full.onnx'
dummy_input = torch.randn(10, 3, 224, 224)
np_input = dummy_input.numpy()
torch.onnx.export(self.mobilenet, dummy_input, mb_core_path, opset_version=11)
mbnet2 = pyfunc_from_model(mb_core_path)
with trace_for_onnx(dummy_input, names=['b10_input']) as tc_sess:
scores = mbnet2(*tc_sess.get_inputs())
probabilities = torch.softmax(scores, dim=1)
batch_top1 = probabilities.argmax(dim=1)
np_argmax = probabilities.numpy() # for the result comparison
np_output = batch_top1.numpy()
tc_sess.save_as_onnx(mb_full_path, batch_top1)
hkdmdl = hook_model_op(onnx.load_model(mb_full_path), 'argmax', self.on_hook, [PyOp.dt_float])
mbnet2_full = PyOrtFunction.from_model(hkdmdl)
batch_top1_2 = mbnet2_full(np_input)
np.testing.assert_allclose(np_argmax, self.argmax_input, rtol=1e-5)
np.testing.assert_array_equal(batch_top1_2, np_output)
if __name__ == "__main__":
unittest.main()
| 36.126437 | 102 | 0.650016 |
823450f017098b3dc2ab9748a489c3860152387c | 2,844 | py | Python | windowScoreCalculator.py | TheSeeven/home-thermal-management | c3a7856ca9475501f0d8cbace4deb1cb4d0b2fa9 | [
"Unlicense"
] | null | null | null | windowScoreCalculator.py | TheSeeven/home-thermal-management | c3a7856ca9475501f0d8cbace4deb1cb4d0b2fa9 | [
"Unlicense"
] | null | null | null | windowScoreCalculator.py | TheSeeven/home-thermal-management | c3a7856ca9475501f0d8cbace4deb1cb4d0b2fa9 | [
"Unlicense"
] | null | null | null | TEMPERATURE_BIAS = 0.30
AIR_QUALITY_BIAS = 0.65
HUMIDITY_BIAS = 0.05
def getDesirabilityAirQuality(inside, desired, outside):
if inside == None or desired == None or outside == None:
return 0
result = outside - inside
isOutsideBetter = outside > inside
isInsideOk = inside >= desired
if isOutsideBetter and isInsideOk:
return 0
elif isOutsideBetter and not isInsideOk:
return result * 2
elif not isOutsideBetter and isInsideOk:
return 0
elif not isOutsideBetter and not isInsideOk:
return result * 2
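# Worked example with assumed readings (illustrative only): if the indoor air
# quality is 40, the desired level is 50 and the outdoor level is 60, then the
# outdoors is better and the indoors is below target, so the function returns
# (60 - 40) * 2 = 40, i.e. opening the window is strongly desirable.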
def getDesirabilityTemperature(inside, desired, outside):
result = 0
if inside == None or desired == None or outside == None:
return result
isOutsideHotter = outside > inside
isInsideTooHot = inside > desired
if isInsideTooHot and not isOutsideHotter:
if not outside > desired:
result = abs(outside - inside) + abs(inside - desired)
else:
if inside != outside:
result = abs(outside - desired)
else:
result = -abs(outside - desired)
elif isInsideTooHot and isOutsideHotter:
result = -abs(outside - desired) + -abs(outside - inside)
elif not isInsideTooHot and isOutsideHotter:
if inside != desired:
result = abs(outside - inside) + abs(desired - inside)
else:
result = -abs(outside - desired)
elif not isInsideTooHot and not isOutsideHotter:
result = -abs(desired - outside) + -abs(outside - inside)
return result
def getDesirabilityHumidity(inside, desired, outside):
result = 0
if inside == None or desired == None or outside == None:
return result
isOutsideMoreHumid = outside > inside
isInsideTooHumid = inside > desired
if isInsideTooHumid and isOutsideMoreHumid:
result = -abs(outside - desired) + -abs(outside - desired)
elif not isInsideTooHumid and isOutsideMoreHumid:
if inside != desired:
result = abs(outside - inside) + abs(desired - inside)
else:
result = -abs(outside - desired)
elif not isInsideTooHumid and not isOutsideMoreHumid:
result = -abs(desired - outside) + -abs(outside - inside)
elif isInsideTooHumid and not isOutsideMoreHumid:
if not outside > desired:
result = abs(outside - inside) + abs(inside - desired)
else:
if inside != outside:
result = abs(outside - desired)
else:
result = -abs(outside - desired)
return result
def getWindowsDesirability(temperatureFactor, humidityFactor, aqFactor):
temp = (temperatureFactor / 10) * TEMPERATURE_BIAS
hum = (humidityFactor / 20) * HUMIDITY_BIAS
aq = aqFactor * AIR_QUALITY_BIAS
return temp + hum + aq
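# Minimal self-check sketch (hypothetical inputs, not part of the original
# module): the three bias weights sum to 1.0, so maxed-out factors score ~1.0.
if __name__ == "__main__":
    print(getWindowsDesirability(10, 20, 1.0))  # -> approximately 1.0
    print(getWindowsDesirability(4, -10, 0.5))  # -> approximately 0.42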
| 33.069767 | 72 | 0.638537 |
b46dc1932f2050c6ffe5a28f6d924813fbde7da1 | 9,670 | py | Python | hathor/cli/util.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 51 | 2019-12-28T03:33:27.000Z | 2022-03-10T14:03:03.000Z | hathor/cli/util.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 316 | 2019-09-10T09:20:05.000Z | 2022-03-31T20:18:56.000Z | hathor/cli/util.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 19 | 2020-01-04T00:13:18.000Z | 2022-02-08T21:18:46.000Z | # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime
from typing import Any, List
import configargparse
def create_parser() -> ArgumentParser:
return configargparse.ArgumentParser(auto_env_var_prefix='hathor_')
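# Note: because of auto_env_var_prefix='hathor_' above, configargparse will also
# read every option added to this parser from an environment variable prefixed
# with HATHOR_ (derived from the option's long name). No concrete option names
# are defined in this file; this note is only a usage hint.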
def setup_logging(
debug: bool = False,
capture_stdout: bool = False,
json_logging: bool = False,
*,
sentry: bool = False,
_test_logging: bool = False,
) -> None:
import logging
import logging.config
import structlog
import twisted
from twisted.logger import LogLevel
# Mappings to Python's logging module
twisted_to_logging_level = {
LogLevel.debug: logging.DEBUG,
LogLevel.info: logging.INFO,
LogLevel.warn: logging.WARNING,
LogLevel.error: logging.ERROR,
LogLevel.critical: logging.CRITICAL,
}
# common timestamper for structlog loggers and foreign (stdlib and twisted) loggers
timestamper = structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S")
# processors for foreign loggers
pre_chain = [
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
timestamper,
]
# docs at http://www.structlog.org/en/stable/api.html#structlog.dev.ConsoleRenderer
class ConsoleRenderer(structlog.dev.ConsoleRenderer):
def __call__(self, _, __, event_dict):
from io import StringIO
from structlog.dev import _pad
sio = StringIO()
ts = event_dict.pop('timestamp', None)
if ts is not None:
sio.write(
# can be a number if timestamp is UNIXy
self._styles.timestamp
+ str(ts)
+ self._styles.reset
+ ' '
)
level = event_dict.pop('level', None)
if level is not None:
sio.write(
'['
+ self._level_to_color[level]
+ _pad(level, self._longest_level)
+ self._styles.reset
+ '] '
)
logger_name = event_dict.pop('logger', None)
if logger_name is not None:
sio.write(
'['
+ self._styles.logger_name
+ self._styles.bright
+ logger_name
+ self._styles.reset
+ '] '
)
event = str(event_dict.pop('event'))
if event_dict:
event = _pad(event, self._pad_event) + self._styles.reset + ' '
else:
event += self._styles.reset
sio.write(self._styles.bright + event)
stack = event_dict.pop('stack', None)
exc = event_dict.pop('exception', None)
sio.write(
' '.join(
self._styles.kv_key
+ key
+ self._styles.reset
+ '='
+ self._styles.kv_value
+ self._repr(event_dict[key])
+ self._styles.reset
for key in sorted(event_dict.keys())
)
)
if stack is not None:
sio.write('\n' + stack)
if exc is not None:
sio.write('\n\n' + '=' * 79 + '\n')
if exc is not None:
sio.write('\n' + exc)
return sio.getvalue()
@staticmethod
def get_default_level_styles(colors=True):
import colorama
if not colors:
return structlog.dev.ConsoleRenderer.get_default_level_styles(False)
return {
'critical': colorama.Style.BRIGHT + colorama.Fore.RED,
'exception': colorama.Fore.RED,
'error': colorama.Fore.RED,
'warn': colorama.Fore.YELLOW,
'warning': colorama.Fore.YELLOW,
'info': colorama.Fore.GREEN,
'debug': colorama.Style.BRIGHT + colorama.Fore.CYAN,
'notset': colorama.Back.RED,
}
def _repr(self, val):
if isinstance(val, datetime):
return str(val)
else:
return super()._repr(val)
if json_logging:
handlers = ['json']
else:
handlers = ['pretty']
# See: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'plain': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': ConsoleRenderer(colors=False),
'foreign_pre_chain': pre_chain,
},
'colored': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': ConsoleRenderer(colors=True),
'foreign_pre_chain': pre_chain,
},
'json': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': structlog.processors.JSONRenderer(),
'foreign_pre_chain': pre_chain,
},
},
'handlers': {
'pretty': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'colored',
},
'json': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'json',
},
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.WatchedFileHandler',
# 'filename': 'test.log',
# 'formatter': 'plain',
# },
},
'loggers': {
# set twisted verbosity one level lower than hathor's
'twisted': {
'handlers': handlers,
'level': 'INFO' if debug else 'WARN',
'propagate': False,
},
'': {
'handlers': handlers,
'level': 'DEBUG' if debug else 'INFO',
},
}
})
def kwargs_formatter(_, __, event_dict):
if event_dict and event_dict.get('event') and isinstance(event_dict['event'], str):
event_dict['event'] = event_dict['event'].format(**event_dict)
return event_dict
processors: List[Any] = [
structlog.stdlib.filter_by_level,
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
]
if sentry:
from structlog_sentry import SentryProcessor
processors.append(SentryProcessor(level=logging.ERROR))
processors.extend([
structlog.stdlib.PositionalArgumentsFormatter(),
kwargs_formatter,
timestamper,
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
])
structlog.configure(
processors=processors,
context_class=OrderedDict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
twisted_logger = structlog.get_logger('twisted')
def twisted_structlog_observer(event):
try:
level = twisted_to_logging_level.get(event.get('log_level'), logging.INFO)
kwargs = {}
msg = ''
if not msg and event.get('log_format', None):
msg = event['log_format'].format(**event)
if not msg and event.get('format', None):
msg = event['format'] % event
failure = event.get('log_failure')
if failure is not None:
kwargs['exc_info'] = (failure.type, failure.value, failure.getTracebackObject())
twisted_logger.log(level, msg, **kwargs)
except Exception as e:
print('error when logging event', e)
for k, v in event.items():
print(k, v)
# start logging to std logger so structlog can catch it
twisted.python.log.startLoggingWithObserver(twisted_structlog_observer, setStdout=capture_stdout)
if _test_logging:
logger = structlog.get_logger()
logger.debug('Test: debug.')
logger.info('Test: info.')
logger.warning('Test: warning.')
logger.error('Test error.')
logger.critical('Test: critical.')
def check_or_exit(condition: bool, message: str) -> None:
"""Will exit printing `message` if `condition` is False."""
if not condition:
print(message)
sys.exit(2)
| 34.412811 | 101 | 0.532472 |
1c25cf170b72c6852eeaa64334cf5ded57f7f53a | 14,364 | py | Python | kubric/simulator/pybullet.py | ScottyLectronica/kubric | 31930b4a8517d1fc5987bb1502e47f130209505a | [
"Apache-2.0"
] | null | null | null | kubric/simulator/pybullet.py | ScottyLectronica/kubric | 31930b4a8517d1fc5987bb1502e47f130209505a | [
"Apache-2.0"
] | null | null | null | kubric/simulator/pybullet.py | ScottyLectronica/kubric | 31930b4a8517d1fc5987bb1502e47f130209505a | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=function-redefined
import logging
import pathlib
import sys
import tempfile
from typing import Dict, List, Optional, Tuple, Union
import tensorflow as tf
from singledispatchmethod import singledispatchmethod
from kubric import core
from kubric.redirect_io import RedirectStream
# --- hides the "pybullet build time: May 26 2021 18:52:36" message on import
with RedirectStream(stream=sys.stderr):
import pybullet as pb
logger = logging.getLogger(__name__)
class PyBullet(core.View):
"""Adds physics simulation on top of kb.Scene using PyBullet."""
def __init__(self, scene: core.Scene, scratch_dir=tempfile.mkdtemp()):
self.scratch_dir = scratch_dir
self.physics_client = pb.connect(pb.DIRECT) # pb.GUI
# --- Set some parameters to fix the sticky-walls problem; see
# https://github.com/bulletphysics/bullet3/issues/3094
pb.setPhysicsEngineParameter(restitutionVelocityThreshold=0.,
warmStartingFactor=0.,
useSplitImpulse=True,
contactSlop=0.,
enableConeFriction=False,
deterministicOverlappingPairs=True)
# TODO: setTimeStep if scene.step_rate != 240 Hz
super().__init__(scene, scene_observers={
"gravity": [lambda change: pb.setGravity(*change.new)],
})
def __del__(self):
try:
pb.disconnect()
except Exception: # pylint: disable=broad-except
pass # cleanup code. ignore errors
@singledispatchmethod
def add_asset(self, asset: core.Asset) -> Optional[int]:
raise NotImplementedError(f"Cannot add {asset!r}")
def remove_asset(self, asset: core.Asset) -> None:
if self in asset.linked_objects:
pb.removeBody(asset.linked_objects[self])
# TODO(klausg): unobserve
@add_asset.register(core.Camera)
def _add_object(self, obj: core.Camera) -> None:
logger.debug("Ignored camera %s", obj)
@add_asset.register(core.Material)
def _add_object(self, obj: core.Material) -> None:
logger.debug("Ignored material %s", obj)
@add_asset.register(core.Light)
def _add_object(self, obj: core.Light) -> None:
logger.debug("Ignored light %s", obj)
@add_asset.register(core.Cube)
def _add_object(self, obj: core.Cube) -> Optional[int]:
collision_idx = pb.createCollisionShape(pb.GEOM_BOX, halfExtents=obj.scale)
visual_idx = -1
mass = 0 if obj.static else obj.mass
# useMaximalCoordinates and contactProcessingThreshold are required to
# fix the sticky walls issue;
# see https://github.com/bulletphysics/bullet3/issues/3094
box_idx = pb.createMultiBody(mass, collision_idx, visual_idx, obj.position,
wxyz2xyzw(obj.quaternion), useMaximalCoordinates=True)
pb.changeDynamics(box_idx, -1, contactProcessingThreshold=0)
register_physical_object_setters(obj, box_idx)
return box_idx
@add_asset.register(core.Sphere)
def _add_object(self, obj: core.Sphere) -> Optional[int]:
radius = obj.scale[0]
assert radius == obj.scale[1] == obj.scale[2], obj.scale # only uniform scaling
collision_idx = pb.createCollisionShape(pb.GEOM_SPHERE, radius=radius)
visual_idx = -1
mass = 0 if obj.static else obj.mass
# useMaximalCoordinates and contactProcessingThreshold are required to
# fix the sticky walls issue;
# see https://github.com/bulletphysics/bullet3/issues/3094
sphere_idx = pb.createMultiBody(mass, collision_idx, visual_idx, obj.position,
wxyz2xyzw(obj.quaternion), useMaximalCoordinates=True)
pb.changeDynamics(sphere_idx, -1, contactProcessingThreshold=0)
register_physical_object_setters(obj, sphere_idx)
return sphere_idx
@add_asset.register(core.FileBasedObject)
def _add_object(self, obj: core.FileBasedObject) -> Optional[int]:
# TODO: support other file-formats
if obj.simulation_filename is None:
return None # if there is no simulation file, then ignore this object
path = pathlib.Path(obj.simulation_filename).resolve()
logger.debug("Loading '%s' in the simulator", path)
if not path.exists():
raise IOError(f"File '{path}' does not exist.")
scale = obj.scale[0]
assert obj.scale[1] == obj.scale[2] == scale, "Pybullet does not support non-uniform scaling"
# useMaximalCoordinates and contactProcessingThreshold are required to
# fix the sticky walls issue;
# see https://github.com/bulletphysics/bullet3/issues/3094
if path.suffix == ".urdf":
obj_idx = pb.loadURDF(str(path), useFixedBase=obj.static,
globalScaling=scale,
useMaximalCoordinates=True)
else:
raise IOError(
"Unsupported format '{path.suffix}' of file '{path}'")
if obj_idx < 0:
raise IOError(f"Failed to load '{path}'")
pb.changeDynamics(obj_idx, -1, contactProcessingThreshold=0)
register_physical_object_setters(obj, obj_idx)
return obj_idx
def check_overlap(self, obj: core.PhysicalObject) -> bool:
obj_idx = obj.linked_objects[self]
body_ids = [pb.getBodyUniqueId(i) for i in range(pb.getNumBodies())]
for body_id in body_ids:
if body_id == obj_idx:
continue
overlap_points = pb.getClosestPoints(obj_idx, body_id, distance=0)
if overlap_points:
return True
return False
def get_position_and_rotation(self, obj_idx: int):
pos, quat = pb.getBasePositionAndOrientation(obj_idx)
return pos, xyzw2wxyz(quat) # convert quaternion format
def get_velocities(self, obj_idx: int):
velocity, angular_velocity = pb.getBaseVelocity(obj_idx)
return velocity, angular_velocity
def save_state(self, path: Union[pathlib.Path, str] = "scene.bullet"):
"""Receives a folder path as input."""
assert self.scratch_dir is not None
# first store in a temporary file and then copy, to support remote paths
pb.saveBullet(str(self.scratch_dir / "scene.bullet"))
tf.io.gfile.copy(self.scratch_dir / "scene.bullet", path, overwrite=True)
def run(
self,
frame_start: int = 0,
frame_end: Optional[int] = None
) -> Tuple[Dict[core.PhysicalObject, Dict[str, list]], List[dict]]:
"""
Run the physics simulation.
The resulting animation is saved directly as keyframes in the assets,
and also returned (together with the collision events).
Args:
frame_start: The first frame from which to start the simulation (inclusive).
Also the first frame for which keyframes are stored.
frame_end: The last frame (inclusive) that is simulated (and for which animations
are computed).
Returns:
A dict of all animations and a list of all collision events.
"""
frame_end = self.scene.frame_end if frame_end is None else frame_end
steps_per_frame = self.scene.step_rate // self.scene.frame_rate
max_step = (frame_end - frame_start + 1) * steps_per_frame
obj_idxs = [pb.getBodyUniqueId(i) for i in range(pb.getNumBodies())]
animation = {obj_id: {"position": [], "quaternion": [], "velocity": [], "angular_velocity": []}
for obj_id in obj_idxs}
collisions = []
for current_step in range(max_step):
contact_points = pb.getContactPoints()
for collision in contact_points:
(contact_flag,
body_a, body_b,
link_a, link_b,
position_a, position_b, contact_normal_b,
contact_distance, normal_force,
lateral_friction1, lateral_friction_dir1,
lateral_friction2, lateral_friction_dir2) = collision
del link_a, link_b # < unused
del contact_flag, contact_distance, position_a # < unused
del lateral_friction1, lateral_friction2 # < unused
del lateral_friction_dir1, lateral_friction_dir2 # < unused
if normal_force > 1e-6:
collisions.append({
"instances": (self._obj_idx_to_asset(body_b), self._obj_idx_to_asset(body_a)),
"position": position_b,
"contact_normal": contact_normal_b,
"frame": current_step / steps_per_frame,
"force": normal_force,
})
if current_step % steps_per_frame == 0:
for obj_idx in obj_idxs:
position, quaternion = self.get_position_and_rotation(obj_idx)
velocity, angular_velocity = self.get_velocities(obj_idx)
animation[obj_idx]["position"].append(position)
animation[obj_idx]["quaternion"].append(quaternion)
animation[obj_idx]["velocity"].append(velocity)
animation[obj_idx]["angular_velocity"].append(angular_velocity)
pb.stepSimulation()
animation = {asset: animation[asset.linked_objects[self]] for asset in self.scene.assets
if asset.linked_objects.get(self) in obj_idxs}
# --- Transfer simulation to renderer keyframes
for obj in animation.keys():
for frame_id in range(frame_end - frame_start + 1):
obj.position = animation[obj]["position"][frame_id]
obj.quaternion = animation[obj]["quaternion"][frame_id]
obj.velocity = animation[obj]["velocity"][frame_id]
obj.angular_velocity = animation[obj]["angular_velocity"][frame_id]
obj.keyframe_insert("position", frame_id + frame_start)
obj.keyframe_insert("quaternion", frame_id + frame_start)
obj.keyframe_insert("velocity", frame_id + frame_start)
obj.keyframe_insert("angular_velocity", frame_id + frame_start)
return animation, collisions
def _obj_idx_to_asset(self, idx):
assets = [asset for asset in self.scene.assets if asset.linked_objects.get(self) == idx]
if len(assets) == 1:
return assets[0]
elif len(assets) == 0:
return None
else:
raise RuntimeError("Multiple assets linked to same pybullet object. That should never happen")
def xyzw2wxyz(xyzw):
"""Convert quaternions from XYZW format to WXYZ."""
x, y, z, w = xyzw
return w, x, y, z
def wxyz2xyzw(wxyz):
"""Convert quaternions from WXYZ format to XYZW."""
w, x, y, z = wxyz
return x, y, z, w
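# Illustrative example (not from the original file): the identity rotation is
# (1, 0, 0, 0) in WXYZ order and (0, 0, 0, 1) in pybullet's XYZW order.
#
#   wxyz2xyzw((1, 0, 0, 0))  # -> (0, 0, 0, 1)
#   xyzw2wxyz((0, 0, 0, 1))  # -> (1, 0, 0, 0)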
def register_physical_object_setters(obj: core.PhysicalObject, obj_idx):
assert isinstance(obj, core.PhysicalObject), f"{obj!r} is not a PhysicalObject"
obj.observe(setter(obj_idx, set_position), "position")
obj.observe(setter(obj_idx, set_quaternion), "quaternion")
# TODO Pybullet does not support rescaling. So we should warn if scale is changed
obj.observe(setter(obj_idx, set_velocity), "velocity")
obj.observe(setter(obj_idx, set_angular_velocity), "angular_velocity")
obj.observe(setter(obj_idx, set_friction), "friction")
obj.observe(setter(obj_idx, set_restitution), "restitution")
obj.observe(setter(obj_idx, set_mass), "mass")
obj.observe(setter(obj_idx, set_static), "static")
def setter(object_idx, func):
def _callable(change):
return func(object_idx, change.new, change.owner)
return _callable
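# Usage sketch (mirrors register_physical_object_setters above): e.g.
#   obj.observe(setter(obj_idx, set_mass), "mass")
# registers a callback that forwards the traitlets change event as
# set_mass(obj_idx, change.new, change.owner) whenever obj.mass is updated.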
def set_position(object_idx, position, asset): # pylint: disable=unused-argument
# reuse existing quaternion
_, quaternion = pb.getBasePositionAndOrientation(object_idx)
# resetBasePositionAndOrientation zeroes out velocities, but we wish to conserve them
velocity, angular_velocity = pb.getBaseVelocity(object_idx)
pb.resetBasePositionAndOrientation(object_idx, position, quaternion)
pb.resetBaseVelocity(object_idx, velocity, angular_velocity)
def set_quaternion(object_idx, quaternion, asset): # pylint: disable=unused-argument
quaternion = wxyz2xyzw(quaternion) # convert quaternion format
# reuse existing position
position, _ = pb.getBasePositionAndOrientation(object_idx)
# resetBasePositionAndOrientation zeroes out velocities, but we wish to conserve them
velocity, angular_velocity = pb.getBaseVelocity(object_idx)
pb.resetBasePositionAndOrientation(object_idx, position, quaternion)
pb.resetBaseVelocity(object_idx, velocity, angular_velocity)
def set_velocity(object_idx, velocity, asset): # pylint: disable=unused-argument
_, angular_velocity = pb.getBaseVelocity(object_idx) # reuse existing angular velocity
pb.resetBaseVelocity(object_idx, velocity, angular_velocity)
def set_angular_velocity(object_idx, angular_velocity, asset): # pylint: disable=unused-argument
velocity, _ = pb.getBaseVelocity(object_idx) # reuse existing velocity
pb.resetBaseVelocity(object_idx, velocity, angular_velocity)
def set_mass(object_idx, mass: float, asset):
if mass < 0:
raise ValueError(f"mass cannot be negative ({mass})")
if not asset.static:
pb.changeDynamics(object_idx, -1, mass=mass)
def set_static(object_idx, is_static, asset):
if is_static:
pb.changeDynamics(object_idx, -1, mass=0.)
else:
pb.changeDynamics(object_idx, -1, mass=asset.mass)
def set_friction(object_idx, friction: float, asset): # pylint: disable=unused-argument
if friction < 0:
raise ValueError("friction cannot be negative ({friction})")
pb.changeDynamics(object_idx, -1, lateralFriction=friction)
def set_restitution(object_idx, restitution: float, asset): # pylint: disable=unused-argument
if restitution < 0:
raise ValueError("restitution cannot be negative ({restitution})")
if restitution > 1:
raise ValueError("restitution should be below 1.0 ({restitution})")
pb.changeDynamics(object_idx, -1, restitution=restitution)
| 40.461972 | 101 | 0.689432 |
38bc0d8c1961839a88e2c4293f68b634252ae61b | 92 | py | Python | venv/lib/python3.7/site-packages/pywikibot/data/__init__.py | Sanechoic/JoyStory | 8b2f605f56db765292bb77b54d661a1cb8e638d9 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/pywikibot/data/__init__.py | Sanechoic/JoyStory | 8b2f605f56db765292bb77b54d661a1cb8e638d9 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/pywikibot/data/__init__.py | Sanechoic/JoyStory | 8b2f605f56db765292bb77b54d661a1cb8e638d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Module providing several layers of data access to the wiki."""
| 30.666667 | 66 | 0.641304 |
c093bf8d7c36c1833811cac6785306a31e7f287e | 5,295 | py | Python | scripts/updatestatistics.py | metakgp/pywikibot | 28e3125167bb3ec521a25ade859d0e0afcfb1262 | [
"MIT"
] | 2 | 2017-06-19T16:48:34.000Z | 2017-07-07T14:15:28.000Z | scripts/updatestatistics.py | metakgp/pywikibot | 28e3125167bb3ec521a25ade859d0e0afcfb1262 | [
"MIT"
] | 11 | 2018-12-07T18:20:05.000Z | 2022-03-11T23:12:42.000Z | scripts/updatestatistics.py | metakgp/pywikibot | 28e3125167bb3ec521a25ade859d0e0afcfb1262 | [
"MIT"
] | 3 | 2018-12-09T10:18:35.000Z | 2020-09-12T13:50:14.000Z | """A simple example of how to access the Google Analytics API."""
import re
from datetime import date
import difflib
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
import httplib2
import json
import pywikibot
def get_service(api_name, api_version, scope, key_file_location,
service_account_email):
"""Get a service that communicates to a Google API.
Args:
api_name: The name of the api to connect to.
api_version: The api version to connect to.
scope: A list of auth scopes to authorize for the application.
key_file_location: The path to a valid service account p12 key file.
service_account_email: The service account email address.
Returns:
A service that is connected to the specified API.
"""
with open(key_file_location, 'rb') as f:
key = json.load(f)['private_key']
credentials = SignedJwtAssertionCredentials(service_account_email, key,
scope=scope)
http = credentials.authorize(httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
def get_first_profile_id(service):
# Use the Analytics service object to get the first profile id.
# Get a list of all Google Analytics accounts for this user
accounts = service.management().accounts().list().execute()
if accounts.get('items'):
# Get the first Google Analytics account.
account = accounts.get('items')[0].get('id')
# Get a list of all the properties for the first account.
properties = service.management().webproperties().list(
accountId=account).execute()
if properties.get('items'):
# Get the first property id.
property = properties.get('items')[0].get('id')
# Get a list of all views (profiles) for the first property.
profiles = service.management().profiles().list(
accountId=account,
webPropertyId=property).execute()
if profiles.get('items'):
# return the first view (profile) id.
return profiles.get('items')[0].get('id')
return None
def filter_main_ns(results):
pages = []
for r in results:
url_title = re.match(r'^/w/([^/]+)$', r[1])
if not url_title:
continue
else:
url_title = url_title.group(1)
if re.match(r'[a-zA-Z0-9_ ]+:.*', url_title):
continue
if re.match('.*\.php', url_title):
continue
match = re.match(r'(.+) - Metakgp Wiki', r[0])
if not match:
continue
if match.group(1) in pages:
continue
#print 'r: ', r
pages.append(match.group(1))
if len(pages) == 10:
break
return pages
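# Illustrative behaviour with made-up rows (not from the original script):
# ("Main Page - Metakgp Wiki", "/w/Main_Page") contributes "Main Page",
# while ("Special:RecentChanges - Metakgp Wiki", "/w/Special:RecentChanges")
# is skipped because its title carries a namespace prefix.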
def get_popular_pages(service, profile_id):
results = service.data().ga().get(
ids='ga:' + profile_id,
start_date='90daysAgo',
end_date='today',
metrics='ga:uniquePageviews',
dimensions='ga:pageTitle, ga:pagePath',
sort='-ga:uniquePageviews',
max_results=50).execute()
return filter_main_ns(results['rows'])
def get_trending_pages(service, profile_id):
results = service.data().ga().get(
ids='ga:' + profile_id,
start_date='7daysAgo',
end_date='today',
metrics='ga:entrances',
dimensions='ga:pageTitle, ga:landingPagePath',
sort='-ga:entrances',
max_results=50).execute()
return filter_main_ns(results['rows'])
def update_list_of_pages(template, pages):
template_page = pywikibot.Page(pywikibot.Link(template), pywikibot.Site())
text = " <noinclude>This page is automatically generated. Changes will be overwritten, so '''do not modify'''.</noinclude>\n"
for p in pages:
text += "*[[%s]]\n" % p
text = text.rstrip()
if template_page.text == text:
print template, 'unchanged, no edit made.'
return
else:
print template, 'changed:'
print text
#diff = difflib.ndiff(template_page.text.splitlines(1),
# text.splitlines(1))
#for d in diff:
# print d,
template_page.text = text
template_page.save('Updated on ' +
date.today().strftime('%B %d, %Y'))
def main():
# Define the auth scopes to request.
scope = ['https://www.googleapis.com/auth/analytics.readonly']
# Use the developer console and replace the values with your
# service account email and relative location of your key file.
service_account_email = '225416695729-s8pjufb10pkgp269bbvf1hsdimcnmpbn@developer.gserviceaccount.com'
key_file_location = 'ga_credentials.json'
# Authenticate and construct service.
service = get_service('analytics', 'v3', scope, key_file_location,
service_account_email)
profile = get_first_profile_id(service)
popular_pages = get_popular_pages(service, profile)
update_list_of_pages('Template:Popular_pages', popular_pages)
trending_pages = get_trending_pages(service, profile)
update_list_of_pages('Template:Trending_pages', trending_pages)
if __name__ == '__main__':
main()
| 30.964912 | 129 | 0.638338 |
ac1f308efebe93d17e95bf216cc2c791e56420f8 | 2,058 | py | Python | shapenet2kubric/urdf_template.py | ritmps/kubric | ef517ccdedeb304a7a9e77ba109552601a2ae98c | [
"Apache-2.0"
] | null | null | null | shapenet2kubric/urdf_template.py | ritmps/kubric | ef517ccdedeb304a7a9e77ba109552601a2ae98c | [
"Apache-2.0"
] | null | null | null | shapenet2kubric/urdf_template.py | ritmps/kubric | ef517ccdedeb304a7a9e77ba109552601a2ae98c | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
URDF_TEMPLATE = """
<robot name="{id}">
<link name="base">
<inertial>
<origin xyz="{center_mass[0]} {center_mass[1]} {center_mass[2]}" />
<mass value="{mass}" />
<inertia ixx="{inertia[0][0]}" ixy="{inertia[0][1]}"
ixz="{inertia[0][2]}" iyy="{inertia[1][1]}"
iyz="{inertia[1][2]}" izz="{inertia[2][2]}" />
</inertial>
<visual>
<origin xyz="{center_mass[0]} {center_mass[1]} {center_mass[2]}" />
<geometry>
<mesh filename="collision_geometry.obj" />
</geometry>
</visual>
<collision>
<origin xyz="{center_mass[0]} {center_mass[1]} {center_mass[2]}" />
<geometry>
<mesh filename="collision_geometry.obj" />
</geometry>
</collision>
</link>
</robot>
"""
| 38.111111 | 79 | 0.635083 |
01d9ee104cb91da9df3cd172f7816bf6eaf703b8 | 4,320 | py | Python | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_region.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_region.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_region.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Mikhail Yohman (@FragmentedPacket)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: netbox_region
short_description: Creates or removes regions from Netbox
description:
- Creates or removes regions from Netbox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: "0.1.0"
options:
netbox_url:
description:
- URL of the Netbox instance resolvable by Ansible control host
required: true
type: str
netbox_token:
description:
- The token created within Netbox to authorize API access
required: true
type: str
data:
type: dict
description:
- Defines the region configuration
suboptions:
name:
description:
- Name of the region to be created
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
parent_region:
description:
- The parent region this region should be tied to
required: false
type: raw
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
choices: [ absent, present ]
default: present
type: str
query_params:
description:
- This can be used to override the specified values in ALLOWED_QUERY_PARAMS that is defined
- in plugins/module_utils/netbox_utils.py and provides control to users on what may make
- an object unique in their environment.
required: false
type: list
elements: str
validate_certs:
description:
- |
If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
default: true
type: raw
"""
EXAMPLES = r"""
- name: "Test Netbox region module"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create region within Netbox with only required information
netbox_region:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: "Test Region One"
state: present
- name: Delete region within Netbox
netbox_region:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test Region One
state: absent
"""
RETURN = r"""
region:
description: Serialized object as created or already existent within Netbox
returned: on creation
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_dcim import (
NetboxDcimModule,
NB_REGIONS,
)
from copy import deepcopy
def main():
"""
Main entry point for module execution
"""
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
parent_region=dict(required=False, type="raw"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_region = NetboxDcimModule(module, NB_REGIONS)
netbox_region.run()
if __name__ == "__main__": # pragma: no cover
main()
| 26.666667 | 97 | 0.653241 |
6c678ab732c4b859e805615a55498d80cbc7b26b | 5,371 | py | Python | homeassistant/components/trafikverket_weatherstation/sensor.py | mhorst314/home-assistant-core | a9bbc42981b4b3188ff696d17379a7d9e083c641 | [
"Apache-2.0"
] | 1 | 2021-09-29T20:33:32.000Z | 2021-09-29T20:33:32.000Z | homeassistant/components/trafikverket_weatherstation/sensor.py | tomachristian/core | 71c8fcee20c55536b33c3ee774c76c1795f37cd2 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/trafikverket_weatherstation/sensor.py | tomachristian/core | 71c8fcee20c55536b33c3ee774c76c1795f37cd2 | [
"Apache-2.0"
] | 2 | 2020-04-19T13:35:24.000Z | 2020-04-19T13:35:51.000Z | """Weather information for air and road temperature (by Trafikverket)."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
from pytrafikverket.trafikverket_weather import TrafikverketWeather
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
UNIT_DEGREE,
UNIT_PERCENTAGE,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Trafikverket"
ATTR_MEASURE_TIME = "measure_time"
ATTR_ACTIVE = "active"
CONF_STATION = "station"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
SCAN_INTERVAL = timedelta(seconds=300)
SENSOR_TYPES = {
"air_temp": [
"Air temperature",
TEMP_CELSIUS,
"air_temp",
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"road_temp": [
"Road temperature",
TEMP_CELSIUS,
"road_temp",
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"precipitation": [
"Precipitation type",
None,
"precipitationtype",
"mdi:weather-snowy-rainy",
None,
],
"wind_direction": [
"Wind direction",
UNIT_DEGREE,
"winddirection",
"mdi:flag-triangle",
None,
],
"wind_direction_text": [
"Wind direction text",
None,
"winddirectiontext",
"mdi:flag-triangle",
None,
],
"wind_speed": [
"Wind speed",
SPEED_METERS_PER_SECOND,
"windforce",
"mdi:weather-windy",
None,
],
"humidity": [
"Humidity",
UNIT_PERCENTAGE,
"humidity",
"mdi:water-percent",
DEVICE_CLASS_HUMIDITY,
],
"precipitation_amount": [
"Precipitation amount",
"mm",
"precipitation_amount",
"mdi:cup-water",
None,
],
"precipitation_amountname": [
"Precipitation name",
None,
"precipitation_amountname",
"mdi:weather-pouring",
None,
],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_STATION): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): [vol.In(SENSOR_TYPES)],
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Trafikverket sensor platform."""
sensor_name = config[CONF_NAME]
sensor_api = config[CONF_API_KEY]
sensor_station = config[CONF_STATION]
web_session = async_get_clientsession(hass)
weather_api = TrafikverketWeather(web_session, sensor_api)
dev = []
for condition in config[CONF_MONITORED_CONDITIONS]:
dev.append(
TrafikverketWeatherStation(
weather_api, sensor_name, condition, sensor_station
)
)
if dev:
async_add_entities(dev, True)
class TrafikverketWeatherStation(Entity):
"""Representation of a Trafikverket sensor."""
def __init__(self, weather_api, name, sensor_type, sensor_station):
"""Initialize the sensor."""
self._client = name
self._name = SENSOR_TYPES[sensor_type][0]
self._type = sensor_type
self._state = None
self._unit = SENSOR_TYPES[sensor_type][1]
self._station = sensor_station
self._weather_api = weather_api
self._icon = SENSOR_TYPES[sensor_type][3]
self._device_class = SENSOR_TYPES[sensor_type][4]
self._weather = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._client} {self._name}"
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes of Trafikverket Weatherstation."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_ACTIVE: self._weather.active,
ATTR_MEASURE_TIME: self._weather.measure_time,
}
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Trafikverket and updates the states."""
try:
self._weather = await self._weather_api.async_get_weather(self._station)
self._state = getattr(self._weather, SENSOR_TYPES[self._type][2])
except (asyncio.TimeoutError, aiohttp.ClientError, ValueError) as error:
_LOGGER.error("Could not fetch weather data: %s", error)
| 27.126263 | 86 | 0.646062 |
5cd7d453680329fe415b01a04bb46193ac50cc1b | 694 | py | Python | test_settings.py | praekeltfoundation/mc-freebasics | ad9b2408aa97402a2be6444e619c4533663118fb | [
"BSD-2-Clause"
] | null | null | null | test_settings.py | praekeltfoundation/mc-freebasics | ad9b2408aa97402a2be6444e619c4533663118fb | [
"BSD-2-Clause"
] | 29 | 2016-02-29T11:53:47.000Z | 2018-04-05T07:46:15.000Z | test_settings.py | praekeltfoundation/mc2-freebasics | ad9b2408aa97402a2be6444e619c4533663118fb | [
"BSD-2-Clause"
] | null | null | null | from freebasics.settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mc2_test.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
DEBUG = True
CELERY_ALWAYS_EAGER = True
def scratchpath(*paths):
return abspath('.scratchpath', *paths)
SCRATCHPATH = scratchpath()
MESOS_MARATHON_HOST = 'http://testserver:8080'
FREE_BASICS_CAS_SERVER_URL = 'http://testserver'
FREE_BASICS_RAVEN_DSN = 'http://test-raven-dsn'
FREE_BASICS_VOLUME_PATH = '/path/to/media/'
HUB_DOMAIN = 'test.com'
| 19.828571 | 65 | 0.622478 |
ed8c048aa14438bd6caf04a09c8924c78984e13c | 3,403 | py | Python | pysot/utils/model_load.py | tsingqguo/AttackTracker | 054268d5afa0044675c7acf1ac13e621f1c9549e | [
"Apache-2.0"
] | 11 | 2020-11-25T16:19:23.000Z | 2022-01-12T08:08:47.000Z | pysot/utils/model_load.py | tsingqguo/AttackTracker | 054268d5afa0044675c7acf1ac13e621f1c9549e | [
"Apache-2.0"
] | null | null | null | pysot/utils/model_load.py | tsingqguo/AttackTracker | 054268d5afa0044675c7acf1ac13e621f1c9549e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import torch
from pysot.core.config import cfg
logger = logging.getLogger('global')
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
# filter 'num_batches_tracked'
missing_keys = [x for x in missing_keys
if not x.endswith('num_batches_tracked')]
if len(missing_keys) > 0:
logger.info('[Warning] missing keys: {}'.format(missing_keys))
logger.info('missing keys:{}'.format(len(missing_keys)))
if len(unused_pretrained_keys) > 0:
logger.info('[Warning] unused_pretrained_keys: {}'.format(
unused_pretrained_keys))
logger.info('unused checkpoint keys:{}'.format(
len(unused_pretrained_keys)))
logger.info('used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, \
'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old-style models are stored with all parameter names
sharing the common prefix 'module.' '''
logger.info('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
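# Illustrative example (keys invented): a checkpoint saved from an
# nn.DataParallel model maps {'module.backbone.conv1.weight': ...} to
# {'backbone.conv1.weight': ...}; keys without the prefix are returned as-is.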
def load_pretrain(model, pretrained_path):
logger.info('load pretrained model from {}'.format(pretrained_path))
if cfg.CUDA:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path,
map_location=lambda storage, loc: storage.cuda(device))
else:
pretrained_dict = torch.load(pretrained_path,
map_location=lambda storage, loc: storage)
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'],
'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
try:
check_keys(model, pretrained_dict)
except:
logger.info('[Warning]: using pretrain as features.\
Adding "features." as prefix')
new_dict = {}
for k, v in pretrained_dict.items():
k = 'features.' + k
new_dict[k] = v
pretrained_dict = new_dict
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
def restore_from(model, optimizer, ckpt_path):
if cfg.CUDA:
device = torch.cuda.current_device()
ckpt = torch.load(ckpt_path,
map_location=lambda storage, loc: storage.cuda(device))
else:
ckpt = torch.load(ckpt_path,
map_location=lambda storage, loc: storage)
epoch = ckpt['epoch']
ckpt_model_dict = remove_prefix(ckpt['state_dict'], 'module.')
check_keys(model, ckpt_model_dict)
model.load_state_dict(ckpt_model_dict, strict=False)
check_keys(optimizer, ckpt['optimizer'])
optimizer.load_state_dict(ckpt['optimizer'])
return model, optimizer, epoch
| 36.202128 | 72 | 0.675287 |
2a03f49875931a410ec6bb7d84764b2b2f88c010 | 6,508 | py | Python | spec_mytextrank.py | jjayd/project | 3bc41502bfad04c7ca447ab30e3041f623059d59 | [
"MIT"
] | null | null | null | spec_mytextrank.py | jjayd/project | 3bc41502bfad04c7ca447ab30e3041f623059d59 | [
"MIT"
] | null | null | null | spec_mytextrank.py | jjayd/project | 3bc41502bfad04c7ca447ab30e3041f623059d59 | [
"MIT"
] | null | null | null | from newspaper import Article
from konlpy.tag import Kkma
from konlpy.tag import Twitter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
from datetime import datetime, timedelta
import numpy as np
import hanja
from hanja import hangul
f = open('outtfdf.txt','r')
exceptx=[]
head=""
while True:
line = f.readline()
if not line: break
exceptx.append(line[:-1])
class SentenceTokenizer(object):
def __init__(self):
self.kkma = Kkma()
self.twitter = Twitter()
self.stopwords = ['중인' ,'만큼', '마찬가지', '꼬집었', "연합뉴스", "데일리", "동아일보", "중앙일보", "조선일보", "기자"
,"아", "휴", "아이구", "아이쿠", "아이고", "어", "나", "우리", "저희", "따라", "의해", "을", "를", "에", "의", "가",]
def url2sentences(self, url):
article = Article(url, language='ko')
article.download()
article.parse()
sentences = self.kkma.sentences(article.text)
for idx in range(0, len(sentences)):
if len(sentences[idx]) <= 10:
sentences[idx-1] += (' ' + sentences[idx])
sentences[idx] = ''
return sentences
def text2sentences(self, text):
sentences = self.kkma.sentences(text)
for idx in range(0, len(sentences)):
if len(sentences[idx]) <= 10:
sentences[idx-1] += (' ' + sentences[idx])
sentences[idx] = ''
return sentences
def get_nouns(self, sentences):
nouns = []
# str.replace returns a new string, so keep the cleaned copy before splitting
cleaned_head = head.replace('…',' ').replace('"',' ').replace('.',' ')
line = cleaned_head.split()
ta=""
for a in line:
if len(a)>1:
ta+=str(a[:2])+" "
nouns.append(ta)
for sentence in sentences:
tmp=""
if sentence is not '':
for n in line:
if n in sentence and len(n)>1:
tmp+=str(n[:2])+" "
for noun in self.twitter.nouns(str(sentence)):
if noun not in self.stopwords and len(noun)>1:
tmp+=str(noun)+" "
nouns.append(tmp)
# tmp.append(noun for noun in self.twitter.nouns(str(sentence))
# if noun not in self.stopwords and len(noun) > 1)
# nouns.append(' '.join([str(n) for n in tmp]))
return nouns
class GraphMatrix(object):
def __init__(self):
self.tfidf = TfidfVectorizer()
self.cnt_vec = CountVectorizer()
self.graph_sentence = []
def build_sent_graph(self, sentence):
tfidf_mat = self.tfidf.fit_transform(sentence).toarray()
self.graph_sentence = np.dot(tfidf_mat, tfidf_mat.T)
return self.graph_sentence
def build_words_graph(self, sentence):
cnt_vec_mat = normalize(self.cnt_vec.fit_transform(sentence).toarray().astype(float), axis=0)
vocab = self.cnt_vec.vocabulary_
return np.dot(cnt_vec_mat.T, cnt_vec_mat), {vocab[word] : word for word in vocab}
class Rank(object):
def get_ranks(self, graph, d=0.85): # d = damping factor
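# The body below solves the standard TextRank/PageRank system directly:
# the loop rewrites A in place into (I - d*M), where M is the column-normalised
# adjacency matrix with a zeroed diagonal, and B is the vector (1 - d) * 1, so
# np.linalg.solve yields ranks r satisfying r = d*M*r + (1 - d).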
A = graph
matrix_size = A.shape[0]
for id in range(matrix_size):
A[id, id] = 0 # zero out the diagonal entry
link_sum = np.sum(A[:,id]) # A[:, id] = A[:][id]
if link_sum != 0:
A[:, id] /= link_sum
A[:, id] *= -d
A[id, id] = 1
B = (1-d) * np.ones((matrix_size, 1))
ranks = np.linalg.solve(A, B) # solve the linear system Ax = b
return {idx: r[0] for idx, r in enumerate(ranks)}
class TextRank(object):
def __init__(self, text):
self.sent_tokenize = SentenceTokenizer()
if text[:5] in ('http:', 'https'):
self.sentences = self.sent_tokenize.url2sentences(text)
else:
self.sentences = self.sent_tokenize.text2sentences(text)
self.nouns = self.sent_tokenize.get_nouns(self.sentences)
self.graph_matrix = GraphMatrix()
self.sent_graph = self.graph_matrix.build_sent_graph(self.nouns)
self.words_graph, self.idx2word = self.graph_matrix.build_words_graph(self.nouns)
self.rank = Rank()
self.sent_rank_idx = self.rank.get_ranks(self.sent_graph)
self.sorted_sent_rank_idx = sorted(self.sent_rank_idx, key=lambda k: self.sent_rank_idx[k], reverse=True)
self.word_rank_idx = self.rank.get_ranks(self.words_graph)
self.sorted_word_rank_idx = sorted(self.word_rank_idx, key=lambda k: self.word_rank_idx[k], reverse=True)
def summarize(self, sent_num=3):
summary = []
index=[]
for idx in self.sorted_sent_rank_idx[:sent_num]:
index.append(idx)
index.sort()
for idx in index:
summary.append(self.sentences[idx])
return summary
def keywords(self, word_num=30):
rank = Rank()
rank_idx = rank.get_ranks(self.words_graph)
sorted_rank_idx = sorted(rank_idx, key=lambda k: rank_idx[k], reverse=True)
keywords = []
index=[]
for idx in sorted_rank_idx[:word_num]:
index.append(idx)
# index.sort()#####
for idx in index:
keywords.append(self.idx2word[idx])
realkeywords=[]
cnt=0
for i in range(len(keywords)):
if keywords[i] in exceptx:
continue
else:
realkeywords.append(keywords[i])
cnt=cnt+1
if cnt==20:
break
return realkeywords
#url = 'http://v.media.daum.net/v/20170611192209012?rcmd=r'
f2 = open('spec_keywordslist.txt','w')
for i in range(16,17):
for j in range(1,16):
tmp = 'text/news/input'+str(i)+'-'+str(j)+'.txt'
try:
f = open(tmp,'r')
except:
continue
lines=""
headchk=0
while True:
line = f.readline()
if not line:
break
if headchk==0:
head=line
headchk=1
lines+=line
textrank = TextRank(lines)
# for row in textrank.summarize(3):
# print(row)
print(i,j,'keywords: ',textrank.keywords())
for k in textrank.keywords():
f2.write(k)
f2.write(' ')
f2.write('\n')
| 35.178378 | 113 | 0.546865 |
bf728da07918a405b93b293b0cffc1491e3a9d97 | 645 | py | Python | examples/ev3/i2c_basics/main.py | thesynman/pybricks-api | fbbb81caf0703d3d862d5417416adb9295754de0 | [
"MIT"
] | 51 | 2020-04-02T10:03:45.000Z | 2022-03-27T23:49:39.000Z | examples/ev3/i2c_basics/main.py | thesynman/pybricks-api | fbbb81caf0703d3d862d5417416adb9295754de0 | [
"MIT"
] | 77 | 2020-03-22T17:32:14.000Z | 2022-03-28T18:02:43.000Z | examples/ev3/i2c_basics/main.py | thesynman/pybricks-api | fbbb81caf0703d3d862d5417416adb9295754de0 | [
"MIT"
] | 25 | 2020-03-18T23:35:17.000Z | 2022-01-01T12:52:01.000Z | #!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.iodevices import I2CDevice
from pybricks.parameters import Port
# Initialize the EV3
ev3 = EV3Brick()
# Initialize I2C Sensor
device = I2CDevice(Port.S2, 0xD2 >> 1)
# Read one byte from the device.
# For this device, we can read the Who Am I
# register (0x0F) for the expected value: 211.
if 211 not in device.read(0x0F):
raise ValueError("Unexpected I2C device ID")
# To write data, create a bytes object of one
# or more bytes. For example:
# data = bytes((1, 2, 3))
# Write one byte (value 0x08) to register 0x22
device.write(0x22, bytes((0x08,)))
| 26.875 | 48 | 0.733333 |
82906c96eec6b809bc195fb09900560f35aeb062 | 2,233 | py | Python | pwndbg/commands/misc.py | jakuta-tech/pwndbg | 26f7321c6049dbc92ba706ecdf5de2d7ad4a75a3 | [
"MIT"
] | null | null | null | pwndbg/commands/misc.py | jakuta-tech/pwndbg | 26f7321c6049dbc92ba706ecdf5de2d7ad4a75a3 | [
"MIT"
] | null | null | null | pwndbg/commands/misc.py | jakuta-tech/pwndbg | 26f7321c6049dbc92ba706ecdf5de2d7ad4a75a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import errno as _errno
import struct
import gdb
import pwndbg as _pwndbg
import pwndbg.arch as _arch
import pwndbg.commands
import pwndbg.regs
import pwndbg.symbol
_errno.errorcode[0] = 'OK'
parser = argparse.ArgumentParser(description='''
Converts errno (or argument) to its string representation.
''')
parser.add_argument('err', type=int, nargs='?', default=None, help='Errno; if not passed, it is retrieved from __errno_location')
@_pwndbg.commands.ArgparsedCommand(parser)
def errno(err):
if err is None:
# Don't ask.
errno_location = pwndbg.symbol.get('__errno_location')
err = pwndbg.memory.int(errno_location)
# err = int(gdb.parse_and_eval('*((int *(*) (void)) __errno_location) ()'))
err = abs(int(err))
if err >> 63:
err -= (1<<64)
elif err >> 31:
err -= (1<<32)
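# The shifts above re-interpret an unsigned register value as a signed integer,
# e.g. a 32-bit register holding 0xFFFFFFFE (4294967294) is mapped to -2.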
msg = _errno.errorcode.get(int(err), "Unknown error code")
print("Errno %i: %s" % (err, msg))
parser = argparse.ArgumentParser(description='''
Prints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.
''')
parser.add_argument('filter_pattern', type=str, nargs='?', default=None, help='Filter to apply to commands names/docs')
@_pwndbg.commands.ArgparsedCommand(parser)
def pwndbg(filter_pattern):
sorted_commands = list(_pwndbg.commands._Command.commands)
sorted_commands.sort(key=lambda x: x.__name__)
if filter_pattern:
filter_pattern = filter_pattern.lower()
for c in sorted_commands:
name = c.__name__
docs = c.__doc__
if docs: docs = docs.strip()
if docs: docs = docs.splitlines()[0]
if not filter_pattern or filter_pattern in name.lower() or (docs and filter_pattern in docs.lower()):
print("%-20s %s" % (name, docs))
@_pwndbg.commands.ParsedCommand
def distance(a, b):
'''Print the distance between the two arguments'''
a = int(a) & _arch.ptrmask
b = int(b) & _arch.ptrmask
distance = (b-a)
print("%#x->%#x is %#x bytes (%#x words)" % (a, b, distance, distance // _arch.ptrsize))
| 30.175676 | 129 | 0.678012 |
2f866973d8f3c1c0d74ccee05e0cafb510b1b6c2 | 2,795 | py | Python | newrelic_lambda_cli/utils.py | eeroniemi/newrelic-lambda-cli | 53a62bfcdab13dd356bfb7b5f28628f3b72e92f8 | [
"Apache-2.0"
] | 1 | 2020-11-23T14:07:24.000Z | 2020-11-23T14:07:24.000Z | newrelic_lambda_cli/utils.py | eeroniemi/newrelic-lambda-cli | 53a62bfcdab13dd356bfb7b5f28628f3b72e92f8 | [
"Apache-2.0"
] | null | null | null | newrelic_lambda_cli/utils.py | eeroniemi/newrelic-lambda-cli | 53a62bfcdab13dd356bfb7b5f28628f3b72e92f8 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import sys
import boto3
import botocore
import click
NEW_RELIC_ARN_PREFIX_TEMPLATE = "arn:aws:lambda:%s:451483290750"
RUNTIME_CONFIG = {
"nodejs10.x": {"Handler": "newrelic-lambda-wrapper.handler"},
"nodejs12.x": {"Handler": "newrelic-lambda-wrapper.handler"},
"python2.7": {"Handler": "newrelic_lambda_wrapper.handler"},
"python3.6": {"Handler": "newrelic_lambda_wrapper.handler"},
"python3.7": {"Handler": "newrelic_lambda_wrapper.handler"},
"python3.8": {"Handler": "newrelic_lambda_wrapper.handler"},
}
def catch_boto_errors(func):
def _boto_error_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except botocore.exceptions.NoRegionError:
error(
"You must specify a region. Pass `--aws-region` or run `aws configure`."
)
except botocore.exceptions.NoCredentialsError:
error("No AWS credentials configured. Did you run `aws configure`?")
except botocore.exceptions.BotoCoreError as e:
error("Unexpected AWS error: %s" % e)
return _boto_error_wrapper
def get_arn_prefix(region):
return NEW_RELIC_ARN_PREFIX_TEMPLATE % (get_region(region),)
@catch_boto_errors
def get_region(region):
boto_kwargs = {}
if region:
boto_kwargs["region_name"] = region
session = boto3.session.Session(**boto_kwargs)
return session.region_name
@catch_boto_errors
def get_lambda_client(session):
return session.client("lambda")
@catch_boto_errors
def all_lambda_regions():
return boto3.Session().get_available_regions("lambda")
def is_valid_handler(runtime, handler):
runtime_handler = RUNTIME_CONFIG.get(runtime, {}).get("Handler", None)
if isinstance(runtime_handler, dict):
for _, valid_handler in runtime_handler.items():
if handler == valid_handler:
return True
return False
elif handler == runtime_handler:
return True
return False
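# For example, given RUNTIME_CONFIG above (handler strings are illustrative):
#   is_valid_handler("python3.7", "newrelic_lambda_wrapper.handler")  # True
#   is_valid_handler("python3.7", "app.handler")                      # False
#   is_valid_handler("go1.x", "main")                                 # False (unknown runtime)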
def error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
sys.exit(1)
def validate_aws_profile(ctx, param, value):
"""A click callback to validate that an AWS profile exists"""
try:
boto3.Session(profile_name=value)
except botocore.exceptions.ProfileNotFound as e:
raise click.BadParameter(e.fmt)
else:
return value
def unique(seq):
"""Returns unique values in a sequence while preserving order"""
seen = set()
# Why assign seen.add to seen_add instead of just calling seen.add?
# Python is a dynamic language, and resolving seen.add each iteration is more costly
# than resolving a local variable.
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
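# For example:
#   unique([3, 1, 3, 2, 1])  # -> [3, 1, 2]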
| 29.421053 | 88 | 0.683005 |
01983f91b2c64d366d53e3ac5ebc4538e270c5c8 | 853 | py | Python | deep_trainer/model_predictor.py | BMFriedenstein/StringSounds | ab83f928a0d3f563745f0f5a17a7b20b1f14d73c | [
"MIT"
] | 1 | 2020-03-26T04:00:54.000Z | 2020-03-26T04:00:54.000Z | deep_trainer/model_predictor.py | BMFriedenstein/StringSounds | ab83f928a0d3f563745f0f5a17a7b20b1f14d73c | [
"MIT"
] | null | null | null | deep_trainer/model_predictor.py | BMFriedenstein/StringSounds | ab83f928a0d3f563745f0f5a17a7b20b1f14d73c | [
"MIT"
] | 1 | 2020-03-26T04:00:57.000Z | 2020-03-26T04:00:57.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 3 13:04:38 2019
@author: Brandon
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from PIL import Image
from cnn_model_trainer import X, BuildModel
input_x = []
model_name = "tmp/model"
filename = "test_9992"
optimizer, loss, out = BuildModel()
saver = tf.train.Saver()
with tf.Session() as sess:
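    # Load the test image as RGB, scale pixel values to [0, 1], restore the
    # trained weights, and run the network on the single-image batch.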
pic = Image.open(filename + ".png").convert("RGB")
input_x.append(np.array(pic,dtype= np.float16)/255)
input_x = np.array(input_x, dtype=np.float16)
saver.restore(sess, model_name)
sess.run(out, feed_dict={X: input_x})
print("Model restored.")
np.savetxt(filename + "_out.data", out.eval(feed_dict={X: input_x})[0], delimiter=",", fmt="%.6f") | 29.413793 | 102 | 0.684642 |
ae3a4ed37d1c7b2f041d0b9f5c1b64db46a927d4 | 908 | py | Python | Task1D.py | lhliew/flood-warning | 234bb3f7ec7174fc91963d8b7e64df1893694e1b | [
"MIT"
] | null | null | null | Task1D.py | lhliew/flood-warning | 234bb3f7ec7174fc91963d8b7e64df1893694e1b | [
"MIT"
] | null | null | null | Task1D.py | lhliew/flood-warning | 234bb3f7ec7174fc91963d8b7e64df1893694e1b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 4 10:19:57 2017
@author: user 1
"""
from floodsystem.geo import rivers_with_station
from floodsystem.geo import stations_by_rivers
from floodsystem.stationdata import build_station_list
def run():
stations = build_station_list()
""" Part 1 of Task 1D
"""
a = rivers_with_station(stations)
""" Sorting the list of rivers
"""
b = sorted(a)
print (len(a), '\n')
"""Getting the first 10 entries
"""
print (b[:10], '\n')
""" Part 2 of Task 1D
"""
riverdict = stations_by_rivers(stations)
c= riverdict["River Aire"]
d= riverdict["River Cam"]
e= riverdict["Thames"]
print (sorted(c), '\n' )
print (sorted(d), '\n')
print (sorted(e))
if __name__ == "__main__":
print ("** Task 1D: CUED Part IA Flood Warning System **")
run() | 19.319149 | 62 | 0.580396 |
0898149af14a9bb7665b5ab46f3837b0820a49e7 | 241,841 | py | Python | tensorflow/python/framework/ops.py | autoih/tensorflow | 4a1ae31d56c3c7f40232aace615945c29dcf9c38 | [
"Apache-2.0"
] | 1 | 2020-01-18T17:54:05.000Z | 2020-01-18T17:54:05.000Z | tensorflow/python/framework/ops.py | autoih/tensorflow | 4a1ae31d56c3c7f40232aace615945c29dcf9c38 | [
"Apache-2.0"
] | 3 | 2019-07-25T16:55:56.000Z | 2019-08-01T23:44:31.000Z | tensorflow/python/framework/ops.py | autoih/tensorflow | 4a1ae31d56c3c7f40232aace615945c29dcf9c38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import sys
import threading
import types
import numpy as np
import six
from six.moves import map # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import kwarg_only
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs.doc_controls import do_not_generate_docs
ag_ctx = LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def):
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
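# A minimal sketch of a type that would pass the checks above (the class name
# is illustrative, not part of TensorFlow):
#   class MyDenseTensorLike(object):
#     @property
#     def name(self):
#       return "my_tensor:0"
#     @property
#     def dtype(self):
#       return dtypes.float32
#   register_dense_tensor_like_type(MyDenseTensorLike)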
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
# pylint: disable=protected-access
text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
# pylint: enable=protected-access
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
"""Compare Tensors with element-wise comparison and thus be unhashable.
  Comparing tensors element-wise allows comparisons such as
tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
unhashable. Thus tensors can no longer be directly used in sets or as a key in
a dictionary.
"""
Tensor._USE_EQUALITY = True # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
"""Compare Tensors by their id and be hashable.
This is a legacy behaviour of TensorFlow and is highly discouraged.
"""
Tensor._USE_EQUALITY = False # pylint: disable=protected-access
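# For example (a sketch; values are illustrative): with tensor equality
# enabled, `tf.constant(1.0) == 1.0` is evaluated element-wise and yields a
# boolean tensor, so tensors cannot be used as set members or dict keys; after
# disable_tensor_equality(), `==` falls back to identity comparison and tensors
# are hashable again.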
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow `tf.compat.v1.Session`.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.compat.v1.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@staticmethod
def _create_with_tf_output(op, value_index, dtype, tf_output):
ret = Tensor(op, value_index, dtype)
ret._tf_output = tf_output
return ret
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vector = [None if d == -1 else d for d in shape_vector]
return tensor_shape.TensorShape(shape_vector)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def _disallow_when_autograph_disabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph is disabled in this function."
" Try decorating it directly with @tf.function.".format(task))
def _disallow_when_autograph_enabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph did not convert this function. Try"
" decorating it directly with @tf.function.".format(task))
def _disallow_in_graph_mode(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed in Graph execution. Use Eager execution or decorate"
" this function with @tf.function.".format(task))
def _disallow_bool_casting(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled(
"using a `tf.Tensor` as a Python `bool`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled(
"using a `tf.Tensor` as a Python `bool`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")
def _disallow_iteration(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("iterating over `tf.Tensor`")
def __iter__(self):
if not context.executing_eagerly():
self._disallow_iteration()
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def _maybe_constant_shape(self, gen_array_ops):
"""The shape tuple if fully defined, otherwise op to get shape."""
shape = self._shape_as_list()
if shape is not None and all(x is not None for x in shape):
return shape
return gen_array_ops.shape(self)
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.compat.v1.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
result in inconsistencies between the statically-known graph and the runtime
value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
instead.
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
c_api.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
g = getattr(self, "graph", None)
if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
(g is None or g._building_function)): # pylint: disable=protected-access
raise TypeError("Tensor is unhashable if Tensor equality is enabled. "
"Instead, use tensor.experimental_ref() as the key.")
else:
return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
def __array__(self):
raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
" array.".format(self.name))
def __len__(self):
raise TypeError("len is not well defined for symbolic Tensors. ({}) "
"Please call `x.shape` rather than `len(x)` for "
"shape information.".format(self.name))
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
def experimental_ref(self):
# tf.Variable also has the same experimental_ref() API. If you update the
    # documentation here, please update tf.Variable.experimental_ref() as well.
"""Returns a hashable reference object to this Tensor.
Warning: Experimental API that could be changed or removed.
    The primary use case for this API is to put tensors in a set/dictionary.
    We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
    available starting in TensorFlow 2.0.
```python
import tensorflow as tf
x = tf.constant(5)
y = tf.constant(10)
z = tf.constant(10)
    # The following will raise an exception starting in 2.0
# TypeError: Tensor is unhashable if Tensor equality is enabled.
tensor_set = {x, y, z}
tensor_dict = {x: 'five', y: 'ten', z: 'ten'}
```
Instead, we can use `tensor.experimental_ref()`.
```python
tensor_set = {x.experimental_ref(),
y.experimental_ref(),
z.experimental_ref()}
print(x.experimental_ref() in tensor_set)
==> True
tensor_dict = {x.experimental_ref(): 'five',
y.experimental_ref(): 'ten',
z.experimental_ref(): 'ten'}
print(tensor_dict[y.experimental_ref()])
==> ten
```
Also, the reference object provides `.deref()` function that returns the
original Tensor.
```python
x = tf.constant(5)
print(x.experimental_ref().deref())
==> tf.Tensor(5, shape=(), dtype=int32)
```
"""
return object_identity.Reference(self)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
# __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __int__(self):
return int(self._numpy())
def __long__(self):
return long(self._numpy())
def __float__(self):
return float(self._numpy())
def __index__(self):
return self._numpy().__index__()
def __bool__(self):
return bool(self._numpy())
__nonzero__ = __bool__
def __format__(self, format_spec):
return self._numpy().__format__(format_spec)
def __reduce__(self):
return convert_to_tensor, (self._numpy(),)
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % (
self.shape, self.dtype.name, numpy_text(self, is_repr=True))
def __len__(self):
"""Returns the length of the first dimension in the Tensor."""
if not self.shape.ndims:
raise TypeError("Scalar tensor has no `len()`")
# pylint: disable=protected-access
try:
return self._shape_tuple()[0]
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
def _numpy_internal(self):
raise NotImplementedError()
def _numpy(self):
# pylint: disable=protected-access
try:
return self._numpy_internal()
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Returns a numpy array or a scalar with the same contents as the Tensor.
TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
buffer but instead always explicitly copy? Note that currently it may or may
not copy based on whether the numpy data is properly aligned or not.
Returns:
A numpy array or a scalar. Numpy array may share memory with the
Tensor object. Any changes to one may be reflected in the other. A scalar
value is returned when self has rank 0.
Raises:
ValueError: if the type of this Tensor is not representable in numpy.
"""
maybe_arr = self._numpy() # pylint: disable=protected-access
return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
@property
def backing_device(self):
"""Returns the name of the device holding this tensor's memory.
`.backing_device` is usually the same as `.device`, which returns
the device on which the kernel of the operation that produced this tensor
ran. However, some operations can produce tensors on a different device
(e.g., an operation that executes on the GPU but produces output tensors
in host memory).
"""
raise NotImplementedError()
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
two list and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _maybe_constant_shape(self, _):
return self.shape
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _num_elements(self):
"""Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
This is more performant than tensor.shape.num_elements
Returns:
Long - num elements in the tensor
"""
raise NotImplementedError()
def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name
raise NotImplementedError()
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
new_tensor = self._copy_to_device(device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [
dresult._copy(device_name=self_device)
if hasattr(dresult, "_copy") else dresult
]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# pylint: disable=protected-access
try:
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
@deprecation.deprecated(None, "Use tf.identity instead.")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
@deprecation.deprecated(None, "Use tf.identity instead.")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
    Args:
gpu_index: Identifies which GPU to place the contents on the returned
Tensor in.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor_v1(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
dtype_hint: same meaning as preferred_dtype, and overrides it.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
dtype_hint=None,
ctx=None,
accepted_result_types=(Tensor,)):
"""Implementation of the public convert_to_tensor."""
# TODO(b/142518781): Fix all call-sites and remove redundant arg
preferred_dtype = preferred_dtype or dtype_hint
if isinstance(value, EagerTensor):
if ctx is None:
ctx = context.context()
if not ctx.executing_eagerly():
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, Tensor):
if dtype is not None and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
if preferred_dtype is not None:
preferred_dtype = dtypes.as_dtype(preferred_dtype)
for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError):
# Could not coerce the conversion to use the preferred dtype.
pass
else:
if (ret is not NotImplemented and
ret.dtype.base_dtype != preferred_dtype.base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype, preferred_dtype.base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, accepted_result_types):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, type(value)))
internal_convert_to_tensor = convert_to_tensor
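# A sketch of the preferred_dtype fallback implemented above (values are
# illustrative):
#   convert_to_tensor([1, 2], preferred_dtype=dtypes.float32)    # float32 Tensor
#   convert_to_tensor([1.5, 2.5], preferred_dtype=dtypes.int32)  # int32 not possible,
#                                                                # falls back to float32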
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
if ctx is None:
ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
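# Usage sketch (values and the name prefix are illustrative):
#   ts = convert_n_to_tensor([1, 2, 3], dtype=dtypes.int32, name="x")
#   # `ts` is a list of three scalar int32 Tensors; element i is created under
#   # the name "x_%d" % i.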
def convert_to_tensor_or_composite(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor` or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_composite(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor`, or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, composite_tensor.CompositeTensor):
value_dtype = getattr(value, "dtype", None)
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return convert_to_tensor(
value,
dtype=dtype,
name=name,
as_ref=as_ref,
accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
def internal_convert_n_to_tensor_or_composite(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_composite(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor` and/or `CompositeTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_composite(
values=values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
if pydev.is_device_spec(dev_spec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, attrs=None):
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
attrs: Dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),
name=compat.as_bytes(name))
if attrs:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/>]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/>]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`
objects as input, and produces zero or more `Tensor` objects as output.
Objects of type `Operation` are created by calling a Python op constructor
(such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`
context manager.
For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an
`Operation` of type "MatMul" that takes tensors `a` and `b` as input, and
produces `c` as output.
If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be
executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for
calling `tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "SwigPyObject":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code location of device context manager
# invocations and the users original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# Gradient function for this op. There are three ways to specify gradient
# function, and first available gradient gets used, in the following order.
# 1. self._gradient_function
# 2. Gradient name registered by "_gradient_op_type" attribute.
# 3. Gradient name registered by op.type.
self._gradient_function = None
# Initialize self._c_op.
if c_op:
self._c_op = c_op
op_def = g._get_op_def(c_api.TF_OperationOpType(c_op))
name = self.name
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
grouped_inputs = self._reconstruct_sequence_inputs(
op_def, inputs, node_def.attr)
self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
control_input_ops)
name = compat.as_str(node_def.name)
# pylint: enable=protected-access
self._is_stateful = op_def.is_stateful
# Initialize self._outputs.
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
self._outputs = []
for i in range(num_outputs):
tf_output = c_api_util.tf_output(self._c_op, i)
output_type = c_api.TF_OperationOutputType(tf_output)
tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access
self._outputs.append(tensor)
self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing(input_tensors=inputs)
def _control_flow_post_processing(self, input_tensors=None):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
Args:
input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs
of this op, which should be equivalent to `self.inputs`. Pass this
argument to avoid evaluating `self.inputs` unnecessarily.
"""
if input_tensors is None:
input_tensors = self.inputs
for input_tensor in input_tensors:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
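As a rough, TF-independent illustration of the regrouping (assuming one scalar
input followed by a single sequence input of length 3):
```python
flat = ["a", "b0", "b1", "b2"]
grouped = [flat[0], flat[1:4]]
assert grouped == ["a", ["b0", "b1", "b2"]]
```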
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return c_api.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
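For example, a minimal sketch using an explicit device scope (the exact
canonical device string may differ depending on the spec that was used):
```python
g = tf.Graph()
with g.as_default():
  with tf.device("/device:CPU:0"):
    c = tf.constant(1.0)
print(c.op.device)  # e.g. "/device:CPU:0"
```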
"""
return c_api.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in c_api.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(self._tf_output(i))
for i in xrange(num_outputs)
]
# In all the tests, the output_types passed into Operation.__init__ are
# lists of ints (which is illegal according to the docstring), but the
# input_types are instances of DType.
# This extra assert is to catch if we ever use DType for output_types.
if output_types:
assert isinstance(output_types[0], int)
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = c_api.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = c_api.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
device: string or device. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
c_api.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
Arguments:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
@property
def inputs(self):
"""The sequence of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
# pylint: disable=protected-access
self._inputs_val = tuple(map(self.graph._get_tensor_by_tf_output,
c_api.GetOperationInputs(self._c_op)))
# pylint: enable=protected-access
return self._inputs_val
@property
def _input_types(self):
num_inputs = c_api.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
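For example, a minimal sketch using `tf.control_dependencies` inside a graph
context:
```python
g = tf.Graph()
with g.as_default():
  a = tf.constant(1.0)
  with tf.control_dependencies([a]):
    b = tf.constant(2.0)
assert a.op in b.op.control_inputs
```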
"""
control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
Before any of the ops in `self._control_outputs` can execute, TensorFlow will
ensure that `self` has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return c_api.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
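For example, a minimal sketch of inspecting the `NodeDef` of a constant op:
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0, name="c")
nd = c.op.node_def
assert nd.op == "Const" and nd.name == "c"
```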
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationToNodeDef(self._c_op, buf)
data = c_api.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._traceback
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
self._set_attr_with_buf(attr_name, buf)
finally:
c_api.TF_DeleteBuffer(buf)
def _set_attr_with_buf(self, attr_name, attr_buf):
"""Set an attr in the node_def with a pre-allocated buffer."""
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, attr_buf)
# pylint: enable=protected-access
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
c_api.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
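For example, a minimal sketch reading the `dtype` attr of a constant op:
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0)
assert c.op.get_attr("dtype") == tf.float32
```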
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
def _get_attr_type(self, name):
"""Returns the `DType` value of the attr of this op with the given `name`."""
try:
dtype_enum = c_api.TF_OperationGetAttrType(self._c_op, name)
return _DTYPES_INTERN_TABLE[dtype_enum]
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_bool(self, name):
"""Returns the `bool` value of the attr of this op with the given `name`."""
try:
return c_api.TF_OperationGetAttrBool(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_int(self, name):
"""Returns the `int` value of the attr of this op with the given `name`."""
try:
return c_api.TF_OperationGetAttrInt(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run this operation. If
none, the default session will be used.
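For example, a minimal sketch using a default session (graph mode only):
```python
with tf.Graph().as_default():
  v = tf.compat.v1.Variable(1.0)
  init = tf.compat.v1.global_variables_initializer()
  with tf.compat.v1.Session() as sess:
    init.run()       # shortcut for sess.run(init)
    print(v.eval())  # 1.0
```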
"""
_run_using_default_session(self, feed_dict, self.graph, session)
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
gradient_function = op._gradient_function # pylint: disable=protected-access
if gradient_function:
return gradient_function
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
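For example, a minimal sketch of accumulating statistics of the same type
(assuming `ops` refers to this module):
```python
stats = ops.OpStats("flops", 1024)
stats += ops.OpStats("flops", 2048)
assert stats.value == 3072
```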
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Since the statistics are counted on a per-op basis, they are not suitable for
model parameters (capacity), which are expected to be counted only once, even
if they are shared by multiple ops (e.g. RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
Graphs are used by `tf.function`s to represent the function's computations.
Each graph contains a set of `tf.Operation` objects, which represent units of
computation; and `tf.Tensor` objects, which represent the units of data that
flow between operations.
### Using graphs directly (deprecated)
A `tf.Graph` can be constructed and used directly without a `tf.function`, as
was required in TensorFlow 1, but this is deprecated and it is recommended to
use a `tf.function` instead. If a graph is directly used, other deprecated
TensorFlow 1 classes are also required to execute the graph, such as a
`tf.compat.v1.Session`.
A default graph can be registered with the `tf.Graph.as_default` context
manager. Then, operations will be added to the graph instead of being executed
eagerly. For example:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
`tf.compat.v1.get_default_graph()` can be used to obtain the default graph.
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend on the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# A map from op type to a gradient function that should be used instead.
self._gradient_function_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = object_identity.ObjectIdentitySet()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
# If this flag has been set, then estimator uses it to scale loss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# Cache for OpDef protobufs retrieved via the C API.
self._op_def_cache = {}
# Cache for constant results of `broadcast_gradient_args()`. The keys are
# tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
# values are tuples of reduction indices: (rx, ry).
self._bcast_grad_args_cache = {}
# Cache for constant results of `reduced_shape()`. The keys are pairs of
# tuples: (input_shape_tuple, reduction_indices_tuple), and the values
# are pairs of tuples: (output_shape_kept_dims, tile_scaling).
self._reduced_shape_cache = {}
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
c_api.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
# And lets say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
# When exiting the variable creator scope, it would see a different stack
# object than it expected, leading to an "Exiting variable_creator_scope
# without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op, op_name):
"""Adds 'op' to the graph and returns the unique ID for the added Operation.
Args:
op: the Operation to add.
op_name: the name of the Operation.
Returns:
An integer that is a unique ID for the added Operation.
"""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
op_id = self._next_id_counter
self._nodes_by_id[op_id] = op
self._nodes_by_name[op_name] = op
self._version = max(self._version, op_id)
return op_id
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
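For example, a minimal sketch of the read-only behavior after finalizing:
```python
g = tf.Graph()
g.finalize()
with g.as_default():
  try:
    tf.constant(1.0)
  except RuntimeError:
    pass  # no new ops can be added to a finalized graph
```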
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphToGraphDef(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
# TODO(b/141471245): Fix the inconsistency when inputs of func graph
# are appended during gradient computation of while/cond.
for input_tensor, _ in zip(func_graph_inputs,
function_def.signature.input_arg):
if input_tensor.dtype == dtypes.resource:
# TODO(allenl): Save and restore handle data, then save the
# resource placeholder's shape. Right now some shape functions get
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
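For example, a minimal sketch of serializing a small graph:
```python
g = tf.Graph()
with g.as_default():
  tf.constant(1.0, name="one")
gd = g.as_graph_def()
assert any(node.name == "one" for node in gd.node)
```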
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
# pylint: enable=protected-access
self._functions[compat.as_str(name)] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
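For example, a minimal sketch using the attr-free "NoOp" op type; the name
"noop_example" is arbitrary, and most programs should prefer the regular
Python op constructors:
```python
g = tf.Graph()
with g.as_default():
  noop = g.create_op("NoOp", [], name="noop_example")
assert noop.type == "NoOp"
assert noop.outputs == []
```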
"""
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Creates an `Operation` in this graph.
Implements `Graph.create_op()` without the overhead of the deprecation
wrapper.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
if name is None:
name = op_type
# If a name ends with a '/' it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, attrs)
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
op._gradient_function = self._gradient_function_map.get(op.type) # pylint: disable=protected-access
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op._is_stateful: # pylint: disable=protected-access
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
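For example (a minimal sketch; `c` is an illustrative constant created in `g`):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(5.0, name="c")
assert g.as_graph_element("c") is c.op    # Operation lookup by name.
assert g.as_graph_element("c:0") is c     # Tensor lookup by "<op_name>:<output_index>".
assert g.as_graph_element(c) is c         # Objects from this graph pass through unchanged.
```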
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list, such as inserts or deletes, have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is an internal unsafe version of get_operation_by_name. It skips many
checks and does not have user-friendly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
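For example (a minimal sketch):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0, name="c")
# Tensor names have the form "<op_name>:<output_index>".
assert g.get_tensor_by_name("c:0") is c
```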
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
# NOTE: No locking is required because the lookup and insertion operations
# on Python dictionaries are atomic.
try:
return self._op_def_cache[type]
except KeyError:
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
self._op_def_cache[type] = op_def
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled, ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
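For example (a minimal sketch; the collection key "my_vals" is illustrative):
```python
g = tf.Graph()
g.add_to_collection("my_vals", 1.0)
g.add_to_collection("my_vals", 1.0)   # Collections are lists, so duplicates accumulate.
assert g.get_collection("my_vals") == [1.0, 1.0]
```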
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
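For example (a minimal sketch with illustrative collection keys):
```python
g = tf.Graph()
g.add_to_collections(["a", "b", "a"], 1.0)   # The duplicate "a" is ignored.
assert g.get_collection("a") == [1.0]
assert g.get_collection("b") == [1.0]
```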
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
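For example (a minimal sketch; the collection key "queues" is illustrative):
```python
g = tf.Graph()
ref = g.get_collection_ref("queues")   # Created here as an empty list.
ref.append("q0")                       # Mutates the collection in place.
snapshot = g.get_collection("queues")  # A copy; mutating it has no effect.
snapshot.append("q1")
assert g.get_collection("queues") == ["q0"]
```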
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()`, which always returns the
actual collection list if it exists: this method instead returns a new list
each time it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This method validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
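For example (a minimal sketch on a fresh graph):
```python
g = tf.Graph()
assert g.unique_name("foo") == "foo"
assert g.unique_name("foo") == "foo_1"   # Later uses get a numeric suffix.
assert g.unique_name("foo", mark_as_used=False) == "foo_2"
assert g.unique_name("foo") == "foo_2"   # "foo_2" was not marked as used above.
```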
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within the
context, rather than applying all colocation properties on the stack.
If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
op = _op_to_colocate_with(op, self)
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
self._colocation_stack.push_obj(op, offset=4)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in the
context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If device scopes are not properly nested.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
old_top_of_stack = self._device_function_stack.peek_top_obj()
try:
yield
finally:
new_top_of_stack = self._device_function_stack.peek_top_obj()
if old_top_of_stack is not new_top_of_stack:
raise RuntimeError("Exiting device scope without proper scope nesting.")
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
prior_device_string = None
for device_spec in self._device_function_stack.peek_objs():
if device_spec.is_null_merge:
continue
if device_spec.function is None:
break
device_string = device_spec.string_merge(op)
# Take advantage of the fact that None is a singleton and Python interns
# strings, since identity checks are faster than equality checks.
if device_string is not prior_device_string:
op._set_device_from_string(device_string)
prior_device_string = device_string
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# placed in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument list control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
if isinstance(op, Tensor):
op = op.experimental_ref()
self._seen_nodes.add(op)
def op_in_group(self, op):
if isinstance(op, Tensor):
op = op.experimental_ref()
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal tensorflow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
# The hasattr(handle) is designed to match ResourceVariables. This is so
# control dependencies on a variable or on an unread variable don't
# trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo() # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
A context manager that sets the given attributes for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _override_gradient_function(self, gradient_function_map):
"""Specify gradient function for the given op type."""
# This is an internal API and we don't need nested context for this.
assert not self._gradient_function_map
self._gradient_function_map = gradient_function_map
yield
self._gradient_function_map = {}
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(s_1) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
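A minimal sketch (the worker function and device string below are illustrative):
```python
import threading

g = tf.Graph()
g.switch_to_thread_local()

def worker():
  with g.as_default(), g.device("/cpu:0"):
    pass  # Device scopes entered here stay local to this thread.

t = threading.Thread(target=worker)
t.start()
t.join()
```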
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
@property
def _auto_cast_variable_read_dtype(self):
"""The dtype that instances of `AutoCastVariable` will be casted to.
This is None if `AutoCastVariables` should not be casted.
See `AutoCastVariable` for more information.
Returns:
The dtype that instances of `AutoCastVariable` will be casted to.
"""
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
@_auto_cast_variable_read_dtype.setter
def _auto_cast_variable_read_dtype(self, dtype):
if dtype:
dtype = dtypes.as_dtype(dtype)
self._thread_local._auto_cast_variable_read_dtype = dtype # pylint: disable=protected-access
@tf_contextlib.contextmanager
def _enable_auto_casting_variables(self, dtype):
"""Context manager to automatically cast AutoCastVariables.
If an AutoCastVariable `var` is used under this context manager, it will be
casted to `dtype` before being used.
See `AutoCastVariable` for more information.
Args:
dtype: The dtype that AutoCastVariables should be casted to.
Yields:
Nothing.
"""
prev_read_dtype = self._auto_cast_variable_read_dtype
try:
self._auto_cast_variable_read_dtype = dtype
yield
finally:
self._auto_cast_variable_read_dtype = prev_read_dtype
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See `tf.Graph.device` for more details.
Args:
device_name_or_function: The device name or function to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
elif executing_eagerly_outside_functions():
@tf_contextlib.contextmanager
def combined(device_name_or_function):
with get_default_graph().device(device_name_or_function):
if not callable(device_name_or_function):
with context.device(device_name_or_function):
yield
else:
yield
return combined(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
"""Specifies the device for ops created/executed in this context.
`device_name` can be fully specified, as in "/job:worker/task:1/device:cpu:0",
or partially specified, containing only a subset of the "/"-separated
fields. Any fields which are specified override device annotations from outer
scopes. For example:
```python
with tf.device('/job:foo'):
# ops created here have devices with /job:foo
with tf.device('/job:bar/task:0/device:gpu:2'):
# ops created here have the fully specified device above
with tf.device('/device:gpu:1'):
# ops created here have the device '/job:foo/device:gpu:1'
```
Args:
device_name: The device name to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If a function is passed in.
"""
if callable(device_name):
raise RuntimeError("tf.device does not support functions.")
return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
if not hasattr(op, "device"):
op = internal_convert_to_tensor_or_indexed_slices(op)
return device(op.device)
else:
return NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
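For example (a minimal graph-mode sketch; `a`, `b` and `x` are assumed to be
previously created tensors):
```python
with tf.control_dependencies([a, b]):
  # `c` is only computed after both `a` and `b` have executed.
  c = tf.identity(x)
```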
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the context.
Can also be `None` to clear the control dependencies. If eager execution
is enabled, any callable object in the `control_inputs` list will be
called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
# Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
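For example (a minimal sketch):
```python
sess = tf.compat.v1.Session()
with sess.as_default():
  assert tf.compat.v1.get_default_session() is sess
```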
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or provide
# some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non-empty device stack.
def _get_outer_context_and_inner_device_stack():
"""Get the outermost context not building a function."""
default_graph = get_default_graph()
outer_context = None
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when defining graph functions via
tf.contrib.eager.defun. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.contrib.eager.defun
def func():
# A defun-decorated function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
scope = get_default_graph().get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
outer_context, innermost_nonempty_device_stack = (
_get_outer_context_and_inner_device_stack())
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(scope), control_dependencies(
None), tape.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
def executing_eagerly_outside_functions():
"""Returns True if executing eagerly, even if inside a graph function."""
if context.executing_eagerly():
return True
else:
outer_context, _ = _get_outer_context_and_inner_device_stack()
with outer_context():
return context.executing_eagerly()
def inside_function():
return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
and return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
inputs on a specific device (e.g., a GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created. It can be used at the beginning of the program for complex migration
projects from TensorFlow 1.x to 2.x.
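A typical call site (an illustrative sketch) is at the very top of the
program, before any other TensorFlow API has been used:
```python
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
# From here on, ops are added to a graph instead of executing eagerly.
```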
"""
_api_usage_gauge.get_cell().set(False)
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
ValueError
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
with context._context_lock:
if context._context is None:
context._set_context_locked(context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def))
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
result in undefined behavior. Using any previously created `tf.Operation` or
`tf.Tensor` objects after calling this function will result in undefined
behavior.
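For example (a minimal sketch):
```python
g1 = tf.compat.v1.get_default_graph()
tf.compat.v1.reset_default_graph()
g2 = tf.compat.v1.get_default_graph()
assert g1 is not g2  # a fresh global default graph has been installed
```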
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export(v1=["get_default_graph"])
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
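For example (a minimal sketch):
```python
g = tf.Graph()
with g.as_default():
  assert tf.compat.v1.get_default_graph() is g
```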
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
current_default_graph = get_default_graph()
if current_default_graph.building_function:
return current_default_graph
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one of
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
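For example (an illustrative sketch of reading one of the standard
collections in graph mode):
```python
w = tf.compat.v1.get_variable("w", shape=[2, 2])  # trainable by default
trainable_vars = tf.compat.v1.get_collection(
    tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
# `w` is now listed in `trainable_vars`.
```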
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
"""Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
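For example (a minimal sketch using this module-level helper):
```python
g = tf.Graph()
with g.as_default():
  tf.constant(1.0)
# ... use `g` ...
dismantle_graph(g)  # `g` and its ops must not be used after this call
```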
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs.
"""
memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access
# Now clean up Operation<->Graph reference cycles by clearing all of the
# attributes for the Graph and its ops.
graph_operations = graph.get_operations()
for op in graph_operations:
op.__dict__ = {}
graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
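For example (an illustrative sketch; `loss` is assumed to be a previously
created tensor):
```python
tf.compat.v1.add_to_collection("my_losses", loss)
losses = tf.compat.v1.get_collection("my_losses")
# `losses` now contains `loss`.
```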
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collections. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
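For example (an illustrative sketch; `t` is assumed to be a previously
created tensor):
```python
ref = tf.compat.v1.get_collection_ref("my_collection")
ref.append(t)  # mutates the underlying collection list in place; no copy
```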
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
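For example (an illustrative sketch of filtering a collection by scope):
```python
with tf.compat.v1.variable_scope("layer1"):
  w = tf.compat.v1.get_variable("w", shape=[2])
layer1_vars = tf.compat.v1.get_collection(
    tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope="layer1")
# `layer1_vars` contains only variables whose names start with "layer1".
```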
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items without
a `name` attribute are never returned if a scope is supplied and the
choice of `re.match` means that a `scope` without special tokens filters
by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
def name_scope(name, default_name=None, values=None):
"""Internal-only entry point for `name_scope*`.
Internal ops do not use the public API and instead rely on
`ops.name_scope` regardless of the execution mode. This function
dispatches to the correct `name_scope*` implementation based on
the arguments provided and the current mode. Specifically,
* if `values` contains a graph tensor `Graph.name_scope` is used;
* `name_scope_v1` is used in graph mode;
* `name_scope_v2` -- in eager mode.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Returns:
`name_scope*` context manager.
"""
ctx = context.context()
in_eager_mode = ctx.executing_eagerly()
if not in_eager_mode:
return internal_name_scope_v1(name, default_name, values)
name = default_name if name is None else name
if values:
# The presence of a graph tensor in `values` overrides the context.
# TODO(slebedev): this is Keras-specific and should be removed.
# pylint: disable=unidiomatic-typecheck
graph_value = next((value for value in values if type(value) == Tensor),
None)
# pylint: enable=unidiomatic-typecheck
if graph_value is not None:
return graph_value.graph.name_scope(name)
return name_scope_v2(name or "")
class internal_name_scope_v1(object): # pylint: disable=invalid-name
"""Graph-only version of `name_scope_v1`."""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
if not (default_name is None or isinstance(default_name, six.string_types)):
raise TypeError(
"`default_name` type (%s) is not a string type. You likely meant to "
"pass this into the `values` kwarg." % type(default_name))
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._name is None and self._values is not None:
# We only raise an error if `values` was actually provided (is not None),
# because currently tf.name_scope(None) with values=None is sometimes used as
# an idiom to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
g = get_default_graph()
if self._values and not g.building_function:
# Specialize based on the knowledge that `_get_graph_from_inputs()`
# ignores `inputs` when building a function.
g_from_inputs = _get_graph_from_inputs(self._values)
if g_from_inputs is not g:
g = g_from_inputs
self._g_manager = g.as_default()
self._g_manager.__enter__()
else:
self._g_manager = None
else:
self._g_manager = None
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
if self._g_manager is not None:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, *exc_info):
self._name_scope.__exit__(*exc_info)
if self._g_manager is not None:
self._g_manager.__exit__(*exc_info)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope_v1(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
self._name_scope = name_scope(name, default_name, values)
self._name = default_name if name is None else name
def __enter__(self):
return self._name_scope.__enter__()
def __exit__(self, *exc_info):
return self._name_scope.__exit__(*exc_info)
def enter_eager_name_scope(ctx, name):
"""Updates the eager context to enter the given name scope."""
old_name = ctx.scope_name
if not name:
scope_name = ""
else:
if name.endswith("/"):
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
scope_name = name
else:
scope_name = name + "/"
if old_name:
scope_name = old_name + scope_name
ctx.scope_name = scope_name
return scope_name, old_name
@tf_export("name_scope", v1=[])
class name_scope_v2(object):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
If the scope name already exists, the name will be made unique by appending
`_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
etc.
"""
def __init__(self, name):
"""Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is None, or not a string.
"""
if name is None or not isinstance(name, six.string_types):
raise ValueError("name for name_scope must be a string.")
self._name = name
self._exit_fns = []
@property
def name(self):
return self._name
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
ctx = context.context()
if ctx.executing_eagerly():
scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
self._exit_fns.append(
lambda *a: setattr(ctx, "scope_name", old_scope_name))
else:
scope = get_default_graph().name_scope(self._name)
scope_name = scope.__enter__()
self._exit_fns.append(scope.__exit__)
return scope_name
def __exit__(self, type_arg, value_arg, traceback_arg):
exit_fn = self._exit_fns.pop()
exit_fn(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts protocol buffer into a Python object, and
returns the object.
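A hypothetical registration might look like the following sketch (the
collection name, the `my_resource_pb2` module and the `MyResource` class are
illustrative assumptions, not part of this module):
```python
register_proto_function(
    "my_resources",
    proto_type=my_resource_pb2.MyResourceDef,
    to_proto=lambda obj, export_scope=None: obj.to_proto(export_scope),
    from_proto=lambda proto, import_scope=None: MyResource.from_proto(proto))
```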
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
`variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v, graph):
"""Operation object corresponding to v to use for colocation constraints."""
if v is None:
return None
if isinstance(v, Operation):
return v
# We always want to colocate with the reference op.
# When 'v' is a ResourceVariable, the reference op is the handle creating op.
#
# What this should be is:
# if isinstance(v, ResourceVariable):
# return v.handle.op
# However, that would require a circular import dependency.
# As of October 2018, there were attempts underway to remove
# colocation constraints altogether. Assuming that will
# happen soon, perhaps this hack to work around the circular
# import dependency is acceptable.
if hasattr(v, "handle") and isinstance(v.handle, Tensor):
if graph.building_function:
return graph.capture(v.handle).op
else:
return v.handle.op
return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
tensor_conversion_registry.register_tensor_conversion_function(
Operation, _operation_conversion_error)
# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
tensor_conversion_registry.register_tensor_conversion_function
# Helper functions for op wrapper modules generated by `python_op_gen`.
def to_raw_op(f):
"""Make a given op wrapper function `f` raw.
Raw op wrappers are not included in the docs, and can only be called
with keyword arguments.
Args:
f: An op wrapper function to make raw.
Returns:
Raw `f`.
"""
# Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail
# due to double-registration.
f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,
f.__closure__)
return kwarg_only(do_not_generate_docs(f))
def raise_from_not_ok_status(e, name):
message = e.message + (" name: " + name if name is not None else "")
# pylint: disable=protected-access
six.raise_from(core._status_to_exception(e.code, message), None)
# pylint: enable=protected-access
| 36.614837 | 115 | 0.692761 |
0b15bfddf1122edf6aa083e9085c9b49b4878e58 | 1,677 | py | Python | vendor/packages/sqlalchemy/lib/sqlalchemy/connectors/zxJDBC.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | vendor/packages/sqlalchemy/lib/sqlalchemy/connectors/zxJDBC.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | vendor/packages/sqlalchemy/lib/sqlalchemy/connectors/zxJDBC.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | import sys
from sqlalchemy.connectors import Connector
class ZxJDBCConnector(Connector):
driver = 'zxjdbc'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_unicode_binds = True
supports_unicode_statements = sys.version > '2.5.0+'
description_encoding = None
default_paramstyle = 'qmark'
jdbc_db_name = None
jdbc_driver_name = None
@classmethod
def dbapi(cls):
from com.ziclix.python.sql import zxJDBC
return zxJDBC
def _driver_kwargs(self):
"""Return kw arg dict to be sent to connect()."""
return {}
def _create_jdbc_url(self, url):
"""Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
url.port is not None
and ':%s' % url.port or '',
url.database)
def create_connect_args(self, url):
opts = self._driver_kwargs()
opts.update(url.query)
return [
[self._create_jdbc_url(url),
url.username, url.password,
self.jdbc_driver_name],
opts]
def is_disconnect(self, e):
if not isinstance(e, self.dbapi.ProgrammingError):
return False
e = str(e)
return 'connection is closed' in e or 'cursor is closed' in e
def _get_server_version_info(self, connection):
# use connection.connection.dbversion, and parse appropriately
# to get a tuple
raise NotImplementedError()
| 31.641509 | 74 | 0.57901 |
6b21f43f412115563665d6cce034e9b618bdd204 | 193 | py | Python | PyCharm/primer5.py | PervykhDarya/LR3 | 55a4c379c29e167eb3257b4cff4cf9b717906236 | [
"MIT"
] | null | null | null | PyCharm/primer5.py | PervykhDarya/LR3 | 55a4c379c29e167eb3257b4cff4cf9b717906236 | [
"MIT"
] | null | null | null | PyCharm/primer5.py | PervykhDarya/LR3 | 55a4c379c29e167eb3257b4cff4cf9b717906236 | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
fileptr = open("file2.txt", "r")
content1 = fileptr.readline()
content2 = fileptr.readline()
print(content1)
print(content2)
fileptr.close() | 14.846154 | 32 | 0.673575 |
044a49513e66f77742231d542f9704d937438308 | 21 | py | Python | texturesynth/snrp.py | tochikuji/pyTextureSynth | 6e1746fa1cc931ea083e3f04004a42a4894c762e | [
"MIT"
] | null | null | null | texturesynth/snrp.py | tochikuji/pyTextureSynth | 6e1746fa1cc931ea083e3f04004a42a4894c762e | [
"MIT"
] | null | null | null | texturesynth/snrp.py | tochikuji/pyTextureSynth | 6e1746fa1cc931ea083e3f04004a42a4894c762e | [
"MIT"
] | null | null | null | class SnrP(object):
    pass
| 7 | 19 | 0.714286 |
e7afddbc0d55ec9f37709a252aa680d30008349d | 20,038 | py | Python | tests/test_api.py | eriksp/django-drf-filepond | 518c86e7f75be8bec894f90c0b6bfdcbff58eab2 | [
"BSD-3-Clause"
] | null | null | null | tests/test_api.py | eriksp/django-drf-filepond | 518c86e7f75be8bec894f90c0b6bfdcbff58eab2 | [
"BSD-3-Clause"
] | null | null | null | tests/test_api.py | eriksp/django-drf-filepond | 518c86e7f75be8bec894f90c0b6bfdcbff58eab2 | [
"BSD-3-Clause"
] | null | null | null | '''
Tests for the store_upload api function provided by django-drf-filepond
THIS SET OF TESTS WILL TEST LOCAL STORAGE FUNCTIONALITY OF THE store_upload
FUNCTION.
store_upload:
Moves a temporary upload to permanent storage at a location on the local
filesystem or to a remote file store via the django-storages library.
If using a local filestore, the base location where files are stored is
set using the DJANGO_DRF_FILEPOND_FILE_STORE_PATH setting. If using a
remote file store, this setting defines the base location on the remote
file store where files will be placed.
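A minimal usage sketch (the upload ID shown is illustrative):
    from django_drf_filepond.api import store_upload
    su = store_upload('na8avbpcgjski9dwmer2xq',
                      'target_dir/target_file.txt')
    # su is the resulting StoredUpload record.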
'''
import logging
import os
from django.test import TestCase
from django_drf_filepond.api import store_upload
from django_drf_filepond.views import _get_file_id
from django.core.files.uploadedfile import SimpleUploadedFile
from django_drf_filepond.models import TemporaryUpload, StoredUpload
import django_drf_filepond.drf_filepond_settings as local_settings
from django.core.exceptions import ImproperlyConfigured
from django_drf_filepond.api import _store_upload_local
# There's no built-in FileNotFoundError or FileExistsError in Python 2
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
FileExistsError
except NameError:
FileExistsError = OSError
# Python 2/3 support
try:
from unittest.mock import patch
except ImportError:
from mock import patch
LOG = logging.getLogger(__name__)
#########################################################################
# Tests for store_upload:
#
# test_store_upload_unset_file_store_path: Call store_upload with the file
# store path in settings unset. An exception should be raised.
#
# test_store_upload_invalid_id: Call store_upload with an invalid ID that
# doesn't fit the required ID format.
#
# test_store_upload_invalid_id_correct_format: Call store_upload with an
# invalid ID that is of the correct format.
#
# test_store_upload_path_none: Call store_upload with a valid ID but path
# set to none.
#
# test_store_upload_path_blank: Call store_upload with a valid ID but path
# set to empty string.
#
# test_store_upload_path_with_dirname: Call store_upload with a valid ID
# and a path which is a directory (with a leading slash) but no target
# filename. The file should end up in the specified location,
# relative to the configured FILE_STORE_PATH, with the file named using
# the original name of the file when it was uploaded.
#
# test_store_upload_path_with_dirname_no_leading_sep: Call store_upload with
# a valid ID and a path including a target location which is a directory
# (without a leading file separator). The file should end up in the specified
# location, relative to the configured FILE_STORE_PATH.
#
# test_store_upload_path_with_filename: Call store_upload with a valid ID
# and a path including a target filename which is different to the
# name of the file when originally uploaded.
#
# test_store_multiple_uploads_to_same_dir: Call store_upload twice with two
# different valid file storage IDs, using the same target directory but
# different filenames for each of the two uploads.
#
# test_store_upload_with_root_path: Call store_upload with the path set to
# '/'. The temporary upload should be stored in the root of the file
# store directory with the name originally provided when it was uploaded.
#
# test_store_upload_local_direct_file_exists: Call _store_upload_local with
# a target file that already exists. Expect a FileExistsError
#
# test_store_upload_local_direct_no_file_store_path: Call store_upload with
# the file store path in settings unset. An exception should be raised.
#
# test_store_upload_local_direct_missing_store_path: Call _store_upload_local
# with a file store directory set that is missing. Expect exception.
#
# test_store_upload_local_copy_to_store_fails: Call _store_upload_local and
# the copy to permanent storage fails - expect exception.
#
class ApiTestCase(TestCase):
def setUp(self):
# Set up an initial file upload
self.upload_id = _get_file_id()
self.upload_id2 = _get_file_id()
self.file_id = _get_file_id()
self.file_content = ('This is some test file data for an '
'uploaded file.')
self.fn = 'my_test_file.txt'
self.test_target_dirname = 'test_storage/'
self.test_target_filename = '/test_storage/testfile.txt'
self.test_target_filename2 = '/test_storage/testfile2.txt'
uploaded_file = SimpleUploadedFile(self.fn,
str.encode(self.file_content))
tu = TemporaryUpload(upload_id=self.upload_id,
file_id=self.file_id,
file=uploaded_file, upload_name=self.fn,
upload_type=TemporaryUpload.FILE_DATA)
tu.save()
tu2 = TemporaryUpload(upload_id=self.upload_id2,
file_id=self.file_id,
file=uploaded_file, upload_name=self.fn,
upload_type=TemporaryUpload.FILE_DATA)
tu2.save()
def test_store_upload_unset_file_store_path(self):
fsp = local_settings.FILE_STORE_PATH
local_settings.FILE_STORE_PATH = None
with self.assertRaisesMessage(
ImproperlyConfigured, 'A required setting is missing in '
'your application configuration.'):
store_upload('hsdfiuysh78sdhiu', '/test_storage/test_file.txt')
local_settings.FILE_STORE_PATH = fsp
def test_store_upload_invalid_id(self):
with self.assertRaisesMessage(ValueError, 'The provided upload ID '
'is of an invalid format.'):
store_upload('hsdfiuysh78sdhiu', '/test_storage/test_file.txt')
# tu = TemporaryUpload.objects.get(upload_id=self.upload_id)
        # store_upload(self.upload_id, '/test_storage/%s' % tu.upload_name)
def test_store_upload_invalid_id_correct_format(self):
with self.assertRaisesMessage(ValueError, 'Record for the specified '
'upload_id doesn\'t exist'):
store_upload('hsdfiuysh78sdhiuf73gds', '/test_storage/test.txt')
def test_store_upload_path_none(self):
with self.assertRaisesMessage(ValueError, 'No destination file '
'path provided.'):
store_upload('hsdfiuysh78sdhiuf73gds', None)
def test_store_upload_path_blank(self):
with self.assertRaisesMessage(ValueError, 'No destination file '
'path provided.'):
store_upload('hsdfiuysh78sdhiuf73gds', '')
def test_store_upload_path_with_filename(self):
test_target_filename = self.test_target_filename
if test_target_filename.startswith(os.sep):
test_target_filename = test_target_filename[1:]
su = store_upload(self.upload_id, test_target_filename)
upload_id = su.upload_id
su = StoredUpload.objects.get(upload_id=upload_id)
LOG.debug('About to check that file path <%s> and stored path <%s> '
'are equal' % (test_target_filename, su.file.name))
self.assertEqual(
test_target_filename, su.file.name,
'File has been stored with wrong filename in the database.')
# Check that the file has actually been stored in the correct
# location and that the temporary file has been deleted
upload_tmp_base = getattr(local_settings, 'UPLOAD_TMP', None)
file_store_path = getattr(local_settings, 'FILE_STORE_PATH', None)
if not file_store_path:
raise ValueError('Couldn\'t access file store path')
file_full_path = os.path.join(file_store_path, su.file.name)
self.assertTrue(os.path.exists(file_full_path) and
os.path.isfile(file_full_path))
self.assertFalse(os.path.exists(
os.path.join(upload_tmp_base, self.upload_id, self.file_id)))
def test_store_upload_with_root_path(self):
test_target_dirname = '/'
su = store_upload(self.upload_id, test_target_dirname)
upload_id = su.upload_id
su = StoredUpload.objects.get(upload_id=upload_id)
target_file_path = os.path.join(test_target_dirname, self.fn)
LOG.debug('About to check that file path <%s> and stored path <%s> '
'are equal' % (target_file_path, su.file.name))
self.assertEqual(
target_file_path[1:], su.file.name,
'File has been stored with wrong filename in the database.')
# Check that the file has actually been stored in the correct
# location and that the temporary file has been deleted
upload_tmp_base = getattr(local_settings, 'UPLOAD_TMP', None)
file_store_path = getattr(local_settings, 'FILE_STORE_PATH', None)
if not file_store_path:
raise ValueError('Couldn\'t access file store path')
file_full_path = os.path.join(file_store_path, su.file.name)
self.assertTrue(os.path.exists(file_full_path) and
os.path.isfile(file_full_path))
self.assertFalse(os.path.exists(
os.path.join(upload_tmp_base, self.upload_id, self.file_id)))
def test_store_upload_path_with_dirname(self):
test_target_dirname = self.test_target_dirname
if test_target_dirname.startswith(os.sep):
test_target_dirname = test_target_dirname[1:]
su = store_upload(self.upload_id, test_target_dirname)
upload_id = su.upload_id
su = StoredUpload.objects.get(upload_id=upload_id)
target_file_path = os.path.join(test_target_dirname, self.fn)
LOG.debug('About to check that file path <%s> and stored path <%s> '
'are equal' % (target_file_path, su.file.name))
self.assertEqual(
target_file_path, su.file.name,
'File has been stored with wrong filename in the database.')
# Check that the file has actually been stored in the correct
# location and that the temporary file has been deleted
upload_tmp_base = getattr(local_settings, 'UPLOAD_TMP', None)
file_store_path = getattr(local_settings, 'FILE_STORE_PATH', None)
if not file_store_path:
raise ValueError('Couldn\'t access file store path')
file_full_path = os.path.join(file_store_path, su.file.name)
self.assertTrue(os.path.exists(file_full_path) and
os.path.isfile(file_full_path))
self.assertFalse(os.path.exists(
os.path.join(upload_tmp_base, self.upload_id, self.file_id)))
def test_store_upload_path_with_dirname_no_leading_sep(self):
test_target_dirname = self.test_target_dirname
if not test_target_dirname.startswith(os.sep):
test_target_dirname = os.sep + test_target_dirname
su = store_upload(self.upload_id, test_target_dirname)
upload_id = su.upload_id
# File should be stored relative to the file store path so the
# leading os.sep should be ignored.
su = StoredUpload.objects.get(upload_id=upload_id)
target_file_path = os.path.join(test_target_dirname, self.fn)
if target_file_path.startswith(os.sep):
target_file_path = target_file_path[1:]
LOG.debug('About to check that file path <%s> and stored path <%s> '
'are equal' % (target_file_path, su.file.name))
self.assertEqual(
target_file_path, su.file.name,
'File has been stored with wrong filename in the database.')
# Check that the file has actually been stored in the correct
# location and that the temporary file has been deleted
upload_tmp_base = getattr(local_settings, 'UPLOAD_TMP', None)
file_store_path = getattr(local_settings, 'FILE_STORE_PATH', None)
if not file_store_path:
raise ValueError('Couldn\'t access file store path')
file_full_path = os.path.join(file_store_path, su.file.name)
self.assertTrue(os.path.exists(file_full_path) and
os.path.isfile(file_full_path))
self.assertFalse(os.path.exists(
os.path.join(upload_tmp_base, self.upload_id, self.file_id)))
def test_store_multiple_uploads_to_same_dir(self):
test_target_filename = self.test_target_filename
test_target_filename2 = self.test_target_filename2
if test_target_filename.startswith(os.sep):
test_target_filename = test_target_filename[1:]
if test_target_filename2.startswith(os.sep):
test_target_filename2 = test_target_filename2[1:]
su = store_upload(self.upload_id, test_target_filename)
su2 = store_upload(self.upload_id2, test_target_filename2)
self.assertEqual(
test_target_filename, su.file.name,
'File has been stored with wrong filename in the database.')
self.assertEqual(
test_target_filename2, su2.file.name,
'File 2 has been stored with wrong filename in the database.')
# Check that the files have actually been stored in the correct
# locations and that the temporary files have been deleted
upload_tmp_base = getattr(local_settings, 'UPLOAD_TMP', None)
file_store_path = getattr(local_settings, 'FILE_STORE_PATH', None)
if not file_store_path:
raise ValueError('Couldn\'t access file store path')
file_full_path = os.path.join(file_store_path, su.file.name)
file_full_path2 = os.path.join(file_store_path, su2.file.name)
# Check first file
self.assertTrue(os.path.exists(file_full_path) and
os.path.isfile(file_full_path))
self.assertFalse(os.path.exists(
os.path.join(upload_tmp_base, self.upload_id, self.file_id)))
# Check second file
self.assertTrue(os.path.exists(file_full_path2) and
os.path.isfile(file_full_path2))
self.assertFalse(os.path.exists(
os.path.join(upload_tmp_base, self.upload_id2, self.file_id)))
def test_store_upload_local_direct_no_file_store_path(self):
fsp = local_settings.FILE_STORE_PATH
local_settings.FILE_STORE_PATH = None
tu = TemporaryUpload.objects.get(upload_id=self.upload_id)
with self.assertRaisesMessage(
ValueError,
'The FILE_STORE_PATH is not set to a directory.'):
_store_upload_local('/test_storage', 'test_file.txt', tu)
local_settings.FILE_STORE_PATH = fsp
def test_store_upload_local_direct_missing_store_path(self):
fsp = local_settings.FILE_STORE_PATH
test_dir = '/tmp/%s' % _get_file_id()
local_settings.FILE_STORE_PATH = test_dir
with self.assertRaisesMessage(
FileNotFoundError,
'The local output directory [%s] defined by '
'FILE_STORE_PATH is missing.' % test_dir):
_store_upload_local('/test_storage', 'test_file.txt', None)
local_settings.FILE_STORE_PATH = fsp
def test_store_upload_local_direct_file_exists(self):
filestore_base = getattr(local_settings, 'FILE_STORE_PATH', None)
target_file_dir = os.path.join(filestore_base, 'test_storage')
os.mkdir(target_file_dir)
with open(os.path.join(target_file_dir, 'testfile.txt'), 'a') as f:
f.write('\n')
tu = TemporaryUpload.objects.get(upload_id=self.upload_id)
with self.assertRaisesMessage(
FileExistsError,
'The specified temporary file cannot be stored to the '
'specified location - file exists.'):
_store_upload_local('/test_storage', 'testfile.txt', tu)
def test_store_upload_local_copy_to_store_fails(self):
tu = TemporaryUpload.objects.get(upload_id=self.upload_id)
with patch('shutil.copy2') as copy2_patch:
with patch('os.path.exists') as exists_patch:
with patch('os.path.isdir') as isdir_patch:
exists_patch.side_effect = [True, False, True]
isdir_patch.return_value = True
copy2_patch.side_effect = IOError(
'Error moving temporary file to permanent storage '
'location')
with self.assertRaisesMessage(
IOError,
'Error moving temporary file to permanent '
'storage location'):
_store_upload_local('/test_storage', 'testfile.txt',
tu)
def tearDown(self):
upload_tmp_base = getattr(local_settings, 'UPLOAD_TMP', None)
filestore_base = getattr(local_settings, 'FILE_STORE_PATH', None)
upload_tmp_dir = os.path.join(upload_tmp_base, self.upload_id)
upload_tmp_dir2 = os.path.join(upload_tmp_base, self.upload_id2)
tmp_files = [(upload_tmp_dir, self.fn), (upload_tmp_dir2, self.fn)]
test_filename = self.test_target_filename
test_filename2 = self.test_target_filename2
test_dirname = self.test_target_dirname
if test_filename.startswith(os.sep):
test_filename = test_filename[1:]
if test_filename2.startswith(os.sep):
test_filename2 = test_filename2[1:]
if test_dirname.startswith(os.sep):
test_dirname = test_dirname[1:]
test_filename_fn = os.path.join(test_dirname, self.fn)
test_target_file = os.path.join(filestore_base, test_filename)
test_target_file2 = os.path.join(filestore_base, test_filename2)
test_target_dir_fn = os.path.join(filestore_base, test_filename_fn)
test_target_file_fn = os.path.join(filestore_base, self.fn)
test_target_dir = os.path.dirname(test_target_file)
for tmp_file in tmp_files:
tmp_file_path = os.path.join(tmp_file[0], tmp_file[1])
if (os.path.exists(tmp_file_path) and
os.path.isfile(tmp_file_path)):
LOG.debug('Removing temporary file: <%s>' % tmp_file_path)
os.remove(tmp_file_path)
if (os.path.exists(tmp_file[0]) and os.path.isdir(tmp_file[0])):
LOG.debug('Removing temporary dir: <%s>' % tmp_file[0])
os.rmdir(tmp_file[0])
# Remove test_target_file
if (os.path.exists(test_target_file) and
os.path.isfile(test_target_file)):
LOG.debug('Removing test target file: <%s>' % test_target_file)
os.remove(test_target_file)
if (os.path.exists(test_target_file2) and
os.path.isfile(test_target_file2)):
LOG.debug('Removing test target file 2:<%s>' % test_target_file2)
os.remove(test_target_file2)
# If a test was run using only the target directory as the file
# target then the file will be stored with self.fn since no target
# filename has been specified, need to check for this case too
if (os.path.exists(test_target_file_fn) and
os.path.isfile(test_target_file_fn)):
LOG.debug('Removing test_target_file_fn:<%s>'
% test_target_file_fn)
os.remove(test_target_file_fn)
if (os.path.exists(test_target_dir_fn) and
os.path.isfile(test_target_dir_fn)):
LOG.debug('Removing test_target_dir_fn:<%s>'
% test_target_dir_fn)
os.remove(test_target_dir_fn)
# Remove directory
if (os.path.exists(test_target_dir) and
os.path.isdir(test_target_dir)):
LOG.debug('Removing test target dir: <%s>' % test_target_dir)
os.rmdir(test_target_dir)
| 48.168269 | 77 | 0.670276 |
f26d3a5a2c10fa369ad15d0276438bb61ab20007 | 2,354 | py | Python | vertica_python/vertica/messages/backend_messages/parameter_status.py | vinceatbluelabs/vertica-python | 0af888b6b30bc1be2f2b497e564b80cb5220ea87 | [
"Apache-2.0"
] | null | null | null | vertica_python/vertica/messages/backend_messages/parameter_status.py | vinceatbluelabs/vertica-python | 0af888b6b30bc1be2f2b497e564b80cb5220ea87 | [
"Apache-2.0"
] | null | null | null | vertica_python/vertica/messages/backend_messages/parameter_status.py | vinceatbluelabs/vertica-python | 0af888b6b30bc1be2f2b497e564b80cb5220ea87 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
from struct import unpack
from ..message import BackendMessage
class ParameterStatus(BackendMessage):
message_id = b'S'
def __init__(self, data):
BackendMessage.__init__(self)
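        # The payload is a pair of NUL-terminated strings, <name>\x00<value>\x00,
        # so find the first NUL and unpack "name, pad, value, pad" below.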
null_byte = data.find(b'\x00')
unpacked = unpack('{0}sx{1}sx'.format(null_byte, len(data) - null_byte - 2), data)
self.name = unpacked[0]
self.value = unpacked[1]
def __str__(self):
return "ParameterStatus: {} = {}".format(self.name, self.value)
BackendMessage.register(ParameterStatus)
| 40.586207 | 90 | 0.749363 |
6d3d25a755b90bbd8978e23fd21349cc6e30f129 | 1,377 | py | Python | translator/osc/osc_plugin.py | mail2nsrajesh/heat-translator | 4a37a5680c4159162c6cb44b0059ce003e1f01da | [
"Apache-2.0"
] | 76 | 2015-01-29T20:10:40.000Z | 2021-11-12T06:09:54.000Z | translator/osc/osc_plugin.py | mail2nsrajesh/heat-translator | 4a37a5680c4159162c6cb44b0059ce003e1f01da | [
"Apache-2.0"
] | 2 | 2016-12-22T19:57:47.000Z | 2018-03-26T16:31:59.000Z | translator/osc/osc_plugin.py | mail2nsrajesh/heat-translator | 4a37a5680c4159162c6cb44b0059ce003e1f01da | [
"Apache-2.0"
] | 41 | 2015-01-15T02:42:26.000Z | 2020-08-06T05:09:32.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from translator.osc import utils
DEFAULT_TRANSLATOR_API_VERSION = '1'
API_VERSION_OPTION = 'os_translator_api_version'
API_NAME = 'translator'
API_VERSIONS = {
'1': 'translator.v1.client.Client',
}
def make_client(instance):
# NOTE(stevemar): We don't need a client because
# heat-translator itself is a command line tool
pass
def build_option_parser(parser):
"""Hook to add global options."""
parser.add_argument(
'--os-translator-api-version',
metavar='<translator-api-version>',
default=utils.env(
'OS_TRANSLATOR_API_VERSION',
default=DEFAULT_TRANSLATOR_API_VERSION),
help='Translator API version, default=' +
DEFAULT_TRANSLATOR_API_VERSION +
' (Env: OS_TRANSLATOR_API_VERSION)')
return parser
| 32.785714 | 77 | 0.702977 |
349afdc7c22541cb74cf3e23aa0c7d471794faf2 | 1,604 | py | Python | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/CancelOperationAuditRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/CancelOperationAuditRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/CancelOperationAuditRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class CancelOperationAuditRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'CancelOperationAudit')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AuditRecordId(self):
return self.get_query_params().get('AuditRecordId')
def set_AuditRecordId(self,AuditRecordId):
self.add_query_param('AuditRecordId',AuditRecordId)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang) | 36.454545 | 76 | 0.767456 |
5e24b0ef14da6baf002a0423c25a18152dd0db39 | 625 | py | Python | scrapper.py | AhsanSN/Website-Scrapper | f0589624237c1918a6f7002942d755f99e1724d8 | [
"MIT"
] | null | null | null | scrapper.py | AhsanSN/Website-Scrapper | f0589624237c1918a6f7002942d755f99e1724d8 | [
"MIT"
] | null | null | null | scrapper.py | AhsanSN/Website-Scrapper | f0589624237c1918a6f7002942d755f99e1724d8 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
URL = 'https://www.betexplorer.com/soccer/france/ligue-1/results/'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find_all('tr')
for tr in results:
tds = tr.find_all('td')
for td in tds:
print(td.text)
print("--")
'''
title_elem = job_elem.find('h2', class_='title')
company_elem = job_elem.find('div', class_='company')
location_elem = job_elem.find('div', class_='location')
print(title_elem)
print(company_elem)
print(location_elem)
print()
'''
#print(len(results))
| 23.148148 | 66 | 0.6592 |
fd62f8cf6d94ca7f92edcd710e2dda6163bf7813 | 13,325 | py | Python | python/filemgmt/compare_utils.py | lsst-dm/FileMgmt | 31c566553b225db934acf6f1e3d54e48be154660 | [
"NCSA"
] | null | null | null | python/filemgmt/compare_utils.py | lsst-dm/FileMgmt | 31c566553b225db934acf6f1e3d54e48be154660 | [
"NCSA"
] | null | null | null | python/filemgmt/compare_utils.py | lsst-dm/FileMgmt | 31c566553b225db934acf6f1e3d54e48be154660 | [
"NCSA"
] | null | null | null | """Compare files from local disk and DB location tracking based upon an
archive path."""
import os
import time
import despydmdb.desdmdbi as desdmdbi
from . import disk_utils_local as diskutils
from . import db_utils_local as dbutils
class Args(object):
def __init__(self, **kw):
for item, val in kw.items():
setattr(self, item, val)
def validate_args(dbh, args):
"""Make sure command line arguments have valid values.
Parameters
----------
dbh : database connection
        connection to use for checking the database related arguments
args : dict
        dictionary containing the command line arguments
Returns
-------
string containing the archive root
"""
if args.relpath is not None:
archive_root, archive_path, relpath = dbutils.get_paths_by_path_compare(dbh, args)
operator = None
pfwid = None
elif (args.reqnum and args.unitname and args.attnum) or args.pfwid:
archive_root, archive_path, relpath, state, operator, pfwid = dbutils.get_paths_by_id(dbh, args)
else:
raise Exception("Either relpath, pfwid, or a reqnum/unitname/attnum triplet must be specified.")
# check path exists on disk
if not os.path.exists(archive_path):
print("Warning: Path does not exist on disk: %s" % (archive_path))
if args.verbose:
args.silent = False
return archive_root, archive_path, relpath, operator, pfwid
def print_all_files(comparison_info, files_from_db, files_from_disk):
"""Print both lists of files.
Parameters
----------
comparison_info : dict
Dictionary containing the results of the comparisons
files_from_db : dict
        Dictionary containing the file info from the database
files_from_disk : dict
Dictionary containing the file info from disk
"""
print("db path/name (filesize, md5sum) F disk path/name (filesize, md5sum)")
allfiles = set(files_from_db) | set(files_from_disk)
fdisk_str = ""
for fname in allfiles:
if fname in files_from_db:
finfo = files_from_db[fname]
fullname = "%s/%s" % (finfo['path'], fname)
filesize = None
if 'filesize' in finfo:
filesize = finfo['filesize']
md5sum = None
if 'md5sum' in finfo:
md5sum = finfo['md5sum']
fdb_str = "%s (%s, %s)" % (fullname, filesize, md5sum)
else:
fdb_str = ""
if fname in files_from_disk:
finfo = files_from_disk[fname]
fullname = "%s/%s" % (finfo['relpath'], fname)
filesize = None
if 'filesize' in finfo:
filesize = finfo['filesize']
md5sum = None
if 'md5sum' in finfo:
md5sum = finfo['md5sum']
fdisk_str = "%s (%s, %s)" % (fullname, filesize, md5sum)
else:
fdisk_str = ""
cmp = 'X'
if fname in comparison_info['equal']:
cmp = '='
print("%-140s %s %-140s" % (fdb_str, cmp, fdisk_str))
def diff_files(comparison_info, files_from_db, files_from_disk, check_md5sum, check_filesize, duplicates, db_duplicates):
"""Print only differences in file lists.
Parameters
----------
comparison_info : dict
Dictionary containing the comparisons of disk and db for each file
    files_from_db : dict
        Dictionary containing the file info from the database
files_from_disk : dict
Dictionary containing the file info from disk
check_md5sum : bool
Whether or not to report the md5sum comparison
check_filesize : bool
Whether or not to report the filesize comparison
"""
pdup = []
if len(comparison_info['dbonly']) > 0:
print("Files only found in the database --------- ")
for fname in sorted(comparison_info['dbonly']):
fdb = files_from_db[fname]
print("\t%s/%s" % (fdb['path'], fname))
if len(comparison_info['diskonly']) > 0:
print("\nFiles only found on disk --------- ")
for fname in sorted(comparison_info['diskonly']):
addon = ""
if fname in duplicates:
addon = " *"
fdisk = files_from_disk[fname]
print("\t%s/%s%s" % (fdisk['relpath'], fname, addon))
    if len(comparison_info['pathdup']) > 0:
        print("\n The following files had multiple paths on disk (path filesize):")
        for fname in comparison_info['pathdup']:
            pdup.append(fname)
            # assumes each entry is a list of dicts with 'relpath' and 'filesize'
            # keys, matching the layout of the 'duplicates' dictionary used below
            listing = {}
            for dupinfo in comparison_info['pathdup'][fname]:
                listing[dupinfo['relpath']] = dupinfo['filesize']
            first = True
            for pth in sorted(listing):
                start = " "
                if first:
                    start = "*"
                    first = False
                addon = ""
                if fname in files_from_db and files_from_db[fname]['path'] == pth:
                    addon = " (DB Match)"
                print("    %s %s/%s %i%s" % (start, pth, fname, listing[pth], addon))
if len(comparison_info['path']) > 0:
print("\nPath mismatch (file name, db path, disk path) --------- ")
for fname in sorted(comparison_info['path']):
addon = ""
if fname in duplicates:
addon = " *"
fdb = files_from_db[fname]
fdisk = files_from_disk[fname]
print("\t%s\t%s\t%s%s" % (fname, fdb['path'], fdisk['relpath'], addon))
        if len(comparison_info['duplicates']) > 0:
            print("  The following files have multiple disk paths on disk (path filesize):")
            for fname in comparison_info['duplicates']:
                pdup.append(fname)
                # assumes each entry is a list of dicts with 'relpath' and 'filesize' keys
                listing = {}
                for dupinfo in comparison_info['duplicates'][fname]:
                    listing[dupinfo['relpath']] = dupinfo['filesize']
                first = True
                for pth in sorted(listing):
                    start = " "
                    if first:
                        start = "*"
                        first = False
                    addon = ""
                    if fname in files_from_db and files_from_db[fname]['path'] == pth:
                        addon = " (DB Match)"
                    print("    %s %s/%s %i%s" % (start, pth, fname, listing[pth], addon))
if len(comparison_info['filesize']) > 0:
print("\nFilesize mismatch (File name, size in DB, size on disk) --------- ")
for fname in sorted(comparison_info['filesize']):
fdb = files_from_db[fname]
fdisk = files_from_disk[fname]
print("\t%s %s %s" % (fname, fdb['filesize'], fdisk['filesize']))
if 'md5sum' in comparison_info and len(comparison_info['md5sum']) > 0:
print("\nmd5sum mismatch (File name, sum in DB, sum on disk) --------- ")
for fname in sorted(comparison_info['md5sum']):
fdb = files_from_db[fname]
fdisk = files_from_disk[fname]
print("\t%s %s %s" % (fname, fdb['md5sum'], fdisk['md5sum']))
if len(duplicates) > len(pdup):
print("\nThe following files have multiple disk paths on disk (path filesize):")
for d in sorted(duplicates):
if d not in pdup:
listing = {}
for f in duplicates[d]:
listing[f['relpath']] = f['filesize']
first = True
for pth in sorted(listing):
start = " "
if first:
start = "*"
first = False
addon = ""
if d in files_from_db and files_from_db[d]['path'] == pth:
addon = " (DB Match)"
print(" %s %s/%s %i%s" % (start, pth, d, listing[pth], addon))
if len(db_duplicates) > 0:
print("\nThe following files have multiple entries in the database (path filesize):")
for d in sorted(db_duplicates):
listing = {}
for f in db_duplicates[d]:
listing[f['relpath']] = f['filesize']
first = True
for pth in sorted(listing):
start = " "
if first:
start = "*"
first = False
addon = ""
if d in files_from_disk and files_from_disk[d]['path'] == pth:
addon = " (Disk Match)"
print(" %s %s/%s %i%s" % (start, pth, d, listing[pth], addon))
def run_compare(args):
if args.dbh is None:
dbh = desdmdbi.DesDmDbi(args.des_services, args.section)
else:
dbh = args.dbh
if args.date_range:
curs = dbh.cursor()
dates = args.date_range.split(',')
if len(dates) == 1:
dr = "submittime>=TO_DATE('%s 00:00:01', 'YYYY-MM-DD HH24:MI:SS') and submittime<=TO_DATE('%s 23:59:59', 'YYYY-MM-DD HH24:MI:SS')" % (
dates[0], dates[0])
else:
dr = "submittime>=TO_DATE('%s 00:00:01', 'YYYY-MM-DD HH24:MI:SS') and submittime<=TO_DATE('%s 23:59:59', 'YYYY-MM-DD HH24:MI:SS')" % (
dates[0], dates[1])
sql = "select id from pfw_attempt where %s" % dr
if args.pipeline:
sql += " and subpipeprod='%s'" % args.pipeline
sql += " order by id"
curs.execute(sql)
res = curs.fetchall()
if not args.silent:
print("Found %i pfw_attempt_id's for given date range" % (len(res)))
count = 0
for pdwi in res:
args.pfwid = pdwi[0]
count += do_compare(dbh, args)
return count
return do_compare(dbh, args)
def do_compare(dbh, args):
"""Main control.
"""
archive_root, archive_path, relpath, operator, pfwid = validate_args(dbh, args)
#print archive_root
if args.debug:
print("From DB")
files_from_db, db_duplicates = dbutils.get_files_from_db(
dbh, relpath, args.archive, pfwid, None, debug=args.debug, quick=args.quick)
if args.debug:
print("From disk")
files_from_disk, duplicates = diskutils.get_files_from_disk(
relpath, archive_root, args.md5sum, args.debug)
if args.debug:
print("Compare")
comparison_info = diskutils.compare_db_disk(
files_from_db, files_from_disk, duplicates, args.md5sum, args.filesize, args.debug, archive_root)
if not args.script and not args.silent:
print("\nPath = %s" % (archive_path))
print("Archive name = %s" % args.archive)
addon = ""
dbaddon = ""
if len(duplicates) > 0:
addon += "(%i are distinct)" % len(files_from_disk)
if len(db_duplicates) > 0:
dbaddon += "(%i are distinct)" % len(files_from_db)
print("Number of files from db = %i %s" % (len(files_from_db) + len(db_duplicates), dbaddon))
print("Number of files from disk = %i %s" % (len(files_from_disk) + len(duplicates), addon))
if len(duplicates) > 0:
print("Files with multiple paths on disk = %i" % len(duplicates))
# print summary of comparison
print("Comparison Summary")
print("\tEqual:\t%i" % len(comparison_info['equal']))
print("\tDB only:\t%i" % len(comparison_info['dbonly']))
print("\tDisk only:\t%i" % len(comparison_info['diskonly']))
print("\tMismatched paths:\t%i" % len(comparison_info['path']))
print("\tMismatched filesize:\t%i" % len(comparison_info['filesize']))
if 'md5sum' in comparison_info:
print("\tMismatched md5sum:\t%i" % len(comparison_info['md5sum']))
print("")
if args.debug:
print_all_files(comparison_info, files_from_db, files_from_disk)
elif args.verbose:
diff_files(comparison_info, files_from_db, files_from_disk,
args.md5sum, args.filesize, duplicates, db_duplicates)
        # build the attempt identifier used in the report lines
        if args.pfwid is not None:
            loc = "%s" % (args.pfwid)
        elif args.relpath is None:
            loc = "%s %s %s" % (args.reqnum, args.unitname, args.attnum)
        else:
            loc = args.relpath
        if len(comparison_info['dbonly']) == len(comparison_info['diskonly']) == len(comparison_info['path']) == len(comparison_info['filesize']) == 0:
            if 'md5sum' in comparison_info and len(comparison_info['md5sum']) != 0:
                print("%s ERROR" % loc)
                return 1
            return 0
        return 1
else:
if args.pfwid is not None:
loc = "%s" % (args.pfwid)
elif args.relpath is None:
loc = "%s %s %s" % (args.reqnum, args.unitname, args.attnum)
else:
loc = args.relpath
if len(comparison_info['dbonly']) == len(comparison_info['diskonly']) == len(comparison_info['path']) == len(comparison_info['filesize']) == 0:
if 'md5sum' in comparison_info:
if len(comparison_info['md5sum']) != 0:
if not args.silent:
print("%s ERROR" % loc)
return 1
if not args.silent:
print("%s OK" % loc)
return 0
if not args.silent:
print("%s ERROR" % loc)
return 1
def compare(dbh=None, des_services=None, section=None, archive='desar2home', reqnum=None, unitname=None, attnum=None, relpath=None,
pfwid=None, date_range=None, pipeline=None, filesize=True, md5sum=False, quick=False, debug=False, script=False, verbose=False, silent=True):
return run_compare(Args(**locals()))
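# Illustrative call (values are hypothetical placeholders, not a real services
# file or attempt id):
#   n_bad = compare(des_services='~/.desservices.ini', section='db-test',
#                   archive='desar2home', pfwid=1234567, md5sum=True,
#                   verbose=True, silent=False)
# run_compare/do_compare return 0 when the DB tracking and disk agree,
# otherwise the number of attempts that showed a mismatch.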
| 38.961988 | 153 | 0.562777 |
fa1d6b6079b248effacfdb48e0dcbd912c243110 | 4,575 | py | Python | src/otpy_func.py | thatswhereurwrongkiddo/otpy | c77fe1982bb3c188b45b33d53cebcd7131397ce3 | [
"MIT"
] | null | null | null | src/otpy_func.py | thatswhereurwrongkiddo/otpy | c77fe1982bb3c188b45b33d53cebcd7131397ce3 | [
"MIT"
] | null | null | null | src/otpy_func.py | thatswhereurwrongkiddo/otpy | c77fe1982bb3c188b45b33d53cebcd7131397ce3 | [
"MIT"
] | 1 | 2022-01-28T02:21:42.000Z | 2022-01-28T02:21:42.000Z | import os
from color_storage import txtc_wb, bgc_wb, resetc_wb
def clearscreen():
os.system('cls' if os.name == 'nt' else 'clear')
vernotice()
def checksys():
import platform
from __init__ import title
platsys = platform.system()
if platsys == "Windows":
os.system("title {0}".format(title))
else:
pass
def vernotice():
from __init__ import ver
#print version notice
print("(otpy v{0} PRE-ALPHA TEST VERSION)".format(ver))
print("""
""")
################################################################################
## functions for main.py #######################################################
################################################################################
from sql_names import mm1
name = 0
money = 800
yokes = 0
oxen = 0
food = 0
food_pounds = 0
ammo_price = 0
ammo = 0
spare_parts_price = 0
spare_parts = 0
class Player:
def profile():
global name
name = mm1
def wait_for_alpha():
print(resetc_wb)
clearscreen()
print(txtc_wb + bgc_wb + "You can play the rest of the game when the alpha version 0.1 is released!")
class Store:
global money
def greet():
print("Hello there, {0}! My name is Jack, and this here's my General Store!".format(name))
print("")
print("I see you've got {0} to spend, let's get down to business!".format(money))
print("")
def buy():
print(txtc_wb + bgc_wb + """Your Cart:
Oxen ${0} (Yokes = {4})
Food ${1} ({7} lbs.)
Ammo ${2} (Boxes = {5})
Spare Parts ${3} (Boxes = {6})
""".format(oxen, food, ammo_price, spare_parts_price, yokes, ammo, spare_parts, food_pounds))
choice = input("What would you like to buy? (Type 'checkout' when finished): ")
if choice.lower() == "oxen":
Store.buy_oxen()
if choice.lower() == "food":
Store.buy_food()
if choice.lower() == "ammo":
Store.buy_ammo()
if choice.lower() == "spare parts":
Store.buy_parts()
if choice.lower() == "checkout":
Store.checkout()
def buy_oxen():
print(resetc_wb)
clearscreen()
global yokes
global oxen
print(txtc_wb + bgc_wb + "A yoke is made up of two oxen")
print("I charge $40 for one yoke")
yokes = input("How many yokes would you like to buy?: ")
oxen = int(yokes) * 40
print(resetc_wb)
clearscreen()
Store.buy()
def buy_food():
print(resetc_wb)
clearscreen()
global food
global food_pounds
print(txtc_wb + bgc_wb + "I recommend 200 pounds of food for each person in your party")
print("Since you have 5, that would be 1000 pounds")
print("I sell food at $0.20 per pound")
food_pounds = input("How many pounds would you like to buy?: ")
food = int(food_pounds) * .2
food = int(food)
print(resetc_wb)
clearscreen()
Store.buy()
def buy_ammo():
global ammo
global ammo_price
print(resetc_wb)
clearscreen()
print(txtc_wb + bgc_wb + "Each of my Grade A ammo boxes holds 20 bullets")
print("and each box costs $2.")
print("")
ammo = input("How many boxes do you want to buy?: ")
ammo_price = int(ammo) * 2
print(resetc_wb)
clearscreen()
Store.buy()
def buy_parts():
global spare_parts
global spare_parts_price
print(resetc_wb)
clearscreen()
print(txtc_wb + bgc_wb + "That wagon is bound to break on you along the way to Oregon now, son.")
print("I strongly recommend getting at least 2 boxes of spare parts in case anything is to happen")
print("I sell each box for $50 each")
spare_parts = input("How many boxes would you like to buy?: ")
spare_parts_price = int(spare_parts) * 50
print(resetc_wb)
clearscreen()
Store.buy()
def checkout():
global oxen
global food
global ammo_price
global spare_parts_price
total = oxen + food + ammo_price + spare_parts_price
print("Your total bill is: {0}".format(total))
pay_now = input("Do you wish to pay now? (yes/no): ")
if pay_now.lower() == "yes":
global money
money = money - total
Player.wait_for_alpha()
if pay_now.lower() == "no":
print(resetc_wb)
clearscreen()
Store.buy()
class Trip:
pass
| 32.21831 | 109 | 0.554317 |
e3285680db4469571c97424c9694d3d27f7ca77e | 1,452 | py | Python | preprocess.py | taxio/photo_mosaic | e298927174a251f272ffe83dc08216fa4deacde4 | [
"MIT"
] | null | null | null | preprocess.py | taxio/photo_mosaic | e298927174a251f272ffe83dc08216fa4deacde4 | [
"MIT"
] | 3 | 2018-03-23T12:35:07.000Z | 2018-03-25T04:15:40.000Z | preprocess.py | taxio/photo_mosaic | e298927174a251f272ffe83dc08216fa4deacde4 | [
"MIT"
] | null | null | null | from PIL import Image, ImageStat
from progressbar import ProgressBar
import photo_mosaic.util
from photo_mosaic import dbmanager
class PreProcess:
def __init__(self, dbname: str):
self._db = dbmanager.DBManager(dbname=dbname)
def drop_table(self):
self._db.drop_table()
def insert_image(self, img_name):
new_material = self.calc_mean(img_name)
if new_material:
self._db.insert_material(new_material)
def calc_mean(self, img_name):
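        """Open an image, convert it to RGB, trim it to a square and return a
        MaterialImage record with its mean R, G, B values (None if the file
        cannot be opened as an image)."""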
try:
img = Image.open(img_name)
except OSError as e:
return None
img = photo_mosaic.util.convert_to_rgb_image(img)
img = photo_mosaic.util.trim_into_square(img)
stat = ImageStat.Stat(img)
new_material = dbmanager.MaterialImage(name=img_name, R=stat.mean[0], G=stat.mean[1], B=stat.mean[2])
return new_material
def calc_all(self, img_dir_path: str):
image_names = photo_mosaic.util.get_image_names(img_dir_path)
print('calculation images start')
pbar = ProgressBar(max_value=len(image_names))
new_materials = list()
for idx, image_name in enumerate(image_names):
pbar.update(idx)
new_material = self.calc_mean(image_name)
if new_material:
new_materials.append(new_material)
self._db.insert_all_material(new_materials)
pbar.finish()
print('finish')
| 30.893617 | 109 | 0.657025 |
2ad8f1411d821e4de740bef9dcb488629e138311 | 31 | py | Python | finitediff/_release.py | bjodah/finitediff | bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8 | [
"BSD-2-Clause"
] | 27 | 2016-09-14T11:40:35.000Z | 2022-03-05T18:48:26.000Z | finitediff/_release.py | tutoushaonian/finitediff | bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8 | [
"BSD-2-Clause"
] | 4 | 2016-04-08T03:55:14.000Z | 2018-06-27T11:18:58.000Z | finitediff/_release.py | tutoushaonian/finitediff | bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8 | [
"BSD-2-Clause"
] | 5 | 2017-05-25T06:50:40.000Z | 2021-09-13T14:16:59.000Z | __version__ = "0.6.5.dev0+git"
| 15.5 | 30 | 0.677419 |
037c6fe5f03c29e3d419d70fe4ec74ce634edbea | 2,950 | py | Python | selfdrive/controls/lib/pid.py | LOVEChen/raspberry-pilot | 6223103f72ec5692dab7944bf420c763383b1fe9 | [
"MIT"
] | 1 | 2020-12-04T17:43:50.000Z | 2020-12-04T17:43:50.000Z | selfdrive/controls/lib/pid.py | cryptonome/raspberry-pilot | 4f01dcad4629577bcb268118375fae7473103a53 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/pid.py | cryptonome/raspberry-pilot | 4f01dcad4629577bcb268118375fae7473103a53 | [
"MIT"
] | null | null | null | import numpy as np
from common.numpy_fast import clip, interp
def apply_deadzone(error, deadzone):
if error > deadzone:
error -= deadzone
elif error < - deadzone:
error += deadzone
else:
error = 0.
return error
class PIController(object):
def __init__(self, k_p, k_i, k_f=1., pos_limit=None, neg_limit=None, rate=100, sat_limit=0.8, convert=None):
self._k_p = k_p # proportional gain
self._k_i = k_i # integral gain
self.k_f = k_f # feedforward gain
self.pos_limit = 1.0
self.neg_limit = -1.0
self.sat_count_rate = 1.0 / rate
self.i_unwind_rate = 0.3 / rate
self.i_rate = 1.0 / rate
self.sat_limit = sat_limit
self.convert = convert
self.reset()
@property
def k_p(self):
return interp(self.speed, self._k_p[0], self._k_p[1])
@property
def k_i(self):
return interp(self.speed, self._k_i[0], self._k_i[1])
def _check_saturation(self, control, override, error):
saturated = (control < self.neg_limit) or (control > self.pos_limit)
if saturated and not override and abs(error) > 0.1:
self.sat_count += self.sat_count_rate
else:
self.sat_count -= self.sat_count_rate
self.sat_count = clip(self.sat_count, 0.0, 1.0)
return self.sat_count > self.sat_limit
def reset(self):
self.p = 0.0
self.p2 = 0.0
self.i = 0.0
self.f = 0.0
self.sat_count = 0.0
self.saturated = False
self.control = 0
def update(self, setpoint, measurement, speed=0.0, check_saturation=True, override=False, feedforward=0., deadzone=0., freeze_integrator=False, add_error=0.0, p_scale=1.0):
self.speed = speed
error = float(apply_deadzone(setpoint - measurement, deadzone))
self.p = error * self.k_p * p_scale
self.p2 = add_error * self.k_p
self.f = feedforward * self.k_f
if override and not self.saturated:
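      # while the driver is overriding (and we are not saturated), slowly unwind
      # the integrator toward zero instead of letting it keep accumulating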
self.i -= self.i_unwind_rate * float(np.sign(self.i))
else:
i = self.i + error * self.k_i * self.i_rate
control = self.p + self.p2 + self.f + i
if self.convert is not None:
control = self.convert(control, speed=self.speed)
# Update when changing i will move the control away from the limits
# or when i will move towards the sign of the error
if ((error >= 0 and (control <= self.pos_limit or i < 0.0)) or \
(error <= 0 and (control >= self.neg_limit or i > 0.0))) and \
not freeze_integrator and not error * add_error < 0:
self.i = i
control = self.p + self.p2 + self.f + self.i
if self.convert is not None:
control = self.convert(control, speed=self.speed)
if check_saturation:
self.saturated = self._check_saturation(control, override, (error + add_error))
else:
self.saturated = False
self.control = clip(control, self.neg_limit, self.pos_limit)
return self.control
| 31.382979 | 175 | 0.633898 |
fa9bc798d06b5b88177d46f0fa4fc35dba4e98f7 | 785 | py | Python | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716220657.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716220657.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716220657.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Decorator Parametors
In the previous videos we saw some built-in decorators that can handle some arguments:
@wraps(fn) @lru_cache(maxsize=256) <\
def inner(): def factorial(n): \
... ... \>function call
This should look quite different from the decorators we have been creating and using:
@timed <----------- no function call
def Fibonacci(n):
...
"""
from unittest import result
def timed(fn):
from time import perf_counter
    def inner(*args, **kwargs):
        total_elapsed = 0
for i in range(10):
start = perf_counter()
result = fn(*args, **kwargs)
total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / 10
        print('avg run time: {0:.6f}s'.format(avg_elapsed))
        return result
    return inner
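# Illustrative sketch (not part of the original note): the parameterized form
# that the docstring above is contrasting with the plain @timed, i.e. a
# decorator *factory* that takes an argument such as the number of repetitions.
def timed_factory(reps):
    def decorator(fn):
        from time import perf_counter
        from functools import wraps
        @wraps(fn)
        def inner(*args, **kwargs):
            total_elapsed = 0
            for _ in range(reps):
                start = perf_counter()
                result = fn(*args, **kwargs)
                total_elapsed += (perf_counter() - start)
            print('avg run time: {0:.6f}s over {1} reps'.format(total_elapsed / reps, reps))
            return result
        return inner
    return decorator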
"""
""" | 23.088235 | 86 | 0.546497 |
de624e13ae4d09789c2a6e277c569400869fb618 | 10,301 | py | Python | pybda/testing/old/test_bda_03_ave.py | OxfordSKA/bda | 0f92bead29a26d18fddaf5eb2ecd6f6d92401c2d | [
"BSD-3-Clause"
] | 2 | 2019-11-22T18:39:49.000Z | 2020-03-13T04:03:50.000Z | pybda/testing/old/test_bda_03_ave.py | OxfordSKA/bda | 0f92bead29a26d18fddaf5eb2ecd6f6d92401c2d | [
"BSD-3-Clause"
] | null | null | null | pybda/testing/old/test_bda_03_ave.py | OxfordSKA/bda | 0f92bead29a26d18fddaf5eb2ecd6f6d92401c2d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import numpy as np
import pickle
import progressbar
import matplotlib.pyplot as plt
def load_ms(ms):
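    """Load per-row UVW, times, antenna indices and DATA from a CASA
    MeasurementSet into a numpy structured array (assumes the casapy 'tb'
    table tool is available in the calling environment)."""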
dtype = [('uu', 'f8'), ('vv', 'f8'), ('ww', 'f8'),
('time', 'f8'), ('time_c', 'f8'),
('a1', 'i4'), ('a2', 'i4'), ('data', 'c16'), ('t', 'f8')]
tb.open(ms)
num_rows = tb.nrows()
values = np.zeros((num_rows,), dtype=dtype)
values['data'] = np.squeeze(tb.getcol('DATA'))
values['a1'] = np.squeeze(tb.getcol('ANTENNA1'))
values['a2'] = np.squeeze(tb.getcol('ANTENNA2'))
uvw = tb.getcol('UVW')
values['uu'] = uvw[0, :]
values['vv'] = uvw[1, :]
values['ww'] = uvw[2, :]
time = tb.getcol('TIME')
values['time'] = time
time -= time[0]
values['t'] = time
time_c = tb.getcol('TIME_CENTROID')
values['time_c'] = time_c
tb.close()
return values
def load_ms_2(ms):
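    """Like load_ms() but also reads EXPOSURE, INTERVAL, SIGMA, WEIGHT and the
    MODEL_DATA / CORRECTED_DATA columns."""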
dtype = [('uu', 'f8'), ('vv', 'f8'), ('ww', 'f8'),
('time', 'f8'), ('time_c', 'f8'), ('exposure', 'f8'),
('interval', 'f8'), ('t', 'f8'),
('sigma', 'f8'), ('weight', 'i4'),
('a1', 'i4'), ('a2', 'i4'),
('data', 'c16'),
('model_data', 'c16'), ('corrected_data', 'c16')]
tb.open(ms)
num_rows = tb.nrows()
values = np.zeros((num_rows,), dtype=dtype)
uvw = tb.getcol('UVW')
values['uu'] = uvw[0, :]
values['vv'] = uvw[1, :]
values['ww'] = uvw[2, :]
time = tb.getcol('TIME')
values['time'] = time
time -= time[0]
values['t'] = time
time_c = tb.getcol('TIME_CENTROID')
values['time_c'] = time_c
values['exposure'] = tb.getcol('EXPOSURE')
values['interval'] = tb.getcol('INTERVAL')
values['sigma'] = tb.getcol('SIGMA')
values['weight'] = tb.getcol('WEIGHT')
values['a1'] = np.squeeze(tb.getcol('ANTENNA1'))
values['a2'] = np.squeeze(tb.getcol('ANTENNA2'))
values['data'] = np.squeeze(tb.getcol('DATA'))
values['model_data'] = np.squeeze(tb.getcol('MODEL_DATA'))
values['corrected_data'] = np.squeeze(tb.getcol('CORRECTED_DATA'))
tb.close()
return values
test_model = True
test_model_ave = True
test_corrupted = True
test_corrupted_ave = True
test_calibrated = True
test_calibrated_bda = True
dir_default = 'f2f_default'
dir_new = 'f2f_new'
if test_model:
print 'Model:'
default_ms = os.path.join(dir_default, 'vis', 'model.ms')
new_ms = os.path.join(dir_new, 'vis', 'model.ms')
default = load_ms(default_ms)
new = load_ms(new_ms)
print '*' * 60
print default.shape
print new.shape
print '*' * 60
print 'Diffs:'
print ' DATA :', np.max(np.abs(default['data'] - new['data']))
print ' TIME :', np.max(np.abs(default['time'] - new['time']))
print ' TIMEC :', np.max(np.abs(default['time_c'] - new['time_c']))
print ' UU :', np.max(np.abs(default['uu'] - new['uu']))
print ' VV :', np.max(np.abs(default['vv'] - new['vv']))
print ' WW :', np.max(np.abs(default['ww'] - new['ww']))
print ' ANT1 :', np.max(np.abs(default['a1'] - new['a1']))
print ' ANT2 :', np.max(np.abs(default['a2'] - new['a2']))
print ''
if test_model_ave:
print 'Model ave:'
default_ms = os.path.join(dir_default, 'vis', 'model_bda.ms')
new_ms = os.path.join(dir_new, 'vis', 'model_bda.ms')
default = load_ms(default_ms)
new = load_ms(new_ms)
print '*' * 60
print default.shape
print new.shape
print '*' * 60
# diff_time = np.abs(default['time'] - new['time'])
# plt.plot(diff_time[1250:1300], '+')
# plt.show()
# diff_data = np.abs(default['data'] - new['data'])
# print len(diff_time[diff_time > 1.0])
# print default['time'][1000]
# print new['time'][1000]
# plt.plot(diff_data[0:10000], '+')
# plt.show()
print 'Diffs:'
print ' DATA :', np.max(np.abs(default['data'] - new['data']))
print ' TIME :', np.max(np.abs(default['time'] - new['time']))
print ' TIMEC :', np.max(np.abs(default['time_c'] - new['time_c']))
print ' T :', np.max(np.abs(default['t'] - new['t']))
print ' UU :', np.max(np.abs(default['uu'] - new['uu']))
print ' VV :', np.max(np.abs(default['vv'] - new['vv']))
print ' WW :', np.max(np.abs(default['ww'] - new['ww']))
print ' ANT1 :', np.max(np.abs(default['a1'] - new['a1']))
print ' ANT2 :', np.max(np.abs(default['a2'] - new['a2']))
print ''
if test_corrupted:
print 'Corrupted:'
default_ms = os.path.join(dir_default, 'vis', 'corrupted.ms')
new_ms = os.path.join(dir_new, 'vis', 'corrupted.ms')
default = load_ms_2(default_ms)
new = load_ms_2(new_ms)
print '*' * 60
print default.shape
print new.shape
print '*' * 60
print 'Diffs:'
print ' DATA :', np.max(np.abs(default['data'] - new['data']))
print ' MODEL :', np.max(np.abs(default['model_data'] - new['model_data']))
print ' CORR :', np.max(np.abs(default['corrected_data'] - new['corrected_data']))
print ' TIME :', np.max(np.abs(default['time'] - new['time']))
print ' TIMEC :', np.max(np.abs(default['time_c'] - new['time_c']))
print ' T :', np.max(np.abs(default['t'] - new['t']))
print ' EXPO :', np.max(np.abs(default['exposure'] - new['exposure']))
print ' INTER :', np.max(np.abs(default['interval'] - new['interval']))
print ' SIGMA :', np.max(np.abs(default['sigma'] - new['sigma']))
print ' WEIGH :', np.max(np.abs(default['weight'] - new['weight']))
print ' UU :', np.max(np.abs(default['uu'] - new['uu']))
print ' VV :', np.max(np.abs(default['vv'] - new['vv']))
print ' WW :', np.max(np.abs(default['ww'] - new['ww']))
print ' ANT1 :', np.max(np.abs(default['a1'] - new['a1']))
print ' ANT2 :', np.max(np.abs(default['a2'] - new['a2']))
print ''
if test_corrupted_ave:
print 'Corrupted ave:'
default_ms = os.path.join(dir_default, 'vis', 'corrupted_bda.ms')
new_ms = os.path.join(dir_new, 'vis', 'corrupted_bda.ms')
default = load_ms_2(default_ms)
new = load_ms_2(new_ms)
print '*' * 60
print default.shape
print new.shape
print '*' * 60
print 'Diffs:'
print ' DATA :', np.max(np.abs(default['data'] - new['data']))
print ' MODEL :', np.max(np.abs(default['model_data'] - new['model_data']))
print ' CORR :', np.max(np.abs(default['corrected_data'] - new['corrected_data']))
print ' TIME :', np.max(np.abs(default['time'] - new['time']))
print ' TIMEC :', np.max(np.abs(default['time_c'] - new['time_c']))
print ' T :', np.max(np.abs(default['t'] - new['t']))
print ' EXPO :', np.max(np.abs(default['exposure'] - new['exposure']))
print ' INTER :', np.max(np.abs(default['interval'] - new['interval']))
print ' SIGMA :', np.max(np.abs(default['sigma'] - new['sigma']))
print ' WEIGH :', np.max(np.abs(default['weight'] - new['weight']))
print ' UU :', np.max(np.abs(default['uu'] - new['uu']))
print ' VV :', np.max(np.abs(default['vv'] - new['vv']))
print ' WW :', np.max(np.abs(default['ww'] - new['ww']))
print ' ANT1 :', np.max(np.abs(default['a1'] - new['a1']))
print ' ANT2 :', np.max(np.abs(default['a2'] - new['a2']))
print ''
if test_calibrated:
print 'Calibrated:'
default_ms = os.path.join(dir_default, 'vis', 'calibrated.ms')
new_ms = os.path.join(dir_new, 'vis', 'calibrated.ms')
default = load_ms_2(default_ms)
new = load_ms_2(new_ms)
print '*' * 60
print default.shape
print new.shape
print '*' * 60
print 'Diffs:'
print ' DATA :', np.max(np.abs(default['data'] - new['data']))
print ' MODEL :', np.max(np.abs(default['model_data'] - new['model_data']))
print ' CORR :', np.max(np.abs(default['corrected_data'] - new['corrected_data']))
print ' TIME :', np.max(np.abs(default['time'] - new['time']))
print ' TIMEC :', np.max(np.abs(default['time_c'] - new['time_c']))
print ' T :', np.max(np.abs(default['t'] - new['t']))
print ' EXPO :', np.max(np.abs(default['exposure'] - new['exposure']))
print ' INTER :', np.max(np.abs(default['interval'] - new['interval']))
print ' SIGMA :', np.max(np.abs(default['sigma'] - new['sigma']))
print ' WEIGH :', np.max(np.abs(default['weight'] - new['weight']))
print ' UU :', np.max(np.abs(default['uu'] - new['uu']))
print ' VV :', np.max(np.abs(default['vv'] - new['vv']))
print ' WW :', np.max(np.abs(default['ww'] - new['ww']))
print ' ANT1 :', np.max(np.abs(default['a1'] - new['a1']))
print ' ANT2 :', np.max(np.abs(default['a2'] - new['a2']))
print ''
if test_calibrated_bda:
print 'Calibrated ave:'
default_ms = os.path.join(dir_default, 'vis', 'calibrated_bda.ms')
new_ms = os.path.join(dir_new, 'vis', 'calibrated_bda.ms')
default = load_ms_2(default_ms)
new = load_ms_2(new_ms)
print '*' * 60
print default.shape
print new.shape
print '*' * 60
diff = np.abs(default['corrected_data'] - new['corrected_data'])
# plt.plot(diff[0:10000], '+')
plt.plot(diff, '.')
plt.show()
print 'Diffs:'
print ' DATA :', np.max(np.abs(default['data'] - new['data']))
print ' MODEL :', np.max(np.abs(default['model_data'] - new['model_data']))
print ' CORR :', np.max(np.abs(default['corrected_data'] - new['corrected_data']))
print ' TIME :', np.max(np.abs(default['time'] - new['time']))
print ' TIMEC :', np.max(np.abs(default['time_c'] - new['time_c']))
print ' T :', np.max(np.abs(default['t'] - new['t']))
print ' EXPO :', np.max(np.abs(default['exposure'] - new['exposure']))
print ' INTER :', np.max(np.abs(default['interval'] - new['interval']))
print ' SIGMA :', np.max(np.abs(default['sigma'] - new['sigma']))
print ' WEIGH :', np.max(np.abs(default['weight'] - new['weight']))
print ' UU :', np.max(np.abs(default['uu'] - new['uu']))
print ' VV :', np.max(np.abs(default['vv'] - new['vv']))
print ' WW :', np.max(np.abs(default['ww'] - new['ww']))
print ' ANT1 :', np.max(np.abs(default['a1'] - new['a1']))
print ' ANT2 :', np.max(np.abs(default['a2'] - new['a2']))
print ''
| 38.01107 | 88 | 0.557713 |
2e392c87f1249d3e6f4dd16507348f23c2faeba3 | 6,618 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/network/aci/aci_l3out_route_tag_policy.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | venv/lib/python2.7/site-packages/ansible/modules/network/aci/aci_l3out_route_tag_policy.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | venv/lib/python2.7/site-packages/ansible/modules/network/aci/aci_l3out_route_tag_policy.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_l3out_route_tag_policy
short_description: Manage route tag policies (l3ext:RouteTagPol)
description:
- Manage route tag policies on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(l3ext:RouteTagPol) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
rtp:
description:
- The name of the route tag policy.
required: yes
aliases: [ name, rtp_name ]
description:
description:
- The description for the route tag policy.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
tag:
description:
- The value of the route tag (range 0-4294967295).
- The APIC defaults to C(4294967295) when unset during creation.
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_l3out_route_tag_policy:
host: apic
username: admin
password: SomeSecretPassword
rtp: '{{ rtp_name }}'
tenant: production
tag: '{{ tag }}'
description: '{{ description }}'
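# Additional illustrative tasks (placeholder values, not verified against a
# live APIC):
- aci_l3out_route_tag_policy:
    host: apic
    username: admin
    password: SomeSecretPassword
    rtp: '{{ rtp_name }}'
    tenant: production
    state: query
- aci_l3out_route_tag_policy:
    host: apic
    username: admin
    password: SomeSecretPassword
    rtp: '{{ rtp_name }}'
    tenant: production
    state: absent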
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
rtp=dict(type='str', required=False, aliases=['name', 'rtp_name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for quering all objects
description=dict(type='str', aliases=['descr']),
tag=dict(type='int'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['rtp', 'tenant']],
['state', 'present', ['rtp', 'tenant']],
],
)
rtp = module.params['rtp']
description = module.params['description']
tag = module.params['tag']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='l3extRouteTagPol',
aci_rn='rttag-{0}'.format(rtp),
filter_target='eq(l3extRouteTagPol.name, "{0}")'.format(rtp),
module_object=rtp,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='l3extRouteTagPol',
class_config=dict(
name=rtp,
descr=description, tag=tag,
),
)
aci.get_diff(aci_class='l3extRouteTagPol')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| 27.575 | 141 | 0.600937 |
fd41909e98290de237b69a7de04e4eb01dde5e16 | 1,435 | py | Python | app/views.py | texib/costfish | 4cd596cad19200139d8eba1e73c315457a72179a | ["MIT"] | null | null | null | app/views.py | texib/costfish | 4cd596cad19200139d8eba1e73c315457a72179a | ["MIT"] | null | null | null | app/views.py | texib/costfish | 4cd596cad19200139d8eba1e73c315457a72179a | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
from app import app
@app.route('/')
@app.route('/index')
def index():
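    # Overview (descriptive comments added for clarity; behaviour inferred from the code
    # below): this view pulls daily price rows from the 'costfish' Google Sheet, computes
    # the day-over-day change for each item, and renders the items whose price rose by
    # more than 5% as a simple HTML fragment.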
import numpy as np
import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
json_key = json.load(open('./costfish-3a4d1968df8f.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
gc = gspread.authorize(credentials)
wks = gc.open('costfish')
sheet = wks.sheet1
list_of_lists = sheet.get_all_values()
import pandas as pd
columns_name=['Date','牛','羊','豬','雞','魚','貝','蟹','軟絲','高麗菜','空心菜','絲瓜','南瓜','西瓜','柳丁','鳯梨','百合','蓮子','紅棗']
df = pd.DataFrame(list_of_lists[1:],columns=columns_name,dtype=float)
nparray = df.as_matrix()
rows , columns = nparray.shape
lastPriceList = nparray[rows-1][1:]
last2ThPriceList =nparray[rows-2][1:]
priceChangeList = lastPriceList - last2ThPriceList
priceChPercent = priceChangeList / last2ThPriceList
r = '<img src=https://fbcdn-profile-a.akamaihd.net/hprofile-ak-prn2/v/t1.0-1/c18.0.160.160/p160x160/556433_337509823007948_1184595735_n.jpg?oh=286a67f0cc1a125346a3d7cb7b6b23e8&oe=561FB8BD&__gda__=1444064936_e6a1665103cd0e5412e2d0e198e7d442 /><br><h1>'
kpiindex=[]
for index , change in enumerate(priceChPercent) :
if change > 0.05 :
r += columns_name[index + 1] + ': ' + str(round(change*100,2))+ '%<br>'
return r
| 30.531915 | 252 | 0.712892 |
1b4965f826dcb67cc48fc5e721dd651ca21bc880 | 2,375 | py | Python | ros/src/twist_controller/twist_controller.py | OctopusNO1/CarND-Capstone-master | 39f153cb0bf09bfda0a455864bd5a61c6a45501a | ["MIT"] | 1 | 2019-12-12T07:25:55.000Z | 2019-12-12T07:25:55.000Z | ros/src/twist_controller/twist_controller.py | OctopusNO1/CarND-Capstone-master | 39f153cb0bf09bfda0a455864bd5a61c6a45501a | ["MIT"] | null | null | null | ros/src/twist_controller/twist_controller.py | OctopusNO1/CarND-Capstone-master | 39f153cb0bf09bfda0a455864bd5a61c6a45501a | ["MIT"] | null | null | null | from pid import PID
from yaw_controller import YawController
from lowpass import LowPassFilter
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, *args, **kwargs):
# TODO: Implement
self.vehicle_mass = kwargs['vehicle_mass']
self.fuel_capacity = kwargs['fuel_capacity']
self.brake_deadband = kwargs['brake_deadband']
self.decel_limit = kwargs['decel_limit']
self.accel_limit = kwargs['accel_limit']
self.wheel_radius = kwargs['wheel_radius']
self.wheel_base = kwargs['wheel_base']
self.steer_ratio = kwargs['steer_ratio']
self.max_lat_accel = kwargs['max_lat_accel']
self.max_steer_angle = kwargs['max_steer_angle']
min_speed = 0
self.s_lpf = LowPassFilter(tau = 0.5, ts = 1)
self.linear_pid = PID(kp=0.8, ki=0, kd=0.05, mn=self.decel_limit, mx=0.5 * self.accel_limit)
self.yaw_controller = YawController(self.wheel_base, self.steer_ratio, min_speed, self.max_lat_accel, self.max_steer_angle)
self.steering_pid = PID(kp=0.15, ki=0.001, kd=1, mn=-self.max_steer_angle, mx=self.max_steer_angle)
def control(self, proposed_linear_velocity, proposed_angular_velocity, current_linear_velocity, cross_track_error, duration_in_seconds):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
linear_velocity_error = proposed_linear_velocity - current_linear_velocity
velocity_correction = self.linear_pid.step(linear_velocity_error, duration_in_seconds)
brake = 0
throttle = velocity_correction
if(throttle < 0):
deceleration = abs(throttle)
brake = (self.vehicle_mass + self.fuel_capacity * GAS_DENSITY) * self.wheel_radius * deceleration if deceleration > self.brake_deadband else 1.
throttle = 0.0
predictive_steering = self.yaw_controller.get_steering(proposed_linear_velocity, proposed_angular_velocity, current_linear_velocity)
corrective_steering = self.steering_pid.step(cross_track_error, duration_in_seconds)
steering = predictive_steering + corrective_steering * 0.33
steering = self.s_lpf.filt(steering)
return throttle, brake, steering
def reset(self):
self.linear_pid.reset()
self.steering_pid.reset()
| 40.948276 | 155 | 0.704842 |
4cb1012c3869789062ee64ba7cbd542bc8e2c25e | 27,960 | gyp | Python | third_party/mesa/mesa.gyp | gitFreeByte/sky_engine | 05c9048930f8a0d39c2f6385ba691eccbbdabb20 | ["BSD-3-Clause"] | 1 | 2021-06-12T00:47:11.000Z | 2021-06-12T00:47:11.000Z | third_party/mesa/mesa.gyp | gitFreeByte/sky_engine | 05c9048930f8a0d39c2f6385ba691eccbbdabb20 | ["BSD-3-Clause"] | null | null | null | third_party/mesa/mesa.gyp | gitFreeByte/sky_engine | 05c9048930f8a0d39c2f6385ba691eccbbdabb20 | ["BSD-3-Clause"] | null | null | null | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'generated_src_dir': 'src/chromium_gensrc',
},
'target_defaults': {
'defines': [
'MAPI_ABI_HEADER="glapi_mapi_tmp_shared.h"',
"PACKAGE_NAME=\"Mesa\"",
"PACKAGE_TARNAME=\"mesa\"",
"PACKAGE_VERSION=\"9.0.3\"",
"PACKAGE_STRING=\"Mesa\ 9.0.3\"",
"PACKAGE_BUGREPORT=\"https://bugs.freedesktop.org/enter_bug.cgi\?product=Mesa\"",
"PACKAGE_URL=\"\"",
"PACKAGE=\"mesa\"",
"VERSION=\"9.0.3\"",
"STDC_HEADERS=1",
"HAVE_SYS_TYPES_H=1",
"HAVE_SYS_STAT_H=1",
"HAVE_STDLIB_H=1",
"HAVE_STRING_H=1",
"HAVE_MEMORY_H=1",
"HAVE_STRINGS_H=1",
"HAVE_INTTYPES_H=1",
"HAVE_STDINT_H=1",
"HAVE_DLFCN_H=1",
"LT_OBJDIR=\".libs/\"",
"YYTEXT_POINTER=1",
"HAVE_LIBEXPAT=1",
"HAVE_LIBXCB_DRI2=1",
"FEATURE_GL=1",
'MAPI_MODE_GLAPI',
#"USE_X86_64_ASM",
"IN_DRI_DRIVER",
"USE_XCB",
"GLX_INDIRECT_RENDERING",
"GLX_DIRECT_RENDERING",
"USE_EXTERNAL_DXTN_LIB=1",
"IN_DRI_DRIVER",
"HAVE_ALIAS",
"HAVE_MINCORE",
"HAVE_LIBUDEV",
"_GLAPI_NO_EXPORTS",
],
'conditions': [
['OS=="android" or OS=="linux"', {
'defines': [
'_GNU_SOURCE',
],
}],
['OS=="win"', {
'defines': [
# Generated files use const only if __cplusplus or __STDC__ is
# defined. On Windows, neither is defined, so define YY_USE_CONST
# to explicitly enable const.
'YY_USE_CONST',
],
}],
['os_posix == 1', {
'defines': [
'HAVE_DLOPEN',
'HAVE_PTHREAD=1',
'HAVE_UNISTD_H=1',
],
}],
['os_posix == 1 and OS != "android"', {
'defines': [
'HAVE_POSIX_MEMALIGN',
],
}],
['os_posix == 1 and OS != "mac" and OS != "android"', {
'cflags': [
'-fPIC',
],
}],
['ubsan_vptr == 1', {
'cflags!': [
# UBsan's vptr is not compatible with -fno-rtti,
# which is used by gallium/auxiliary/Makefile.
'-fsanitize=null',
'-fsanitize=vptr',
'-fsanitize-coverage=<(sanitizer_coverage)',
],
}],
],
},
'targets': [
{
'target_name': 'mesa_headers',
'type': 'none',
'direct_dependent_settings': {
'include_dirs': [
'src/include',
],
},
'conditions': [
['use_x11==0', {
'direct_dependent_settings': {
'defines': [
'MESA_EGL_NO_X11_HEADERS',
],
},
}],
],
},
{
'target_name': 'mesa_libglslcommon',
'type': 'static_library',
'include_dirs': [
'src/src/gallium/auxiliary',
'src/src/gallium/include',
'src/src/glsl',
'src/src/glsl/glcpp',
'src/src/mapi',
'src/src/mapi/glapi',
'src/src/mesa',
'src/src/mesa/main',
'src/include',
'<(generated_src_dir)/mesa/',
'<(generated_src_dir)/mesa/main',
'<(generated_src_dir)/mesa/program',
'<(generated_src_dir)/mesa/glapi',
],
'dependencies': [
'mesa_headers',
],
# TODO(scottmg): http://crbug.com/143877 These should be removed if
# Mesa is ever rolled and the warnings are fixed.
'msvs_disabled_warnings': [
4005, 4018, 4065, 4090, 4099, 4291, 4345, 4267,
],
'variables': {
'clang_warning_flags': [
'-Wno-tautological-constant-out-of-range-compare',
'-Wno-mismatched-tags', # Fixed upstream.
],
'clang_warning_flags_unset': [
# Don't warn about string->bool used in asserts.
'-Wstring-conversion',
],
},
'sources': [
'<(generated_src_dir)/mesa/main/dispatch.h',
'src/src/glsl/ast_expr.cpp',
'src/src/glsl/ast_function.cpp',
'src/src/glsl/ast_to_hir.cpp',
'src/src/glsl/ast_type.cpp',
'src/src/glsl/builtin_variables.cpp',
'<(generated_src_dir)/mesa/glcpp-lex.c',
'<(generated_src_dir)/mesa/glcpp-parse.c',
'<(generated_src_dir)/mesa/glcpp-parse.h',
'src/src/glsl/glcpp/glcpp.h',
'src/src/glsl/glcpp/pp.c',
'<(generated_src_dir)/mesa/glsl_lexer.cc',
'<(generated_src_dir)/mesa/glsl_parser.cc',
'src/src/glsl/glsl_parser_extras.cpp',
'src/src/glsl/glsl_parser_extras.h',
'src/src/glsl/glsl_symbol_table.cpp',
'src/src/glsl/glsl_symbol_table.h',
'src/src/glsl/glsl_types.cpp',
'src/src/glsl/glsl_types.h',
'src/src/glsl/hir_field_selection.cpp',
'src/src/glsl/ir.cpp',
'src/src/glsl/ir.h',
'src/src/glsl/ir_basic_block.cpp',
'src/src/glsl/ir_basic_block.h',
'src/src/glsl/ir_builder.cpp',
'src/src/glsl/ir_builder.h',
'src/src/glsl/ir_clone.cpp',
'src/src/glsl/ir_constant_expression.cpp',
'src/src/glsl/ir_expression_flattening.cpp',
'src/src/glsl/ir_expression_flattening.h',
'src/src/glsl/ir_function.cpp',
'src/src/glsl/ir_function_can_inline.cpp',
'src/src/glsl/ir_function_detect_recursion.cpp',
'src/src/glsl/ir_hierarchical_visitor.cpp',
'src/src/glsl/ir_hierarchical_visitor.h',
'src/src/glsl/ir_hv_accept.cpp',
'src/src/glsl/ir_import_prototypes.cpp',
'src/src/glsl/ir_print_visitor.cpp',
'src/src/glsl/ir_print_visitor.h',
'src/src/glsl/ir_reader.cpp',
'src/src/glsl/ir_reader.h',
'src/src/glsl/ir_rvalue_visitor.cpp',
'src/src/glsl/ir_rvalue_visitor.h',
'src/src/glsl/ir_set_program_inouts.cpp',
'src/src/glsl/ir_validate.cpp',
'src/src/glsl/ir_variable_refcount.cpp',
'src/src/glsl/ir_variable_refcount.h',
'src/src/glsl/link_functions.cpp',
'src/src/glsl/link_uniform_initializers.cpp',
'src/src/glsl/link_uniforms.cpp',
'src/src/glsl/linker.cpp',
'src/src/glsl/linker.h',
'src/src/glsl/loop_analysis.cpp',
'src/src/glsl/loop_analysis.h',
'src/src/glsl/loop_controls.cpp',
'src/src/glsl/loop_unroll.cpp',
'src/src/glsl/lower_clip_distance.cpp',
'src/src/glsl/lower_discard.cpp',
'src/src/glsl/lower_discard_flow.cpp',
'src/src/glsl/lower_if_to_cond_assign.cpp',
'src/src/glsl/lower_instructions.cpp',
'src/src/glsl/lower_jumps.cpp',
'src/src/glsl/lower_mat_op_to_vec.cpp',
'src/src/glsl/lower_noise.cpp',
'src/src/glsl/lower_output_reads.cpp',
'src/src/glsl/lower_texture_projection.cpp',
'src/src/glsl/lower_ubo_reference.cpp',
'src/src/glsl/lower_variable_index_to_cond_assign.cpp',
'src/src/glsl/lower_vec_index_to_cond_assign.cpp',
'src/src/glsl/lower_vec_index_to_swizzle.cpp',
'src/src/glsl/lower_vector.cpp',
'src/src/glsl/opt_algebraic.cpp',
'src/src/glsl/opt_array_splitting.cpp',
'src/src/glsl/opt_constant_folding.cpp',
'src/src/glsl/opt_constant_propagation.cpp',
'src/src/glsl/opt_constant_variable.cpp',
'src/src/glsl/opt_copy_propagation.cpp',
'src/src/glsl/opt_copy_propagation_elements.cpp',
'src/src/glsl/opt_dead_code.cpp',
'src/src/glsl/opt_dead_code_local.cpp',
'src/src/glsl/opt_dead_functions.cpp',
'src/src/glsl/opt_function_inlining.cpp',
'src/src/glsl/opt_if_simplification.cpp',
'src/src/glsl/opt_noop_swizzle.cpp',
'src/src/glsl/opt_redundant_jumps.cpp',
'src/src/glsl/opt_structure_splitting.cpp',
'src/src/glsl/opt_swizzle_swizzle.cpp',
'src/src/glsl/opt_tree_grafting.cpp',
'src/src/glsl/program.h',
'src/src/glsl/ralloc.c',
'src/src/glsl/ralloc.h',
'src/src/glsl/s_expression.cpp',
'src/src/glsl/s_expression.h',
# This file is not needed and has duplicate symbols (although it
# happens to link because of static library link ordering).
#'src/src/glsl/standalone_scaffolding.cpp',
#'src/src/glsl/standalone_scaffolding.h',
'src/src/glsl/strtod.c',
'src/src/glsl/strtod.h',
],
},
{
'target_name': 'mesa',
'type': 'static_library',
'include_dirs': [
'src/src/gallium/auxiliary',
'src/src/gallium/include',
'src/src/glsl',
'src/src/glsl/glcpp',
'src/src/mapi',
'src/src/mapi/glapi',
'src/src/mesa',
'src/src/mesa/main',
'<(generated_src_dir)/mesa/',
'<(generated_src_dir)/mesa/main',
'<(generated_src_dir)/mesa/program',
'<(generated_src_dir)/mesa/glapi',
],
'dependencies': [
'mesa_headers',
'mesa_libglslcommon',
],
# TODO(scottmg): http://crbug.com/143877 These should be removed if
# Mesa is ever rolled and the warnings are fixed.
'msvs_disabled_warnings': [
4005, 4018, 4090, 4099, 4146, 4291, 4305, 4334, 4748, 4267,
],
'variables': {
'clang_warning_flags': [
'-Wno-tautological-constant-out-of-range-compare',
'-Wno-absolute-value', # Fires on st_atom_array.c, might be a bug
'-Wno-mismatched-tags', # Fixed upstream.
],
'clang_warning_flags_unset': [
# Don't warn about string->bool used in asserts.
'-Wstring-conversion',
],
},
'sources': [
'<(generated_src_dir)/mesa/builtin_function.cpp',
'<(generated_src_dir)/mesa/glapi_mapi_tmp_shared.h',
'src/src/mapi/mapi/entry.c',
'src/src/mapi/mapi/entry.h',
'src/src/mapi/mapi/mapi_glapi.c',
'src/src/mapi/mapi/stub.c',
'src/src/mapi/mapi/stub.h',
'src/src/mapi/mapi/table.c',
'src/src/mapi/mapi/table.h',
'src/src/mapi/mapi/u_current.c',
'src/src/mapi/mapi/u_current.h',
'src/src/mapi/mapi/u_execmem.c',
'src/src/mapi/mapi/u_execmem.h',
'src/src/mesa/main/accum.c',
'src/src/mesa/main/accum.h',
'src/src/mesa/main/api_arrayelt.c',
'src/src/mesa/main/api_arrayelt.h',
'src/src/mesa/main/api_exec.c',
'src/src/mesa/main/api_exec.h',
'src/src/mesa/main/api_loopback.c',
'src/src/mesa/main/api_loopback.h',
'src/src/mesa/main/api_validate.c',
'src/src/mesa/main/api_validate.h',
'src/src/mesa/main/arbprogram.c',
'src/src/mesa/main/arbprogram.h',
'src/src/mesa/main/arrayobj.c',
'src/src/mesa/main/arrayobj.h',
'src/src/mesa/main/atifragshader.c',
'src/src/mesa/main/atifragshader.h',
'src/src/mesa/main/attrib.c',
'src/src/mesa/main/attrib.h',
'src/src/mesa/main/blend.c',
'src/src/mesa/main/blend.h',
'src/src/mesa/main/bufferobj.c',
'src/src/mesa/main/bufferobj.h',
'src/src/mesa/main/buffers.c',
'src/src/mesa/main/buffers.h',
'src/src/mesa/main/clear.c',
'src/src/mesa/main/clear.h',
'src/src/mesa/main/clip.c',
'src/src/mesa/main/clip.h',
'src/src/mesa/main/colortab.c',
'src/src/mesa/main/colortab.h',
'src/src/mesa/main/condrender.c',
'src/src/mesa/main/condrender.h',
'src/src/mesa/main/context.c',
'src/src/mesa/main/context.h',
'src/src/mesa/main/convolve.c',
'src/src/mesa/main/convolve.h',
'src/src/mesa/main/cpuinfo.c',
'src/src/mesa/main/cpuinfo.h',
'src/src/mesa/main/debug.c',
'src/src/mesa/main/debug.h',
'src/src/mesa/main/depth.c',
'src/src/mesa/main/depth.h',
'src/src/mesa/main/dlist.c',
'src/src/mesa/main/dlist.h',
'src/src/mesa/main/drawpix.c',
'src/src/mesa/main/drawpix.h',
'src/src/mesa/main/drawtex.c',
'src/src/mesa/main/drawtex.h',
'src/src/mesa/main/enable.c',
'src/src/mesa/main/enable.h',
'<(generated_src_dir)/mesa/enums.c',
'src/src/mesa/main/enums.h',
'src/src/mesa/main/errors.c',
'src/src/mesa/main/errors.h',
'src/src/mesa/main/eval.c',
'src/src/mesa/main/eval.h',
'src/src/mesa/main/execmem.c',
'src/src/mesa/main/extensions.c',
'src/src/mesa/main/extensions.h',
'src/src/mesa/main/fbobject.c',
'src/src/mesa/main/fbobject.h',
'src/src/mesa/main/feedback.c',
'src/src/mesa/main/feedback.h',
'src/src/mesa/main/ff_fragment_shader.cpp',
'src/src/mesa/main/ffvertex_prog.c',
'src/src/mesa/main/ffvertex_prog.h',
'src/src/mesa/main/fog.c',
'src/src/mesa/main/fog.h',
'src/src/mesa/main/format_pack.c',
'src/src/mesa/main/format_pack.h',
'src/src/mesa/main/format_unpack.c',
'src/src/mesa/main/format_unpack.h',
'src/src/mesa/main/formats.c',
'src/src/mesa/main/formats.h',
'src/src/mesa/main/framebuffer.c',
'src/src/mesa/main/framebuffer.h',
'src/src/mesa/main/get.c',
'src/src/mesa/main/get.h',
'src/src/mesa/main/getstring.c',
'src/src/mesa/main/glformats.c',
'src/src/mesa/main/glformats.h',
'src/src/mesa/main/hash.c',
'src/src/mesa/main/hash.h',
'src/src/mesa/main/hint.c',
'src/src/mesa/main/hint.h',
'src/src/mesa/main/histogram.c',
'src/src/mesa/main/histogram.h',
'src/src/mesa/main/image.c',
'src/src/mesa/main/image.h',
'src/src/mesa/main/imports.c',
'src/src/mesa/main/imports.h',
'src/src/mesa/main/light.c',
'src/src/mesa/main/light.h',
'src/src/mesa/main/lines.c',
'src/src/mesa/main/lines.h',
'src/src/mesa/main/matrix.c',
'src/src/mesa/main/matrix.h',
'src/src/mesa/main/mipmap.c',
'src/src/mesa/main/mipmap.h',
'src/src/mesa/main/mm.c',
'src/src/mesa/main/mm.h',
'src/src/mesa/main/multisample.c',
'src/src/mesa/main/multisample.h',
'src/src/mesa/main/nvprogram.c',
'src/src/mesa/main/nvprogram.h',
'src/src/mesa/main/pack.c',
'src/src/mesa/main/pack.h',
'src/src/mesa/main/pbo.c',
'src/src/mesa/main/pbo.h',
'src/src/mesa/main/pixel.c',
'src/src/mesa/main/pixel.h',
'src/src/mesa/main/pixelstore.c',
'src/src/mesa/main/pixelstore.h',
'src/src/mesa/main/pixeltransfer.c',
'src/src/mesa/main/pixeltransfer.h',
'src/src/mesa/main/points.c',
'src/src/mesa/main/points.h',
'src/src/mesa/main/polygon.c',
'src/src/mesa/main/polygon.h',
'src/src/mesa/main/queryobj.c',
'src/src/mesa/main/queryobj.h',
'src/src/mesa/main/rastpos.c',
'src/src/mesa/main/rastpos.h',
'src/src/mesa/main/readpix.c',
'src/src/mesa/main/readpix.h',
'src/src/mesa/main/remap.c',
'src/src/mesa/main/remap.h',
'src/src/mesa/main/renderbuffer.c',
'src/src/mesa/main/renderbuffer.h',
'src/src/mesa/main/samplerobj.c',
'src/src/mesa/main/samplerobj.h',
'src/src/mesa/main/scissor.c',
'src/src/mesa/main/scissor.h',
'src/src/mesa/main/shader_query.cpp',
'src/src/mesa/main/shaderapi.c',
'src/src/mesa/main/shaderapi.h',
'src/src/mesa/main/shaderobj.c',
'src/src/mesa/main/shaderobj.h',
'src/src/mesa/main/shared.c',
'src/src/mesa/main/shared.h',
'src/src/mesa/main/state.c',
'src/src/mesa/main/state.h',
'src/src/mesa/main/stencil.c',
'src/src/mesa/main/stencil.h',
'src/src/mesa/main/syncobj.c',
'src/src/mesa/main/syncobj.h',
'src/src/mesa/main/texcompress.c',
'src/src/mesa/main/texcompress.h',
'src/src/mesa/main/texcompress_cpal.c',
'src/src/mesa/main/texcompress_cpal.h',
'src/src/mesa/main/texcompress_etc.c',
'src/src/mesa/main/texcompress_etc.h',
'src/src/mesa/main/texcompress_fxt1.c',
'src/src/mesa/main/texcompress_fxt1.h',
'src/src/mesa/main/texcompress_rgtc.c',
'src/src/mesa/main/texcompress_rgtc.h',
'src/src/mesa/main/texcompress_s3tc.c',
'src/src/mesa/main/texcompress_s3tc.h',
'src/src/mesa/main/texenv.c',
'src/src/mesa/main/texenv.h',
'src/src/mesa/main/texformat.c',
'src/src/mesa/main/texformat.h',
'src/src/mesa/main/texgen.c',
'src/src/mesa/main/texgen.h',
'src/src/mesa/main/texgetimage.c',
'src/src/mesa/main/texgetimage.h',
'src/src/mesa/main/teximage.c',
'src/src/mesa/main/teximage.h',
'src/src/mesa/main/texobj.c',
'src/src/mesa/main/texobj.h',
'src/src/mesa/main/texparam.c',
'src/src/mesa/main/texparam.h',
'src/src/mesa/main/texstate.c',
'src/src/mesa/main/texstate.h',
'src/src/mesa/main/texstorage.c',
'src/src/mesa/main/texstorage.h',
'src/src/mesa/main/texstore.c',
'src/src/mesa/main/texstore.h',
'src/src/mesa/main/texturebarrier.c',
'src/src/mesa/main/texturebarrier.h',
'src/src/mesa/main/transformfeedback.c',
'src/src/mesa/main/transformfeedback.h',
'src/src/mesa/main/uniform_query.cpp',
'src/src/mesa/main/uniforms.c',
'src/src/mesa/main/uniforms.h',
'src/src/mesa/main/varray.c',
'src/src/mesa/main/varray.h',
'src/src/mesa/main/version.c',
'src/src/mesa/main/version.h',
'src/src/mesa/main/viewport.c',
'src/src/mesa/main/viewport.h',
'src/src/mesa/main/vtxfmt.c',
'src/src/mesa/main/vtxfmt.h',
'src/src/mesa/math/m_debug_clip.c',
'src/src/mesa/math/m_debug_norm.c',
'src/src/mesa/math/m_debug_xform.c',
'src/src/mesa/math/m_eval.c',
'src/src/mesa/math/m_eval.h',
'src/src/mesa/math/m_matrix.c',
'src/src/mesa/math/m_matrix.h',
'src/src/mesa/math/m_translate.c',
'src/src/mesa/math/m_translate.h',
'src/src/mesa/math/m_vector.c',
'src/src/mesa/math/m_vector.h',
'src/src/mesa/math/m_xform.c',
'src/src/mesa/math/m_xform.h',
'src/src/mesa/program/arbprogparse.c',
'src/src/mesa/program/arbprogparse.h',
'src/src/mesa/program/hash_table.c',
'src/src/mesa/program/hash_table.h',
'src/src/mesa/program/ir_to_mesa.cpp',
'src/src/mesa/program/ir_to_mesa.h',
'<(generated_src_dir)/mesa/lex.yy.c',
'src/src/mesa/program/nvfragparse.c',
'src/src/mesa/program/nvfragparse.h',
'src/src/mesa/program/nvvertparse.c',
'src/src/mesa/program/nvvertparse.h',
'src/src/mesa/program/prog_cache.c',
'src/src/mesa/program/prog_cache.h',
'src/src/mesa/program/prog_execute.c',
'src/src/mesa/program/prog_execute.h',
'src/src/mesa/program/prog_instruction.c',
'src/src/mesa/program/prog_instruction.h',
'src/src/mesa/program/prog_noise.c',
'src/src/mesa/program/prog_noise.h',
'src/src/mesa/program/prog_opt_constant_fold.c',
'src/src/mesa/program/prog_optimize.c',
'src/src/mesa/program/prog_optimize.h',
'src/src/mesa/program/prog_parameter.c',
'src/src/mesa/program/prog_parameter.h',
'src/src/mesa/program/prog_parameter_layout.c',
'src/src/mesa/program/prog_parameter_layout.h',
'src/src/mesa/program/prog_print.c',
'src/src/mesa/program/prog_print.h',
'src/src/mesa/program/prog_statevars.c',
'src/src/mesa/program/prog_statevars.h',
'src/src/mesa/program/program.c',
'src/src/mesa/program/program.h',
'<(generated_src_dir)/mesa/program/program_parse.tab.c',
'<(generated_src_dir)/mesa/program/program_parse.tab.h',
'src/src/mesa/program/program_parse_extra.c',
'src/src/mesa/program/programopt.c',
'src/src/mesa/program/programopt.h',
'src/src/mesa/program/register_allocate.c',
'src/src/mesa/program/register_allocate.h',
'src/src/mesa/program/sampler.cpp',
'src/src/mesa/program/sampler.h',
'src/src/mesa/program/string_to_uint_map.cpp',
'src/src/mesa/program/symbol_table.c',
'src/src/mesa/program/symbol_table.h',
'src/src/mesa/swrast/s_aaline.c',
'src/src/mesa/swrast/s_aaline.h',
'src/src/mesa/swrast/s_aatriangle.c',
'src/src/mesa/swrast/s_aatriangle.h',
'src/src/mesa/swrast/s_alpha.c',
'src/src/mesa/swrast/s_alpha.h',
'src/src/mesa/swrast/s_atifragshader.c',
'src/src/mesa/swrast/s_atifragshader.h',
'src/src/mesa/swrast/s_bitmap.c',
'src/src/mesa/swrast/s_blend.c',
'src/src/mesa/swrast/s_blend.h',
'src/src/mesa/swrast/s_blit.c',
'src/src/mesa/swrast/s_clear.c',
'src/src/mesa/swrast/s_context.c',
'src/src/mesa/swrast/s_context.h',
'src/src/mesa/swrast/s_copypix.c',
'src/src/mesa/swrast/s_depth.c',
'src/src/mesa/swrast/s_depth.h',
'src/src/mesa/swrast/s_drawpix.c',
'src/src/mesa/swrast/s_feedback.c',
'src/src/mesa/swrast/s_feedback.h',
'src/src/mesa/swrast/s_fog.c',
'src/src/mesa/swrast/s_fog.h',
'src/src/mesa/swrast/s_fragprog.c',
'src/src/mesa/swrast/s_fragprog.h',
'src/src/mesa/swrast/s_lines.c',
'src/src/mesa/swrast/s_lines.h',
'src/src/mesa/swrast/s_logic.c',
'src/src/mesa/swrast/s_logic.h',
'src/src/mesa/swrast/s_masking.c',
'src/src/mesa/swrast/s_masking.h',
'src/src/mesa/swrast/s_points.c',
'src/src/mesa/swrast/s_points.h',
'src/src/mesa/swrast/s_renderbuffer.c',
'src/src/mesa/swrast/s_renderbuffer.h',
'src/src/mesa/swrast/s_span.c',
'src/src/mesa/swrast/s_span.h',
'src/src/mesa/swrast/s_stencil.c',
'src/src/mesa/swrast/s_stencil.h',
'src/src/mesa/swrast/s_texcombine.c',
'src/src/mesa/swrast/s_texcombine.h',
'src/src/mesa/swrast/s_texfetch.c',
'src/src/mesa/swrast/s_texfetch.h',
'src/src/mesa/swrast/s_texfilter.c',
'src/src/mesa/swrast/s_texfilter.h',
'src/src/mesa/swrast/s_texrender.c',
'src/src/mesa/swrast/s_texture.c',
'src/src/mesa/swrast/s_triangle.c',
'src/src/mesa/swrast/s_triangle.h',
'src/src/mesa/swrast/s_zoom.c',
'src/src/mesa/swrast/s_zoom.h',
'src/src/mesa/swrast_setup/ss_context.c',
'src/src/mesa/swrast_setup/ss_context.h',
'src/src/mesa/swrast_setup/ss_triangle.c',
'src/src/mesa/swrast_setup/ss_triangle.h',
'src/src/mesa/tnl/t_context.c',
'src/src/mesa/tnl/t_context.h',
'src/src/mesa/tnl/t_draw.c',
'src/src/mesa/tnl/t_pipeline.c',
'src/src/mesa/tnl/t_pipeline.h',
'src/src/mesa/tnl/t_rasterpos.c',
'src/src/mesa/tnl/t_vb_fog.c',
'src/src/mesa/tnl/t_vb_light.c',
'src/src/mesa/tnl/t_vb_normals.c',
'src/src/mesa/tnl/t_vb_points.c',
'src/src/mesa/tnl/t_vb_program.c',
'src/src/mesa/tnl/t_vb_render.c',
'src/src/mesa/tnl/t_vb_texgen.c',
'src/src/mesa/tnl/t_vb_texmat.c',
'src/src/mesa/tnl/t_vb_vertex.c',
'src/src/mesa/tnl/t_vertex.c',
'src/src/mesa/tnl/t_vertex.h',
'src/src/mesa/tnl/t_vertex_generic.c',
'src/src/mesa/tnl/t_vertex_sse.c',
'src/src/mesa/tnl/t_vp_build.c',
'src/src/mesa/tnl/t_vp_build.h',
'src/src/mesa/vbo/vbo_context.c',
'src/src/mesa/vbo/vbo_context.h',
'src/src/mesa/vbo/vbo_exec.c',
'src/src/mesa/vbo/vbo_exec.h',
'src/src/mesa/vbo/vbo_exec_api.c',
'src/src/mesa/vbo/vbo_exec_array.c',
'src/src/mesa/vbo/vbo_exec_draw.c',
'src/src/mesa/vbo/vbo_exec_eval.c',
'src/src/mesa/vbo/vbo_noop.c',
'src/src/mesa/vbo/vbo_noop.h',
'src/src/mesa/vbo/vbo_primitive_restart.c',
'src/src/mesa/vbo/vbo_rebase.c',
'src/src/mesa/vbo/vbo_save.c',
'src/src/mesa/vbo/vbo_save.h',
'src/src/mesa/vbo/vbo_save_api.c',
'src/src/mesa/vbo/vbo_save_draw.c',
'src/src/mesa/vbo/vbo_save_loopback.c',
'src/src/mesa/vbo/vbo_split.c',
'src/src/mesa/vbo/vbo_split.h',
'src/src/mesa/vbo/vbo_split_copy.c',
'src/src/mesa/vbo/vbo_split_inplace.c',
'src/src/mesa/x86-64/x86-64.c',
'src/src/mesa/x86-64/x86-64.h',
],
'conditions': [
['OS=="android" and clang==0', {
# Disable sincos() optimization to avoid a linker error
# since Android's math library doesn't have sincos().
# Either -fno-builtin-sin or -fno-builtin-cos works.
'cflags': [
'-fno-builtin-sin',
],
}],
['OS=="win"', {
'defines': [
# Because we're building as a static library
'_GLAPI_NO_EXPORTS',
],
}],
],
},
# Building this target will hide the native OpenGL shared library and
# replace it with a slow software renderer.
{
'target_name': 'osmesa',
'type': 'loadable_module',
'mac_bundle': 0,
'dependencies': [
'mesa_headers',
'mesa',
],
'xcode_settings': {
'OTHER_LDFLAGS': [
'-lstdc++',
],
},
'conditions': [
['OS=="win"', {
'defines': [
'BUILD_GL32',
'KEYWORD1=GLAPI',
'KEYWORD2=GLAPIENTRY',
],
}],
['OS=="linux"', {
'link_settings': {
'libraries': [
'-ldl',
'-lm',
'-lstdc++',
],
},
}],
],
'include_dirs': [
'src/src/mapi',
'src/src/mesa',
'src/src/mesa/drivers',
'<(generated_src_dir)/mesa',
],
'msvs_disabled_warnings': [
4005, 4018, 4065, 4090, 4099, 4291, 4345, 4267,
],
'sources': [
'src/src/mesa/drivers/common/driverfuncs.c',
'src/src/mesa/drivers/common/driverfuncs.h',
'src/src/mesa/drivers/common/meta.c',
'src/src/mesa/drivers/common/meta.h',
'src/src/mesa/drivers/osmesa/osmesa.c',
'src/src/mesa/drivers/osmesa/osmesa.def',
],
'variables': {
'clang_warning_flags_unset': [
# Don't warn about string->bool used in asserts.
'-Wstring-conversion',
],
},
},
],
'conditions': [
['OS=="android"', {
'targets': [
{
# Copies libosmesa.so to the out/$BUILDTYPE/lib/ directory so that
# the write_ordered_libraries.py script won't assume it to be a
# system library. This will cause the library to be stripped allowing
# targets to embed it in the to-be-generated APK.
'target_name': 'osmesa_in_lib_dir',
'type': 'none',
'dependencies': [
'osmesa',
],
'actions': [
{
'action_name': 'copy_libosmesa',
'inputs': ['<(PRODUCT_DIR)/libosmesa.so'],
'outputs': ['<(SHARED_LIB_DIR)/libosmesa.so'],
'action': [
'cp',
'<(PRODUCT_DIR)/libosmesa.so',
'<(SHARED_LIB_DIR)/libosmesa.so',
],
},
],
},
],
}],
],
}
| 37.131474 | 87 | 0.573498 |
496f49b82faea16a49cb1c8cc31320056c703887 | 4,192 | py | Python | src/plugins/github/plugins/github_issue/__init__.py | Inetgeek/QQ-GitHub-Bot | 35c20d28aafaedc1813c6213ede9f2f51e56d5a2 | ["MIT"] | 1 | 2022-03-28T03:48:31.000Z | 2022-03-28T03:48:31.000Z | src/plugins/github/plugins/github_issue/__init__.py | Inetgeek/QQ-GitHub-Bot | 35c20d28aafaedc1813c6213ede9f2f51e56d5a2 | ["MIT"] | null | null | null | src/plugins/github/plugins/github_issue/__init__.py | Inetgeek/QQ-GitHub-Bot | 35c20d28aafaedc1813c6213ede9f2f51e56d5a2 | ["MIT"] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2021-03-09 15:15:02
@LastEditors : yanyongyu
@LastEditTime : 2021-08-25 17:16:42
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
import re
import base64
from typing import Dict
from nonebot import on_regex
from nonebot.typing import T_State
from playwright.async_api import Error
from httpx import HTTPStatusError, TimeoutException
from nonebot.adapters.cqhttp import Bot, MessageEvent, MessageSegment, GroupMessageEvent
from src.utils import only_group
from ... import github_config as config
from ...utils import send_github_message
from ...libs.redis import get_group_bind_repo
from ...libs.issue import get_issue, issue_to_image
# allow using api without token
try:
from ...libs.auth import get_user_token
except ImportError:
get_user_token = None
ISSUE_REGEX = r"^#(?P<number>\d+)$"
REPO_REGEX: str = (r"^(?P<owner>[a-zA-Z0-9][a-zA-Z0-9\-]*)/"
r"(?P<repo>[a-zA-Z0-9_\-\.]+)$")
REPO_ISSUE_REGEX = (r"^(?P<owner>[a-zA-Z0-9][a-zA-Z0-9\-]*)/"
r"(?P<repo>[a-zA-Z0-9_\-\.]+)#(?P<number>\d+)$")
GITHUB_LINK_REGEX = (
r"github\.com/"
r"(?P<owner>[a-zA-Z0-9][a-zA-Z0-9\-]*)/"
r"(?P<repo>[a-zA-Z0-9_\-\.]+)/(?:issues|pull)/(?P<number>\d+)")
issue = on_regex(REPO_ISSUE_REGEX, priority=config.github_command_priority)
issue.__doc__ = """
^owner/repo#number$
    Fetch the specified repository's issue / pr
"""
link = on_regex(GITHUB_LINK_REGEX, priority=config.github_command_priority)
link.__doc__ = """
github.com/owner/repo/issues/number
    Detect a GitHub link and fetch that repository's issue / pr
"""
@issue.handle()
@link.handle()
async def handle(bot: Bot, event: MessageEvent, state: T_State):
group: Dict[str, str] = state["_matched_dict"]
owner = group["owner"]
repo = group["repo"]
number = int(group["number"])
token = None
if get_user_token:
token = get_user_token(event.get_user_id())
try:
issue_ = await get_issue(owner, repo, number, token)
except TimeoutException:
await issue.finish(f"获取issue数据超时!请尝试重试")
return
except HTTPStatusError:
await issue.finish(f"仓库{owner}/{repo}不存在issue#{number}!")
return
try:
img = await issue_to_image(owner, repo, issue_)
except TimeoutException:
await issue.finish(f"获取issue数据超时!请尝试重试")
except Error:
await issue.finish(f"生成图片超时!请尝试重试")
else:
if img:
await send_github_message(
issue_short, owner, repo, number,
MessageSegment.image(
f"base64://{base64.b64encode(img).decode()}"))
issue_short = on_regex(ISSUE_REGEX,
rule=only_group,
priority=config.github_command_priority)
issue_short.__doc__ = """
^#number$
    Fetch the bound repository's issue / pr (requires binding the group to a repository via /bind first)
"""
@issue_short.handle()
async def handle_short(bot: Bot, event: GroupMessageEvent, state: T_State):
group = state["_matched_dict"]
number = int(group["number"])
full_name = get_group_bind_repo(str(event.group_id))
if not full_name:
await issue_short.finish("此群尚未与仓库绑定!")
return
match = re.match(REPO_REGEX, full_name)
if not match:
await issue_short.finish("绑定的仓库名不合法!请重新尝试绑定~")
return
owner = match.group("owner")
repo = match.group("repo")
token = None
if get_user_token:
token = get_user_token(event.get_user_id())
try:
issue_ = await get_issue(owner, repo, number, token)
except TimeoutException:
await issue.finish(f"获取issue数据超时!请尝试重试")
return
except HTTPStatusError:
await issue.finish(f"仓库{owner}/{repo}不存在issue#{number}!")
return
try:
img = await issue_to_image(owner, repo, issue_)
except TimeoutException:
await issue.finish(f"获取issue数据超时!请尝试重试")
except Error:
await issue.finish(f"生成图片超时!请尝试重试")
else:
if img:
await send_github_message(
issue_short, owner, repo, number,
MessageSegment.image(
f"base64://{base64.b64encode(img).decode()}"))
| 30.376812 | 88 | 0.641698 |
713ea5fa6db069d30670da5ee1cf875a305194b2 | 6,519 | py | Python | embuilder.py | nmrugg/emscripten | 4840fb3baa6e820dd2a1ba0ec6aeb9bdb3c422b5 | ["MIT"] | null | null | null | embuilder.py | nmrugg/emscripten | 4840fb3baa6e820dd2a1ba0ec6aeb9bdb3c422b5 | ["MIT"] | null | null | null | embuilder.py | nmrugg/emscripten | 4840fb3baa6e820dd2a1ba0ec6aeb9bdb3c422b5 | ["MIT"] | null | null | null | #!/usr/bin/env python3
# Copyright 2014 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Tool to manage building of system libraries and ports.
In general emcc will build them automatically on demand, so you do not
strictly need to use this tool, but it gives you more control over the
process (in particular, if emcc does this automatically, and you are
running multiple build commands in parallel, confusion can occur).
"""
import argparse
import logging
import sys
import time
from tools import shared
from tools import system_libs
from tools import ports
from tools.settings import settings
import emscripten
SYSTEM_LIBRARIES = system_libs.Library.get_all_variations()
SYSTEM_TASKS = list(SYSTEM_LIBRARIES.keys())
# This is needed to build the generated_struct_info.json file.
# It is not a system library, but it needs to be built before running with FROZEN_CACHE.
SYSTEM_TASKS += ['struct_info']
# Minimal subset of SYSTEM_TASKS used by CI systems to build enough to be useful
MINIMAL_TASKS = [
'libcompiler_rt',
'libc',
'libc++abi',
'libc++abi-except',
'libc++abi-noexcept',
'libc++',
'libc++-except',
'libc++-noexcept',
'libal',
'libdlmalloc',
'libdlmalloc-debug',
'libemmalloc',
'libemmalloc-debug',
'libemmalloc-memvalidate',
'libemmalloc-verbose',
'libemmalloc-memvalidate-verbose',
'libGL',
'libhtml5',
'libsockets',
'libc_rt_wasm',
'libc_rt_wasm-optz',
'struct_info',
'libstandalonewasm',
'crt1',
'libunwind-except'
]
# Variant builds that we want to support for certain ports
# TODO: It would be nice if the ports themselves could specify the variants that they
# support.
PORT_VARIANTS = {
'regal-mt': ('regal', {'USE_PTHREADS': 1}),
'harfbuzz-mt': ('harfbuzz', {'USE_PTHREADS': 1}),
'sdl2-mt': ('sdl2', {'USE_PTHREADS': 1}),
'sdl2_mixer_mp3': ('sdl2_mixer', {'SDL2_MIXER_FORMATS': ["mp3"]}),
'sdl2_mixer_none': ('sdl2_mixer', {'SDL2_MIXER_FORMATS': []}),
'sdl2_image_png': ('sdl2_image', {'SDL2_IMAGE_FORMATS': ["png"]}),
'sdl2_image_jpg': ('sdl2_image', {'SDL2_IMAGE_FORMATS': ["jpg"]}),
}
PORTS = sorted(list(ports.ports_by_name.keys()) + list(PORT_VARIANTS.keys()))
temp_files = shared.configuration.get_temp_files()
logger = logging.getLogger('embuilder')
force = False
legacy_prefixes = {
'libgl': 'libGL',
}
def get_help():
all_tasks = SYSTEM_TASKS + PORTS
all_tasks.sort()
return '''
Available targets:
build %s
Issuing 'embuilder.py build ALL' causes each task to be built.
''' % '\n '.join(all_tasks)
def build_port(port_name):
if port_name in PORT_VARIANTS:
port_name, extra_settings = PORT_VARIANTS[port_name]
old_settings = settings.dict().copy()
for key, value in extra_settings.items():
setattr(settings, key, value)
else:
old_settings = None
if force:
ports.clear_port(port_name, settings)
ports.build_port(port_name, settings)
if old_settings:
settings.dict().update(old_settings)
def main():
global force
all_build_start_time = time.time()
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=get_help())
parser.add_argument('--lto', action='store_true', help='build bitcode object for LTO')
parser.add_argument('--pic', action='store_true',
help='build relocatable objects for suitable for dynamic linking')
parser.add_argument('--force', action='store_true',
help='force rebuild of target (by removing it first)')
parser.add_argument('operation', help='currently only "build" is supported')
parser.add_argument('targets', nargs='+', help='see below')
args = parser.parse_args()
if args.operation != 'build':
shared.exit_with_error('unfamiliar operation: ' + args.operation)
# process flags
# Check sanity so that if settings file has changed, the cache is cleared here.
# Otherwise, the cache will clear in an emcc process, which is invoked while building
# a system library into the cache, causing trouble.
shared.check_sanity()
if args.lto:
settings.LTO = "full"
if args.pic:
settings.RELOCATABLE = 1
if args.force:
force = True
# process tasks
auto_tasks = False
tasks = args.targets
if 'SYSTEM' in tasks:
tasks = SYSTEM_TASKS
auto_tasks = True
elif 'USER' in tasks:
tasks = PORTS
auto_tasks = True
elif 'MINIMAL' in tasks:
tasks = MINIMAL_TASKS
auto_tasks = True
elif 'ALL' in tasks:
tasks = SYSTEM_TASKS + PORTS
auto_tasks = True
if auto_tasks:
# cocos2d: must be ported, errors on
# "Cannot recognize the target platform; are you targeting an unsupported platform?"
skip_tasks = ['cocos2d']
tasks = [x for x in tasks if x not in skip_tasks]
print('Building targets: %s' % ' '.join(tasks))
for what in tasks:
for old, new in legacy_prefixes.items():
if what.startswith(old):
what = what.replace(old, new)
logger.info('building and verifying ' + what)
start_time = time.time()
if what in SYSTEM_LIBRARIES:
library = SYSTEM_LIBRARIES[what]
if force:
library.erase()
library.get_path()
elif what == 'sysroot':
if force:
shared.Cache.erase_file('sysroot_install.stamp')
system_libs.ensure_sysroot()
elif what == 'struct_info':
if force:
shared.Cache.erase_file('generated_struct_info.json')
emscripten.generate_struct_info()
elif what in PORTS:
build_port(what)
else:
logger.error('unfamiliar build target: ' + what)
return 1
time_taken = time.time() - start_time
logger.info('...success. Took %s(%.2fs)' % (('%02d:%02d mins ' % (time_taken // 60, time_taken % 60) if time_taken >= 60 else ''), time_taken))
if len(tasks) > 1:
all_build_time_taken = time.time() - all_build_start_time
logger.info('Built %d targets in %s(%.2fs)' % (len(tasks), ('%02d:%02d mins ' % (all_build_time_taken // 60, all_build_time_taken % 60) if all_build_time_taken >= 60 else ''), all_build_time_taken))
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
logger.warning("KeyboardInterrupt")
sys.exit(1)
| 30.605634 | 202 | 0.681546 |
43a23aba1f95e39c1dd5914830dcab0372b4aad2 | 5,287 | py | Python | behavior_control_patch.py | Moonkrad/Vector | f39701180512a37d17a45e0cbd9d25a5361ef4a3 | ["Apache-2.0"] | null | null | null | behavior_control_patch.py | Moonkrad/Vector | f39701180512a37d17a45e0cbd9d25a5361ef4a3 | ["Apache-2.0"] | null | null | null | behavior_control_patch.py | Moonkrad/Vector | f39701180512a37d17a45e0cbd9d25a5361ef4a3 | ["Apache-2.0"] | null | null | null | --- connection.py Sun Oct 21 18:37:10 2018
+++ connection.py Sun Oct 28 23:04:36 2018
@@ -252,6 +252,22 @@
"""
return self._control_events.lost_event
+ # WV
+ @property
+ def control_granted_event(self) -> asyncio.Event:
+ """This provides an :class:`asyncio.Event` that a user may :func:`wait()` upon to
+ detect when Vector has taken control of the behaviors at a higher priority.
+
+ .. testcode::
+
+ import anki_vector
+
+ async def auto_reconnect(conn: anki_vector.connection.Connection):
+ await conn.control_granted_event.wait()
+ conn.request_control()
+ """
+ return self._control_events.granted_event
+
def request_control(self, timeout: float = 10.0):
"""Explicitly request control. Typically used after detecting :func:`control_lost_event`.
@@ -274,7 +290,8 @@
except futures.TimeoutError as e:
raise exceptions.VectorControlException(f"Surpassed timeout of {timeout}s") from e
- def connect(self, timeout: float = 10.0) -> None:
+ # WV -- Add request_control parameter
+ def connect(self, timeout: float = 10.0, request_control: bool = True) -> None:
"""Connect to Vector. This will start the connection thread which handles all messages
between Vector and Python.
@@ -299,7 +316,7 @@
if self._thread:
raise Exception("\n\nRepeated connections made to open Connection.")
self._ready_signal.clear()
- self._thread = threading.Thread(target=self._connect, args=(timeout,), daemon=True, name="gRPC Connection Handler Thread")
+ self._thread = threading.Thread(target=self._connect, args=(timeout,request_control,), daemon=True, name="gRPC Connection Handler Thread")
self._thread.start()
ready = self._ready_signal.wait(timeout=2 * timeout)
if not ready:
@@ -309,7 +326,8 @@
delattr(self._ready_signal, "exception")
raise e
- def _connect(self, timeout: float) -> None:
+ # WV -- Add request_control parameter
+ def _connect(self, timeout: float, request_control: bool) -> None:
"""The function that runs on the connection thread. This will connect to Vector,
and establish the BehaviorControl stream.
"""
@@ -351,7 +369,9 @@
raise exceptions.VectorInvalidVersionException(version, protocol_version)
self._control_stream_task = self._loop.create_task(self._open_connections())
- self._loop.run_until_complete(self._request_control(timeout=timeout))
+ # WV -->--
+ if (request_control): self._loop.run_until_complete(self._request_control(timeout=timeout))
+ # WV --<--
except Exception as e: # pylint: disable=broad-except
# Propagate the errors to the calling thread
setattr(self._ready_signal, "exception", e)
--- robot.py Sun Oct 21 18:37:10 2018
+++ robot.py Sun Oct 28 22:25:40 2018
@@ -88,7 +88,9 @@
:param enable_vision_mode: Turn on face detection.
:param enable_camera_feed: Turn camera feed on/off.
:param enable_audio_feed: Turn audio feed on/off.
- :param show_viewer: Render camera feed on/off."""
+ :param show_viewer: Render camera feed on/off.
+ :param request_control: Requesting control on connection
+ """
def __init__(self,
serial: str = None,
@@ -100,7 +102,8 @@
enable_vision_mode: bool = False,
enable_camera_feed: bool = False,
enable_audio_feed: bool = False,
- show_viewer: bool = False):
+ show_viewer: bool = False,
+ request_control: bool = True):
if default_logging:
util.setup_basic_logging()
@@ -165,6 +168,10 @@
self._enable_audio_feed = enable_audio_feed
self._show_viewer = show_viewer
+ # WV -->--
+ self._request_control = request_control
+ # WV --<--
+
def _read_configuration(self, serial: str) -> dict:
"""Open the default conf file, and read it into a :class:`configparser.ConfigParser`
@@ -601,7 +608,7 @@
:param timeout: The time to allow for a connection before a
:class:`anki_vector.exceptions.VectorTimeoutException` is raised.
"""
- self.conn.connect(timeout=timeout)
+ self.conn.connect(timeout=timeout, request_control=self._request_control)
self.events.start(self.conn)
# Initialize components
@@ -740,6 +747,10 @@
duration_scalar=duration_scalar)
return await self.conn.grpc_interface.SayText(say_text_request)
+ @connection.on_connection_thread()
+ async def release_control(self) -> None:
+ release_request = protocol.BehaviorControlRequest(control_release=protocol.ControlRelease())
+ self.conn.grpc_interface.BehaviorControl(release_request)
class AsyncRobot(Robot):
"""The AsyncRobot object is just like the Robot object, but allows multiple commands | 44.428571 | 148 | 0.616418 |
e374e2674d7619f538123df98b45610c453a4e0c | 371 | py | Python | requests_auth/version.py | Colin-b/requests_auth | dc971baf73b8dbc7a1d0465c9717b5a3fe4c80ce | ["MIT"] | 24 | 2018-07-23T09:43:00.000Z | 2022-02-07T18:05:21.000Z | requests_auth/version.py | Colin-b/requests_auth | dc971baf73b8dbc7a1d0465c9717b5a3fe4c80ce | ["MIT"] | 48 | 2018-09-06T05:29:08.000Z | 2022-02-14T14:28:59.000Z | requests_auth/version.py | Colin-b/requests_auth | dc971baf73b8dbc7a1d0465c9717b5a3fe4c80ce | ["MIT"] | 11 | 2018-09-29T16:30:39.000Z | 2022-02-23T13:06:41.000Z | # Version number as Major.Minor.Patch
# The version modification must respect the following rules:
# Major should be incremented in case there is a breaking change. (eg: 2.5.8 -> 3.0.0)
# Minor should be incremented in case there is an enhancement. (eg: 2.5.8 -> 2.6.0)
# Patch should be incremented in case there is a bug fix. (eg: 2.5.8 -> 2.5.9)
__version__ = "5.3.0"
| 53 | 86 | 0.71159 |
9360bd1cc67d26e8bc462b0da117ffcabb5f7703 | 1,049 | py | Python | ingenico/direct/sdk/log/sys_out_communicator_logger.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | ["Apache-2.0"] | null | null | null | ingenico/direct/sdk/log/sys_out_communicator_logger.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | ["Apache-2.0"] | 1 | 2021-03-30T12:55:39.000Z | 2021-04-08T08:23:27.000Z | ingenico/direct/sdk/log/sys_out_communicator_logger.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | ["Apache-2.0"] | null | null | null |
from datetime import datetime
from threading import Lock
from ingenico.direct.sdk.log.python_communicator_logger import CommunicatorLogger
class SysOutCommunicatorLogger(CommunicatorLogger):
"""
A communicator logger that prints its message to sys.stdout
It includes a timestamp in yyyy-MM-ddTHH:mm:ss format in the system time zone.
"""
def __init__(self):
CommunicatorLogger.__init__(self)
_global_lock = Lock()
_old_print = print
@staticmethod
def INSTANCE():
return _SYS_OUT_COMMUNICATOR_LOGGER_INSTANCE
def __print(self, *a):
with self._global_lock:
self._old_print(*a)
def log(self, message, thrown=None):
# Make sure the same object is used for locking and printing
self.__print(self.__get_date_prefix() + message)
if thrown:
self.__print(str(thrown))
def __get_date_prefix(self):
return datetime.now().strftime("%Y-%m-%dT%H:%M:%S ")
_SYS_OUT_COMMUNICATOR_LOGGER_INSTANCE = SysOutCommunicatorLogger()
| 26.225 | 82 | 0.698761 |
3fbd1d25a3f63845378e6b107365783d11dcd583 | 995 | py | Python | src/scanner/controls/unix/linux/users/system_accounts_secured.py | alex-dya/security_scanner | 7aeb6af863ccdbf6c066d52446318aaf898afe7b | ["MIT"] | null | null | null | src/scanner/controls/unix/linux/users/system_accounts_secured.py | alex-dya/security_scanner | 7aeb6af863ccdbf6c066d52446318aaf898afe7b | ["MIT"] | 7 | 2021-05-28T14:31:27.000Z | 2022-03-12T00:57:40.000Z | src/scanner/controls/unix/linux/users/system_accounts_secured.py | alex-dya/security_scanner | 7aeb6af863ccdbf6c066d52446318aaf898afe7b | ["MIT"] | 1 | 2021-05-15T12:18:49.000Z | 2021-05-15T12:18:49.000Z | from scanner.const import os
from scanner.functions.unix.passwd_parser import PasswdParser
from scanner.transports import get_transport
from scanner.types import BaseContol, is_item_detected
class Control(BaseContol, control_number=10):
def prerequisite(self):
return is_item_detected(os.LINUX)
def check(self):
transport = get_transport('unix')
result = transport.get_file_content('/etc/passwd')
user_list = [
item.Name
for item in PasswdParser(content=result.Output)
if item.UID < 1000
if item.Name not in ['root', 'sync', 'shutdown', 'halt']
if item.Shell != '/usr/sbin/nologin'
]
if len(user_list) > 0:
self.control.not_compliance(
result=f'{len(user_list)} system accounts are not protected: {",".join(user_list)}'
)
return
self.control.compliance(
result=f'System accounts are secured'
)
| 32.096774 | 99 | 0.623116 |
a66c17131b1131e5873fce39f852c4c9ecb47d4d | 4,397 | py | Python | robot_self_balance_learning/self_balance/envs/robot_env.py | BIDISHA-CHANDA/Hacktoberfest2021-8 | b9dfe2089908b6b6e17a4a2cd2b1be09e6c6f2d8 | ["MIT"] | 15 | 2021-10-03T08:27:22.000Z | 2022-01-10T11:20:56.000Z | robot_self_balance_learning/self_balance/envs/robot_env.py | BIDISHA-CHANDA/Hacktoberfest2021-8 | b9dfe2089908b6b6e17a4a2cd2b1be09e6c6f2d8 | ["MIT"] | 3 | 2021-10-03T08:23:57.000Z | 2021-10-15T05:21:15.000Z | robot_self_balance_learning/self_balance/envs/robot_env.py | BIDISHA-CHANDA/Hacktoberfest2021-8 | b9dfe2089908b6b6e17a4a2cd2b1be09e6c6f2d8 | ["MIT"] | 51 | 2021-10-03T08:20:44.000Z | 2021-10-31T12:51:11.000Z | '''hello everyone! I am Pratyush and below is the code of our 2-wheeled robot environment.
To check different cases, you could change the torque values (by changing the values in the array whose one value, chosen while the bot learns, is assigned to self.tar_vel in the bot_practice_action function) and check whether it improves the stability of the bot.
You could also change the control mode from torque control to velocity control if you want. '''
import os
import time
import math
import numpy as np
import gym
import random
from gym import spaces
from gym.utils import seeding
import pybullet as p
import pybullet_data
class self_balance(gym.Env):
def __init__(self):
self._observation = []
self.action_space = spaces.Discrete(13)
self.observation_space = spaces.Box(low = np.array([-math.pi, -math.pi, -5]),high = np.array([math.pi, math.pi, 5]))
self.physicsClient = p.connect(p.GUI)
self.right_action=[]
self._seed()
p.setAdditionalSearchPath(pybullet_data.getDataPath())
self.i=0
self.vt = 0
def bot_practice_action(self, action):
#random_velocity= np.random.uniform(9.9, 20.08 , 8)
#ml=random.choice([1, -1])
#self.tar_vel= ml*random_velocity[action]
#print(self._observation[0])
#if(self._observation[0]>0):
#self.tar_vel= random_velocity[action]
#if(self._observation[0]<0):
#self.tar_vel= -random_velocity[action]
        self.tar_vel = [-16.3, -16, -15, -14, -13, -12, -11, 11, 12, 13, 14, 15, 16.3][action]
right_joint=1
left_joint=0
Max_velocity=0
p.setJointMotorControl2(bodyUniqueId=self.botId,jointIndex=left_joint,controlMode=p.TORQUE_CONTROL,force=self.tar_vel)
p.setJointMotorControl2(bodyUniqueId=self.botId,jointIndex=right_joint,controlMode=p.TORQUE_CONTROL,force=self.tar_vel)
def tell_observation(self):
cubePos, cubeOrn = p.getBasePositionAndOrientation(self.botId)
cubeEuler = p.getEulerFromQuaternion(cubeOrn)
linear, angular = p.getBaseVelocity(self.botId)
#print(cubeEuler)
return [cubeEuler[0], angular[0], self.tar_vel]
def tell_reward(self):
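        # Clarifying note (added): the reward is pi/2 minus the absolute roll angle of the
        # base, so the bot earns the most reward when it stays perfectly upright.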
unused_variable, cubeOrn = p.getBasePositionAndOrientation(self.botId)
cubeEuler = p.getEulerFromQuaternion(cubeOrn)
return (math.pi/2-abs(cubeEuler[0]))
def check_done(self):
cubePos, _ = p.getBasePositionAndOrientation(self.botId)
if cubePos[2] < 0.05:
self.i+=1
if self.i >1000:
done = True
else:
done = False
else:
done = False
self.i = 0
return done or self.practice_time_interval >= 5200
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
print(seed)
return [seed]
def _step(self, action):
self.bot_practice_action(action)
p.stepSimulation()
#time.sleep(1/240)
self._observation = self.tell_observation()
reward = self.tell_reward()
done = self.check_done()
if(done==1):
self.set()
self.practice_time_interval += 1
return np.array(self._observation), reward, done, {}
def _reset(self):
self.tar_vel = 0
self.vd = 0
self.practice_time_interval = 0
p.resetSimulation()
p.setGravity(0, 0, -9.8)
#p.setTimeStep(0)
planeId = p.loadURDF("plane.urdf")
cubeStartPos = [0, 0, 0.01]
cubeStartOrientation = p.getQuaternionFromEuler([0, 0, 0])
path = os.path.abspath(os.path.dirname(__file__))
self.botId = p.loadURDF(os.path.join(path, "balancebot_simple.urdf"), cubeStartPos, cubeStartOrientation)
self._observation = self.tell_observation()
return np.array(self._observation)
def _render(self, mode='human', close=False):
pass
def set(self):
self.tar_vel = 0
self.vd = 0
self.practice_time_interval = 0
cubeStartPos = [0, 0, 0.01]
cubeStartOrientation = p.getQuaternionFromEuler([0, 0, 0])
p.resetBasePositionAndOrientation(bodyUniqueId=self.botId,posObj=cubeStartPos,ornObj=cubeStartOrientation)
| 32.813433 | 251 | 0.62861 |
0eab9f7f068d0bb8afac10416ea1a2eb4a1a4942 | 5,870 | py | Python | tests/test_original_sa_consistency.py | rwiddhic96/apotoma | 4636d6bf72673471b0c022aa4354170fc7b03ac4 | ["MIT"] | null | null | null | tests/test_original_sa_consistency.py | rwiddhic96/apotoma | 4636d6bf72673471b0c022aa4354170fc7b03ac4 | ["MIT"] | null | null | null | tests/test_original_sa_consistency.py | rwiddhic96/apotoma | 4636d6bf72673471b0c022aa4354170fc7b03ac4 | ["MIT"] | null | null | null | # TODO in this folder we will implement integration tests.
# Integration tests test larger workflows than unit tests and may thus take longer to implement.
# Integration tests are typically executed only when merging into main or selectively during development.
import os
import shutil
import unittest
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model
from apotoma.surprise_adequacy import DSA
from apotoma.surprise_adequacy import LSA
from apotoma.surprise_adequacy import SurpriseAdequacyConfig
class TestSurpriseAdequacyConsistency(unittest.TestCase):
def setUp(self) -> None:
path = '/tmp/data/'
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
self.config = SurpriseAdequacyConfig(saved_path=path, is_classification=True, layer_names=['activation_3'],
ds_name='mnist', num_classes=10, min_var_threshold=1e-5, batch_size=128)
self.model: tf.keras.Model = load_model('./tests/assets/model_mnist_1.h5')
(self.train_data, _), (self.test_data, y_test) = mnist.load_data()
self.train_data = self.train_data.reshape(-1, 28, 28, 1)
self.test_data = self.test_data.reshape(-1, 28, 28, 1)
self.train_data = self.train_data.astype("float32")
self.train_data = (self.train_data / 255.0) - (1.0 - 0.5)
self.test_data = self.test_data.astype("float32")
self.test_data = (self.test_data / 255.0) - (1.0 - 0.5)
def test_train_ats_calculation_against_kims_implementation(self):
datasplit_train, datasplit_test = self.train_data, self.test_data
# HERE you'll calculate the ats on your code
nodes = 10
sa = DSA(self.model, datasplit_train, config=self.config)
ats, pred = sa._calculate_ats(datasplit_train)
# Here you load the values from kims implementation
kim_ats = np.load('./tests/assets/original_mnist_train_activation_3_ats.npy')
kim_pred = np.load('./tests/assets/original_mnist_train_pred.npy')
self.assertIsInstance(ats, np.ndarray)
self.assertEqual(ats.shape, (60000, nodes))
self.assertEqual(ats.dtype, np.float32)
np.testing.assert_almost_equal(ats, kim_ats, decimal=5)
self.assertIsInstance(pred, np.ndarray)
self.assertEqual(pred.shape, (60000,))
self.assertEqual(pred.dtype, np.int)
np.testing.assert_equal(pred, kim_pred)
def test_dsa_is_consistent_with_original_implementation(self):
our_dsa = DSA(model=self.model, train_data=self.train_data, config=self.config)
our_dsa.prep()
test_dsa, predictions = our_dsa.calc(self.test_data, "test", use_cache=False)
original_dsa = np.load("./tests/assets/original_dsa_scores.npy")
np.testing.assert_almost_equal(actual=test_dsa,
desired=original_dsa, decimal=2)
def test_lsa_is_consistent_with_original_implementation(self):
our_lsa = LSA(model=self.model, train_data=self.train_data, config=self.config)
# train_ats, train_pred, kde, removed_rows will be overridden in next steps
our_lsa.prep()
our_lsa.train_ats = np.load("./tests/assets/original_mnist_train_activation_3_ats.npy")
our_lsa.train_pred = np.load("./tests/assets/original_mnist_train_pred.npy")
our_lsa._load_or_create_likelyhood_estimator(use_cache=False)
from_original_test_ats = np.load("./tests/assets/mnist_test_activation_3_ats.npy")
from_original_test_pred = np.load("./tests/assets/mnist_test_pred.npy")
# Method under test
our_lsa = our_lsa._calc_lsa(from_original_test_ats, from_original_test_pred)
original_lsa = np.load("./tests/assets/original_lsa_scores.npy")
np.testing.assert_almost_equal(actual=our_lsa,
desired=original_lsa, decimal=2)
def test_lsa_kdes(self):
nodes = 10
our_lsa = LSA(model=self.model, train_data=self.train_data, config=self.config)
our_lsa.prep()
test_kdes, test_rm_rows = our_lsa._calc_kdes()
self.assertIsInstance(test_kdes, dict)
self.assertIsInstance(test_rm_rows, list)
self.assertEqual(len(test_kdes), nodes)
if len(test_rm_rows) == 0:
self.assertEqual(np.array(test_rm_rows).dtype, float)
else:
self.assertEqual(np.array(test_rm_rows).dtype, int)
def test_output_dim_reduction(self):
def original_implementation(layer_output):
# This is the original dimensionality reduction implemented by Kim et al
# (only thing we replaced is len(dataset) with layer_output.shape[0] and the array conversion)
mapper = map(lambda x: [np.mean(x[..., j]) for j in range(x.shape[-1])],
[layer_output[i] for i in range(layer_output.shape[0])])
return np.array([x for x in mapper])
layer_outputs_1 = np.zeros(shape=(100, 25, 25, 3))
expected = original_implementation(layer_outputs_1)
actual = LSA._output_dim_reduction(layer_outputs_1)
np.testing.assert_almost_equal(expected, actual)
np.random.seed(0)
shape = (100, 20, 20, 3)
layer_outputs_2 = np.random.rand(np.prod(shape)).reshape(shape)
expected = original_implementation(layer_outputs_2)
actual = LSA._output_dim_reduction(layer_outputs_2)
np.testing.assert_almost_equal(expected, actual)
shape = (100, 10, 11, 12, 13)
layer_outputs_3 = np.random.rand(np.prod(shape)).reshape(shape)
expected = original_implementation(layer_outputs_3)
actual = LSA._output_dim_reduction(layer_outputs_3)
np.testing.assert_almost_equal(expected, actual)
| 45.859375 | 117 | 0.687394 |
5d7c21c3eacdcc8b3ee71f1805e54f9e0de55fc4 | 17,977 | py | Python | src/networkx/linalg/algebraicconnectivity.py | MarletteFunding/aws-kube-codesuite | ab4e5ce45416b83bffb947ab8d234df5437f4fca | [
"Apache-2.0"
] | 184 | 2017-12-20T21:50:06.000Z | 2022-03-19T13:24:58.000Z | src/networkx/linalg/algebraicconnectivity.py | MarletteFunding/aws-kube-codesuite | ab4e5ce45416b83bffb947ab8d234df5437f4fca | [
"Apache-2.0"
] | 26 | 2020-03-24T18:07:06.000Z | 2022-03-12T00:12:27.000Z | src/networkx/linalg/algebraicconnectivity.py | MarletteFunding/aws-kube-codesuite | ab4e5ce45416b83bffb947ab8d234df5437f4fca | [
"Apache-2.0"
] | 136 | 2018-01-09T22:52:06.000Z | 2022-02-24T13:26:18.000Z | # -*- coding: utf-8 -*-
"""
Algebraic connectivity and Fiedler vectors of undirected graphs.
"""
__author__ = """ysitu <ysitu@users.noreply.github.com>"""
# Copyright (C) 2014 ysitu <ysitu@users.noreply.github.com>
# All rights reserved.
# BSD license.
from functools import partial
import networkx as nx
from networkx.utils import not_implemented_for
from networkx.utils import reverse_cuthill_mckee_ordering
from re import compile
try:
from numpy import (array, asmatrix, asarray, dot, matrix, ndarray, ones,
reshape, sqrt, zeros)
from numpy.linalg import norm, qr
from numpy.random import normal
from scipy.linalg import eigh, inv
from scipy.sparse import csc_matrix, spdiags
from scipy.sparse.linalg import eigsh, lobpcg
__all__ = ['algebraic_connectivity', 'fiedler_vector', 'spectral_ordering']
except ImportError:
__all__ = []
try:
from scipy.linalg.blas import dasum, daxpy, ddot
except ImportError:
if __all__:
# Make sure the imports succeeded.
# Use minimal replacements if BLAS is unavailable from SciPy.
dasum = partial(norm, ord=1)
ddot = dot
def daxpy(x, y, a):
y += a * x
return y
_tracemin_method = compile('^tracemin(?:_(.*))?$')
class _PCGSolver(object):
"""Preconditioned conjugate gradient method.
"""
def __init__(self, A, M):
self._A = A
self._M = M or (lambda x: x.copy())
def solve(self, B, tol):
B = asarray(B)
X = ndarray(B.shape, order='F')
for j in range(B.shape[1]):
X[:, j] = self._solve(B[:, j], tol)
return X
def _solve(self, b, tol):
A = self._A
M = self._M
tol *= dasum(b)
# Initialize.
x = zeros(b.shape)
r = b.copy()
z = M(r)
rz = ddot(r, z)
p = z.copy()
# Iterate.
while True:
Ap = A(p)
alpha = rz / ddot(p, Ap)
x = daxpy(p, x, a=alpha)
r = daxpy(Ap, r, a=-alpha)
if dasum(r) < tol:
return x
z = M(r)
beta = ddot(r, z)
beta, rz = beta / rz, beta
p = daxpy(p, z, a=beta)
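# Illustrative sketch (editor's addition, not part of networkx): how _PCGSolver can
# be exercised on a small symmetric positive-definite system. The matrix, Jacobi
# preconditioner and right-hand side below are made up for the example.
def _pcg_solver_example():
    import numpy as np
    A = np.array([[4., 1.], [1., 3.]])
    M_inv = np.diag(1. / np.diag(A))   # Jacobi preconditioner: inverse of diag(A)
    solver = _PCGSolver(lambda v: A.dot(v), lambda r: M_inv.dot(r))
    b = np.array([[1.], [2.]])         # one right-hand side, as a column
    return solver.solve(b, tol=1e-10)  # approximately [[1/11], [7/11]]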
class _CholeskySolver(object):
"""Cholesky factorization.
"""
def __init__(self, A):
if not self._cholesky:
raise nx.NetworkXError('Cholesky solver unavailable.')
self._chol = self._cholesky(A)
def solve(self, B):
return self._chol(B)
try:
from scikits.sparse.cholmod import cholesky
_cholesky = cholesky
except ImportError:
_cholesky = None
class _LUSolver(object):
"""LU factorization.
"""
def __init__(self, A):
if not self._splu:
raise nx.NetworkXError('LU solver unavailable.')
self._LU = self._splu(A)
def solve(self, B):
B = asarray(B)
X = ndarray(B.shape, order='F')
for j in range(B.shape[1]):
X[:, j] = self._LU.solve(B[:, j])
return X
try:
from scipy.sparse.linalg import splu
_splu = partial(splu, permc_spec='MMD_AT_PLUS_A', diag_pivot_thresh=0.,
options={'Equil': True, 'SymmetricMode': True})
except ImportError:
_splu = None
def _preprocess_graph(G, weight):
"""Compute edge weights and eliminate zero-weight edges.
"""
if G.is_directed():
H = nx.MultiGraph()
H.add_nodes_from(G)
H.add_weighted_edges_from(((u, v, e.get(weight, 1.))
for u, v, e in G.edges(data=True)
if u != v), weight=weight)
G = H
if not G.is_multigraph():
edges = ((u, v, abs(e.get(weight, 1.)))
for u, v, e in G.edges(data=True) if u != v)
else:
edges = ((u, v, sum(abs(e.get(weight, 1.)) for e in G[u][v].values()))
for u, v in G.edges() if u != v)
H = nx.Graph()
H.add_nodes_from(G)
H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0)
return H
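# Illustrative sketch (editor's addition, not part of networkx): the effect of the
# preprocessing above on a MultiGraph with parallel, negative and zero weights.
def _preprocess_graph_example():
    G = nx.MultiGraph()
    G.add_weighted_edges_from([(0, 1, 2.), (0, 1, -1.), (1, 2, 0.)])
    H = _preprocess_graph(G, 'weight')
    # Parallel edges are combined by summed absolute value (H[0][1]['weight'] == 3.0)
    # and the zero-weight edge (1, 2) is dropped, leaving node 2 isolated in H.
    return H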
def _rcm_estimate(G, nodelist):
"""Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.
"""
G = G.subgraph(nodelist)
order = reverse_cuthill_mckee_ordering(G)
n = len(nodelist)
index = dict(zip(nodelist, range(n)))
x = ndarray(n, dtype=float)
for i, u in enumerate(order):
x[index[u]] = i
x -= (n - 1) / 2.
return x
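# Editor's sketch (not part of networkx): the estimate above is simply each node's
# position in the reverse Cuthill-McKee ordering, shifted so the entries sum to
# zero -- a cheap starting vector for the LOBPCG path used further below.
def _rcm_estimate_example():
    G = nx.path_graph(5)
    x = _rcm_estimate(G, list(G))
    return x, x.sum()  # the sum is (numerically) zero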
def _tracemin_fiedler(L, X, normalized, tol, method):
"""Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
"""
n = X.shape[0]
if normalized:
# Form the normalized Laplacian matrix and determine the eigenvector of
# its nullspace.
e = sqrt(L.diagonal())
D = spdiags(1. / e, [0], n, n, format='csr')
L = D * L * D
e *= 1. / norm(e, 2)
if not normalized:
def project(X):
"""Make X orthogonal to the nullspace of L.
"""
X = asarray(X)
for j in range(X.shape[1]):
X[:, j] -= X[:, j].sum() / n
else:
def project(X):
"""Make X orthogonal to the nullspace of L.
"""
X = asarray(X)
for j in range(X.shape[1]):
X[:, j] -= dot(X[:, j], e) * e
if method is None:
method = 'pcg'
if method == 'pcg':
# See comments below for the semantics of P and D.
def P(x):
x -= asarray(x * X * X.T)[0, :]
if not normalized:
x -= x.sum() / n
else:
x = daxpy(e, x, a=-ddot(x, e))
return x
solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
elif method == 'chol' or method == 'lu':
# Convert A to CSC to suppress SparseEfficiencyWarning.
A = csc_matrix(L, dtype=float, copy=True)
# Force A to be nonsingular. Since A is the Laplacian matrix of a
# connected graph, its rank deficiency is one, and thus one diagonal
# element needs to be modified. Changing it to infinity forces a zero in the
# corresponding element in the solution.
i = (A.indptr[1:] - A.indptr[:-1]).argmax()
A[i, i] = float('inf')
solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
else:
raise nx.NetworkXError('unknown linear system solver.')
# Initialize.
Lnorm = abs(L).sum(axis=1).flatten().max()
project(X)
W = asmatrix(ndarray(X.shape, order='F'))
while True:
# Orthonormalize X.
X = qr(X)[0]
# Compute iteration matrix H.
W[:, :] = L * X
H = X.T * W
sigma, Y = eigh(H, overwrite_a=True)
# Compute the Ritz vectors.
X *= Y
# Test for convergence exploiting the fact that L * X == W * Y.
res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
if res < tol:
break
# Depending on the linear solver to be used, two mathematically
# equivalent formulations are used.
if method == 'pcg':
# Compute X = X - (P * L * P) \ (P * L * X) where
# P = I - [e X] * [e X]' is a projection onto the orthogonal
# complement of [e X].
W *= Y # L * X == W * Y
W -= (W.T * X * X.T).T
project(W)
# Compute the diagonal of P * L * P as a Jacobi preconditioner.
D = L.diagonal().astype(float)
D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
D[D < tol * Lnorm] = 1.
D = 1. / D
# Since TraceMIN is globally convergent, the relative residual can
# be loose.
X -= solver.solve(W, 0.1)
else:
# Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
# projection on the nullspace of L, which will be eliminated.
W[:, :] = solver.solve(X)
project(W)
X = (inv(W.T * X) * W.T).T # Preserves Fortran storage order.
return sigma, asarray(X)
def _get_fiedler_func(method):
"""Return a function that solves the Fiedler eigenvalue problem.
"""
match = _tracemin_method.match(method)
if match:
method = match.group(1)
def find_fiedler(L, x, normalized, tol):
q = 2 if method == 'pcg' else min(4, L.shape[0] - 1)
X = asmatrix(normal(size=(q, L.shape[0]))).T
sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)
return sigma[0], X[:, 0]
elif method == 'lanczos' or method == 'lobpcg':
def find_fiedler(L, x, normalized, tol):
L = csc_matrix(L, dtype=float)
n = L.shape[0]
if normalized:
D = spdiags(1. / sqrt(L.diagonal()), [0], n, n, format='csc')
L = D * L * D
if method == 'lanczos' or n < 10:
# Avoid LOBPCG when n < 10 due to
# https://github.com/scipy/scipy/issues/3592
# https://github.com/scipy/scipy/pull/3594
sigma, X = eigsh(L, 2, which='SM', tol=tol,
return_eigenvectors=True)
return sigma[1], X[:, 1]
else:
X = asarray(asmatrix(x).T)
M = spdiags(1. / L.diagonal(), [0], n, n)
Y = ones(n)
if normalized:
Y /= D.diagonal()
sigma, X = lobpcg(L, X, M=M, Y=asmatrix(Y).T, tol=tol,
maxiter=n, largest=False)
return sigma[0], X[:, 0]
else:
raise nx.NetworkXError("unknown method '%s'." % method)
return find_fiedler
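# Editor's sketch (not part of networkx): every closure returned above shares the
# same (L, x, normalized, tol) signature, so different eigensolvers can be compared
# directly on a small connected graph (SciPy required, at least a few nodes).
def _compare_fiedler_methods(G, tol=1e-8):
    L = nx.laplacian_matrix(G)
    lanczos = _get_fiedler_func('lanczos')(L, None, False, tol)[0]
    tracemin = _get_fiedler_func('tracemin_lu')(L, None, False, tol)[0]
    return lanczos, tracemin  # both approximate the algebraic connectivity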
@not_implemented_for('directed')
def algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-8,
method='tracemin'):
"""Return the algebraic connectivity of an undirected graph.
The algebraic connectivity of a connected undirected graph is the second
smallest eigenvalue of its Laplacian matrix.
Parameters
----------
G : NetworkX graph
An undirected graph.
weight : object, optional
The data key used to determine the weight of each edge. If None, then
each edge has unit weight. Default value: 'weight'.
normalized : bool, optional
Whether the normalized Laplacian matrix is used. Default value: False.
tol : float, optional
Tolerance of relative residual in eigenvalue computation. Default
value: 1e-8.
method : string, optional
Method of eigenvalue computation. It should be one of 'tracemin'
(TraceMIN), 'lanczos' (Lanczos iteration) and 'lobpcg' (LOBPCG).
Default value: 'tracemin'.
The TraceMIN algorithm uses a linear system solver. The following
values allow specifying the solver to be used.
=============== ========================================
Value Solver
=============== ========================================
'tracemin_pcg' Preconditioned conjugate gradient method
'tracemin_chol' Cholesky factorization
'tracemin_lu' LU factorization
=============== ========================================
Returns
-------
algebraic_connectivity : float
Algebraic connectivity.
Raises
------
NetworkXNotImplemented
If G is directed.
NetworkXError
If G has less than two nodes.
Notes
-----
Edge weights are interpreted by their absolute values. For MultiGraph's,
weights of parallel edges are summed. Zero-weighted edges are ignored.
To use Cholesky factorization in the TraceMIN algorithm, the
:samp:`scikits.sparse` package must be installed.
See Also
--------
laplacian_matrix
"""
if len(G) < 2:
raise nx.NetworkXError('graph has less than two nodes.')
G = _preprocess_graph(G, weight)
if not nx.is_connected(G):
return 0.
L = nx.laplacian_matrix(G)
if L.shape[0] == 2:
return 2. * L[0, 0] if not normalized else 2.
find_fiedler = _get_fiedler_func(method)
x = None if method != 'lobpcg' else _rcm_estimate(G, G)
return find_fiedler(L, x, normalized, tol)[0]
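# Illustrative usage (editor's addition, not part of networkx): for the 4-node path
# graph the Laplacian eigenvalues are 0, 2 - sqrt(2), 2 and 2 + sqrt(2), so the
# algebraic connectivity should come out near 0.586.
def _algebraic_connectivity_example():
    G = nx.path_graph(4)
    return algebraic_connectivity(G, tol=1e-10)  # ~ 2 - sqrt(2)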
@not_implemented_for('directed')
def fiedler_vector(G, weight='weight', normalized=False, tol=1e-8,
method='tracemin'):
"""Return the Fiedler vector of a connected undirected graph.
The Fiedler vector of a connected undirected graph is the eigenvector
corresponding to the second smallest eigenvalue of the Laplacian matrix of
the graph.
Parameters
----------
G : NetworkX graph
An undirected graph.
weight : object, optional
The data key used to determine the weight of each edge. If None, then
each edge has unit weight. Default value: 'weight'.
normalized : bool, optional
Whether the normalized Laplacian matrix is used. Default value: False.
tol : float, optional
Tolerance of relative residual in eigenvalue computation. Default
value: 1e-8.
method : string, optional
Method of eigenvalue computation. It should be one of 'tracemin'
(TraceMIN), 'lanczos' (Lanczos iteration) and 'lobpcg' (LOBPCG).
Default value: 'tracemin'.
The TraceMIN algorithm uses a linear system solver. The following
values allow specifying the solver to be used.
=============== ========================================
Value Solver
=============== ========================================
'tracemin_pcg' Preconditioned conjugate gradient method
'tracemin_chol' Cholesky factorization
'tracemin_lu' LU factorization
=============== ========================================
Returns
-------
fiedler_vector : NumPy array of floats.
Fiedler vector.
Raises
------
NetworkXNotImplemented
If G is directed.
NetworkXError
If G has less than two nodes or is not connected.
Notes
-----
Edge weights are interpreted by their absolute values. For MultiGraph's,
weights of parallel edges are summed. Zero-weighted edges are ignored.
To use Cholesky factorization in the TraceMIN algorithm, the
:samp:`scikits.sparse` package must be installed.
See Also
--------
laplacian_matrix
"""
if len(G) < 2:
raise nx.NetworkXError('graph has less than two nodes.')
G = _preprocess_graph(G, weight)
if not nx.is_connected(G):
raise nx.NetworkXError('graph is not connected.')
if len(G) == 2:
return array([1., -1.])
find_fiedler = _get_fiedler_func(method)
L = nx.laplacian_matrix(G)
x = None if method != 'lobpcg' else _rcm_estimate(G, G)
return find_fiedler(L, x, normalized, tol)[1]
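# Illustrative usage (editor's addition, not part of networkx): the signs of the
# Fiedler vector give a natural bipartition; for a path graph the vector is
# monotone along the path, so the split separates the two halves.
def _fiedler_vector_example():
    G = nx.path_graph(4)
    v = fiedler_vector(G, tol=1e-10)
    negative = [n for n, x in zip(G, v) if x < 0]
    nonnegative = [n for n, x in zip(G, v) if x >= 0]
    return negative, nonnegative  # e.g. [0, 1] and [2, 3] (signs may be flipped)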
def spectral_ordering(G, weight='weight', normalized=False, tol=1e-8,
method='tracemin'):
"""Compute the spectral_ordering of a graph.
The spectral ordering of a graph is an ordering of its nodes where nodes
in the same weakly connected component appear contiguously and are ordered by
their corresponding elements in the Fiedler vector of the component.
Parameters
----------
G : NetworkX graph
A graph.
weight : object, optional
The data key used to determine the weight of each edge. If None, then
each edge has unit weight. Default value: 'weight'.
normalized : bool, optional
Whether the normalized Laplacian matrix is used. Default value: False.
tol : float, optional
Tolerance of relative residual in eigenvalue computation. Default
value: 1e-8.
method : string, optional
Method of eigenvalue computation. It should be one of 'tracemin'
(TraceMIN), 'lanczos' (Lanczos iteration) and 'lobpcg' (LOBPCG).
Default value: 'tracemin'.
The TraceMIN algorithm uses a linear system solver. The following
values allow specifying the solver to be used.
=============== ========================================
Value Solver
=============== ========================================
'tracemin_pcg' Preconditioned conjugate gradient method
'tracemin_chol' Cholesky factorization
'tracemin_lu' LU factorization
=============== ========================================
Returns
-------
spectral_ordering : NumPy array of floats.
Spectral ordering of nodes.
Raises
------
NetworkXError
If G is empty.
Notes
-----
Edge weights are interpreted by their absolute values. For MultiGraph's,
weights of parallel edges are summed. Zero-weighted edges are ignored.
To use Cholesky factorization in the TraceMIN algorithm, the
:samp:`scikits.sparse` package must be installed.
See Also
--------
laplacian_matrix
"""
if len(G) == 0:
raise nx.NetworkXError('graph is empty.')
G = _preprocess_graph(G, weight)
find_fiedler = _get_fiedler_func(method)
order = []
for component in nx.connected_components(G):
size = len(component)
if size > 2:
L = nx.laplacian_matrix(G, component)
x = None if method != 'lobpcg' else _rcm_estimate(G, component)
fiedler = find_fiedler(L, x, normalized, tol)[1]
order.extend(
u for x, c, u in sorted(zip(fiedler, range(size), component)))
else:
order.extend(component)
return order
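# Illustrative usage (editor's addition, not part of networkx): each connected
# component stays contiguous in the ordering; the 2-node component is appended
# without solving an eigenproblem.
def _spectral_ordering_example():
    G = nx.cycle_graph(4)
    G.add_edge(10, 11)
    return spectral_ordering(G)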
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
import scipy.sparse
except ImportError:
raise SkipTest('SciPy not available.')
| 32.332734 | 79 | 0.560271 |
3fe5e056df3ad72ba732459970e150ce86d617d0 | 3,276 | py | Python | src/commands/fleet.py | jportner/kibbe | 72d62ba0c1ca206430fc606c72d19eace6329e41 | [
"MIT"
] | 2 | 2021-08-17T15:04:38.000Z | 2021-12-28T15:41:42.000Z | src/commands/fleet.py | jportner/kibbe | 72d62ba0c1ca206430fc606c72d19eace6329e41 | [
"MIT"
] | 7 | 2021-08-24T07:53:19.000Z | 2022-01-26T20:45:24.000Z | src/commands/fleet.py | jportner/kibbe | 72d62ba0c1ca206430fc606c72d19eace6329e41 | [
"MIT"
] | 2 | 2021-07-15T16:19:44.000Z | 2022-01-19T20:25:30.000Z | import subprocess
import ipaddress
from termcolor import colored
from src.util import is_es_running, is_kibana_running
import click
import re
_RE_COMBINE_WHITESPACE = re.compile(r"\s+")
@click.command()
@click.option(
"--snapshot",
default="docker.elastic.co/beats/elastic-agent:8.0.0-SNAPSHOT",
help=(
"If you want to specify a different snapshot. Default to"
" docker.elastic.co/beats/elastic-agent:8.0.0-SNAPSHOT "
),
)
@click.option(
"--docker-ip",
help=(
"The docker host IP. Kibbe will try to autodetect it but you can overwrite it"
" with this option"
),
)
@click.option(
"--run",
is_flag=True,
help=(
"It will attempt to run fleet server locally. You should have kibana and"
" elastic search running locally already"
),
)
def fleet(run, snapshot, docker_ip):
"""
Utilities to work with the security solutions fleet server locally
"""
if run:
if not is_kibana_running():
click.echo(
colored(
"Kibana is not running. Kibana must be runnnig to run fleet via"
" this helper",
"red",
)
)
raise click.ClickException("Kibana not running")
if not is_es_running():
click.echo(
colored(
"Elasticsearch is not running. Elasticsearch must be runnnig to"
" run fleet via this helper",
"red",
)
)
raise click.ClickException("Elasticsearch not running")
docker_ip = get_docker_ip(docker_ip)
click.echo(" - Autodetected docker host ip: " + colored(docker_ip, "blue"))
docker_command = """docker run \
--restart no \
--add-host kibana:{host_ip} \
--add-host elasticsearch:{host_ip} \
--add-host fleetserver:127.0.0.1 \
-e KIBANA_HOST=http://kibana:5601 \
-e KIBANA_USERNAME=elastic \
-e KIBANA_PASSWORD=changeme \
-e ELASTICSEARCH_HOST=http://elasticsearch:9200 \
-e ELASTICSEARCH_USERNAME=elastic \
-e ELASTICSEARCH_PASSWORD=changeme \
-e FLEET_SERVER_HOST=0.0.0.0 \
-e FLEET_INSECURE=1 \
-e KIBANA_FLEET_SETUP=1 \
-e FLEET_SERVER_ENABLE=1 \
-e FLEET_SERVER_INSECURE_HTTP=1 \
-p 8220:8220 {snapshot}
""".replace(
"\n", ""
)
docker_command = docker_command.format(host_ip=docker_ip, snapshot=snapshot)
docker_command = _RE_COMBINE_WHITESPACE.sub(" ", docker_command).strip()
click.echo(" - Will run docker with:\n")
click.echo(colored(docker_command, "yellow"))
docker_command = docker_command.split(" ")
subprocess.run(docker_command)
def get_docker_ip(default_ip):
ip = default_ip if default_ip and len(default_ip) > 0 else "172.17.0.1"
possible_ip = subprocess.getoutput(
"docker network inspect bridge -f '{{range .IPAM.Config}}{{.Gateway}}{{end}}'"
)
try:
ipaddress.ip_address(possible_ip)
ip = possible_ip
except ValueError:
pass
return ip
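# Editor's sketch (assumption, not part of kibbe): the same validate-or-fall-back
# pattern as get_docker_ip above, isolated so it can be exercised without docker.
def _pick_ip(candidate, fallback="172.17.0.1"):
    try:
        ipaddress.ip_address(candidate)
        return candidate
    except ValueError:
        return fallback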
| 31.805825 | 86 | 0.583333 |
4885e74cb14b96a97af1ffdead85b338865c28c3 | 45 | py | Python | xtremettt.py | wooky/xtremettt | e8088fce6ddc8a01ce9530a6bb5eb4434328bef5 | [
"WTFPL"
] | null | null | null | xtremettt.py | wooky/xtremettt | e8088fce6ddc8a01ce9530a6bb5eb4434328bef5 | [
"WTFPL"
] | null | null | null | xtremettt.py | wooky/xtremettt | e8088fce6ddc8a01ce9530a6bb5eb4434328bef5 | [
"WTFPL"
] | null | null | null | if __name__ == "__main__":
import logic.main | 22.5 | 26 | 0.733333 |
9b821faf3bc4245c82788bb6e66dc698e6ffb725 | 3,293 | py | Python | nusex/spec/nsc.py | Jonxslays/nusex | 8ec8a628f3ba253b218d966be41e231ed449c264 | [
"BSD-3-Clause"
] | 3 | 2021-07-30T20:47:35.000Z | 2021-07-31T20:18:30.000Z | nusex/spec/nsc.py | Jonxslays/nusex | 8ec8a628f3ba253b218d966be41e231ed449c264 | [
"BSD-3-Clause"
] | 20 | 2021-10-04T21:28:45.000Z | 2021-12-13T17:12:10.000Z | nusex/spec/nsc.py | Jonxslays/nusex | 8ec8a628f3ba253b218d966be41e231ed449c264 | [
"BSD-3-Clause"
] | 2 | 2021-10-08T02:22:30.000Z | 2021-11-05T00:41:35.000Z | # Copyright (c) 2021, Ethan Henderson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from nusex import CONFIG_FILE
from nusex.errors import UnsupportedFile
SPEC_ID = b"\x99\x63"
class NSCSpecIO:
__slots__ = ("defaults",)
def __init__(self):
self.defaults = {
"profile": "default",
"last_update": "000101",
"use_wildmatch_ignore": False,
"auto_update": False,
}
def read(self):
data = self.defaults.copy()
with open(CONFIG_FILE, "rb") as f:
# Validate format.
if f.read(2) != SPEC_ID:
raise UnsupportedFile("Not a valid NSC file")
# Load profile data.
data["profile"] = f.read(24).decode().strip()
date = f.read(6).decode()
try:
int(date)
data["last_update"] = date
except ValueError:
# Some invalid or broken config.
...
# Not guaranteed from here.
attrs = ("use_wildmatch_ignore", "auto_update")
for attr in attrs:
try:
data[attr] = f.read(1) == b"\x01"
except Exception:
# Most likely no more options to read, so exit.
break
return data
def write(self, data):
with open(CONFIG_FILE, "wb") as f:
# Identify format.
f.write(SPEC_ID)
# Write data.
f.write(data["profile"].ljust(24).encode())
f.write(data["last_update"].encode())
# Not guaranteed, so write a default value if not present.
f.write((b"\x00", b"\x01")[data.get("use_wildmatch_ignore", 0)])
f.write((b"\x00", b"\x01")[data.get("auto_update", 0)])
| 38.290698 | 80 | 0.633465 |
c673a898e9a307bc3ca5314df30b6501a0f05e8b | 1,897 | py | Python | client/states/playing.py | AndrewIjano/distributed-tic-tac-toe | 8ac050ce45d0ad3a0a7d06c5cea1850b8cdb9f01 | [
"MIT"
] | null | null | null | client/states/playing.py | AndrewIjano/distributed-tic-tac-toe | 8ac050ce45d0ad3a0a7d06c5cea1850b8cdb9f01 | [
"MIT"
] | null | null | null | client/states/playing.py | AndrewIjano/distributed-tic-tac-toe | 8ac050ce45d0ad3a0a7d06c5cea1850b8cdb9f01 | [
"MIT"
] | null | null | null | from client.states import waiting, logged_in
from client.states.base import State
from client.commands import Command
from client.exceptions import MoveAlreadyDone, MoveOutOfBounds
class PlayingState(State):
def __init__(self, client) -> None:
super().__init__(client)
print(f"Your turn. You are {self.client.mark.value}")
def handle_input_command(self, command, *args):
return {
Command.SEND: self._handle_send,
Command.DELAY: self._handle_delay,
Command.END: self._handle_end,
Command.SKIP: self._handle_skip,
}.get(Command(command), self._handle_default)(*args)
def _handle_send(self, row, col):
try:
self.client.board.add_move(row, col)
self.client.opponent.send_move(row, col)
self.client.board.show()
if self.client.board.is_game_ended:
print("It's a tie!" if self.client.board.is_tie else "You win!")
self.client.opponent.close_connection()
self.client.server.send_game_result(
self.client.user,
self.client.opponent.username,
is_tie=self.client.board.is_tie,
)
self.client.change_state(logged_in.LoggedInState(self.client))
else:
self.client.change_state(waiting.WaitingState(self.client))
except MoveOutOfBounds:
print(f"Move {row} {col} is out of bounds!")
except MoveAlreadyDone:
print(f"Move {row} {col} is already done!")
def _handle_delay(self):
print(f"Current measured latency:")
for delay in self.client.opponent.delays:
print(f"{delay: 7.3f}ms")
def _handle_end(self):
self.client.opponent.end_game()
self.client.change_state(logged_in.LoggedInState(self.client))
| 37.94 | 80 | 0.619399 |