| text (string, 6-947k chars) | repo_name (string, 5-100) | path (string, 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .job_information_basic import JobInformationBasic
class JobInformation(JobInformationBasic):
"""The extended Data Lake Analytics job information properties returned when
retrieving a specific job.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar job_id: the job's unique identifier (a GUID).
:vartype job_id: str
:param name: the friendly name of the job.
:type name: str
:param type: the job type of the current job (Hive, USql, or Scope (for
internal use only)). Possible values include: 'USql', 'Hive', 'Scope'
:type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType
:ivar submitter: the user or account that submitted the job.
:vartype submitter: str
:param degree_of_parallelism: the degree of parallelism used for this job.
 This must be greater than 0; if set to less than 0, it will default to 1.
 Default value: 1.
:type degree_of_parallelism: int
:param priority: the priority value for the current job. Lower numbers
have a higher priority. By default, a job has a priority of 1000. This
must be greater than 0.
:type priority: int
:ivar submit_time: the time the job was submitted to the service.
:vartype submit_time: datetime
:ivar start_time: the start time of the job.
:vartype start_time: datetime
:ivar end_time: the completion time of the job.
:vartype end_time: datetime
:ivar state: the job state. When the job is in the Ended state, refer to
Result and ErrorMessage for details. Possible values include: 'Accepted',
'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling',
'Starting', 'Paused', 'WaitingForCapacity'
:vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState
:ivar result: the result of job execution or the current result of the
running job. Possible values include: 'None', 'Succeeded', 'Cancelled',
'Failed'
:vartype result: str or
~azure.mgmt.datalake.analytics.job.models.JobResult
:ivar log_folder: the log folder path to use in the following format:
adl://<accountName>.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/.
:vartype log_folder: str
:param log_file_patterns: the list of log file name patterns to find in
the logFolder. '*' is the only matching character allowed. Example format:
jobExecution*.log or *mylog*.txt
:type log_file_patterns: list[str]
:param related: the recurring job relationship information properties.
:type related:
~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties
:param tags: the key-value pairs used to add additional metadata to the
job information. (Only for use internally with Scope job type.)
:type tags: dict[str, str]
:ivar error_message: the error message details for the job, if the job
failed.
:vartype error_message:
list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails]
:ivar state_audit_records: the job state audit records, indicating when
various operations have been performed on this job.
:vartype state_audit_records:
list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord]
:param properties: the job specific properties.
:type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties
"""
_validation = {
'job_id': {'readonly': True},
'name': {'required': True},
'type': {'required': True},
'submitter': {'readonly': True},
'submit_time': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'state': {'readonly': True},
'result': {'readonly': True},
'log_folder': {'readonly': True},
'error_message': {'readonly': True},
'state_audit_records': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'job_id': {'key': 'jobId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'JobType'},
'submitter': {'key': 'submitter', 'type': 'str'},
'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
'priority': {'key': 'priority', 'type': 'int'},
'submit_time': {'key': 'submitTime', 'type': 'iso-8601'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'JobState'},
'result': {'key': 'result', 'type': 'JobResult'},
'log_folder': {'key': 'logFolder', 'type': 'str'},
'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'},
'related': {'key': 'related', 'type': 'JobRelationshipProperties'},
'tags': {'key': 'tags', 'type': '{str}'},
'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'},
'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'},
'properties': {'key': 'properties', 'type': 'JobProperties'},
}
def __init__(self, name, type, properties, degree_of_parallelism=1, priority=None, log_file_patterns=None, related=None, tags=None):
super(JobInformation, self).__init__(name=name, type=type, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related, tags=tags)
self.error_message = None
self.state_audit_records = None
self.properties = properties
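# A minimal usage sketch (illustrative, not part of this generated module;
# USqlJobProperties is assumed to be the sibling model used for U-SQL jobs):
#
#   from azure.mgmt.datalake.analytics.job.models import (
#       JobInformation, USqlJobProperties)
#
#   props = USqlJobProperties(script="@a = SELECT * FROM ...;")
#   job = JobInformation(name="my job", type="USql", properties=props)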
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py
|
Python
|
mit
| 6,107
| 0.000655
|
def neighbors(node, all_nodes):
dirs = [[0, 1], [1, 0], [-1, 0], [0, -1]]
ddirs = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
result = set()
# cdef bool x
    for dir in dirs:
        nx, ny = node[0] + dir[0], node[1] + dir[1]
        if nx < 0 or ny < 0:
            # Negative indices would silently wrap around in Python,
            # so out-of-bounds cells must be rejected explicitly.
            continue
        try:
            all_nodes[nx][ny]
        except IndexError:
            pass
        else:
            result.add((nx, ny))
    for dir in ddirs:
        nx, ny = node[0] + dir[0], node[1] + dir[1]
        if nx < 0 or ny < 0:  # reject negative indices (they would wrap around)
            continue
        try:
            all_nodes[nx][ny]
        except IndexError:
            pass
else:
x, y = False, False
for r in result:
if nx - 1 == r[0] and ny == r[1]:
x = True
elif nx + 1 == r[0] and ny == r[1]:
x = True
if ny - 1 == r[1] and nx == r[0]:
y = True
elif ny + 1 == r[1] and nx == r[0]:
y = True
if y and x:
result.add((nx, ny))
return result
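# Behaviour sketch (illustrative): on a rectangular grid given as a list of
# lists, interior cells get all 8 neighbours, while a diagonal is only kept
# when both of its adjacent orthogonal cells are reachable, which prevents
# paths from cutting corners.
#
#   grid = [[0] * 3 for _ in range(3)]
#   assert neighbors((0, 0), grid) == {(0, 1), (1, 0), (1, 1)}
#   assert len(neighbors((1, 1), grid)) == 8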
def get_score(c, node, goal, heightmap):
    """Score a candidate cell: its current score plus a move cost, plus a
    weighted Manhattan estimate to the goal, minus the terrain height at
    the candidate cell."""
    score = c.score
    # Diagonal steps cost 14, orthogonal steps 10 (the usual integer
    # approximation of sqrt(2) and 1, scaled by 10).
    if c.node[0] != node[0] and c.node[1] != node[1]:
        score += 14
    else:
        score += 10
    # Manhattan distance from the candidate cell to the goal, weighted by 5.
    gx = abs(goal[0] - c.node[0])
    gy = abs(goal[1] - c.node[1])
    score += (gx + gy) * 5
    # Higher terrain at the candidate cell lowers the score.
    penalty = heightmap[c.node[0]][c.node[1]] * 1
    # print(score, "penalty:", penalty)
    score -= penalty
    return score
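# Worked example (illustrative): a freshly created candidate has c.score == 0;
# stepping diagonally to c.node == (1, 1) from node == (0, 0) toward
# goal == (4, 3), with heightmap[1][1] == 4, yields
#   0 + 14 + (abs(4 - 1) + abs(3 - 1)) * 5 - 4 == 35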
class Candidate:
def __init__(self, node, lastnode=None):
self.node = node
self.score = 0
self.visited = False
self.lastnode = lastnode
def get_path(all_nodes, node, goal, heightmap):
open_list = []
closed_list = []
path_list = []
final_list = []
start = Candidate(node, None)
current = Candidate(node, start)
count, current.count = 0, 0
while current.node != goal:
candidates = []
for n in neighbors(current.node, all_nodes):
c = Candidate(n, current)
candidates.append(c)
for c in candidates:
closed = False
for cc in closed_list:
if c.node == cc.node:
closed = True
for co in open_list:
if co.node == c.node:
closed = True
if not closed:
c.count = count
count += 1
c.score = get_score(c, current.node, goal, heightmap)
open_list.append(c)
        # The open list is ordered by insertion count (FIFO), so nodes are
        # expanded breadth-first rather than by best score.
        open_list = sorted(
            open_list,
            key=lambda x: x.count,
            reverse=False
        )
if len(open_list) > 0:
# count += 1
next_c = open_list[0]
closed_list.append(next_c)
current = next_c
open_list.remove(next_c)
else:
print("Goal not found. Node {0} broke it.".format(node))
break
nextnode = current # goal
path_list = [nextnode.node]
while nextnode.node != start.node:
nextnode = nextnode.lastnode
path_list.append(nextnode.node)
for c in reversed(path_list):
final_list.append(c)
if len(final_list) > 0:
print("Pathfinding successful!")
print("Steps: {0}".format(len(final_list)))
return final_list, True
else:
print("ERROR: Pathfinding went wrong, returning to start.")
        final_list = [start.node]
return final_list, False
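# Illustrative call (a flat 5x5 grid doubles as both the node grid and the
# heightmap here):
#
#   grid = [[0] * 5 for _ in range(5)]
#   path, found = get_path(grid, (0, 0), (4, 4), grid)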
|
NiclasEriksen/importANT
|
pypf.py
|
Python
|
mit
| 3,410
| 0
|
import demistomock as demisto
from CommonServerPython import BaseClient
import BitSightForSecurityPerformanceManagement as bitsight
from datetime import datetime
def test_get_companies_guid_command(mocker):
# Positive Scenario
client = bitsight.Client(base_url='https://test.com')
res = {"my_company": {"guid": "123"}, "companies": [{"name": "abc", "shortname": "abc", "guid": "123"}]}
mocker.patch.object(BaseClient, '_http_request', return_value=res)
_, outputs, _ = bitsight.get_companies_guid_command(client)
assert outputs[0].get('guid') == '123'
def test_get_company_details_command(mocker):
inp_args = {'guid': '123'}
client = bitsight.Client(base_url='https://test.com')
res = {"name": "abc"}
mocker.patch.object(BaseClient, '_http_request', return_value=res)
_, outputs, _ = bitsight.get_company_details_command(client, inp_args)
assert outputs.get('name') == 'abc'
def test_get_company_findings_command(mocker):
inp_args = {'guid': '123', 'first_seen': '2021-01-01', 'last_seen': '2021-01-02'}
client = bitsight.Client(base_url='https://test.com')
res = {"results": [{"severity": "severe"}]}
mocker.patch.object(BaseClient, '_http_request', return_value=res)
_, outputs, _ = bitsight.get_company_findings_command(client, inp_args)
assert outputs[0].get('severity') == 'severe'
def test_fetch_incidents(mocker):
inp_args = {'guid': '123', 'findings_min_severity': 'severe', 'findings_grade': 'WARN',
'findings_asset_category': 'high', 'risk_vector': 'breaches,dkim'}
client = bitsight.Client(base_url='https://test.com')
mocker.patch.object(demisto, 'params', return_value=inp_args)
res = {"results": [{"severity": "severe", "first_seen": "2021-02-01", "temporary_id": "temp1"}]}
mocker.patch.object(BaseClient, '_http_request', return_value=res)
last_run, events = bitsight.fetch_incidents(client=client,
last_run={'time': '2020-12-01T01:01:01Z'},
params=inp_args)
curr_date = datetime.now().strftime('%Y-%m-%d')
assert curr_date in last_run['time']
assert events == [{'name': 'BitSight Finding - temp1', 'occurred': '2021-02-01T00:00:00Z',
'rawJSON': '{"severity": "severe", "first_seen": "2021-02-01", "temporary_id": "temp1"}'}]
|
demisto/content
|
Packs/BitSight/Integrations/BitSightForSecurityPerformanceManagement/BitSightForSecurityPerformanceManagement_test.py
|
Python
|
mit
| 2,406
| 0.003325
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Mechanical Turk"
prefix = "mechanicalturk"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
AcceptQualificationRequest = Action("AcceptQualificationRequest")
ApproveAssignment = Action("ApproveAssignment")
ApproveRejectedAssignment = Action("ApproveRejectedAssignment")
AssignQualification = Action("AssignQualification")
AssociateQualificationWithWorker = Action("AssociateQualificationWithWorker")
BlockWorker = Action("BlockWorker")
ChangeHITTypeOfHIT = Action("ChangeHITTypeOfHIT")
CreateAdditionalAssignmentsForHIT = Action("CreateAdditionalAssignmentsForHIT")
CreateHIT = Action("CreateHIT")
CreateHITType = Action("CreateHITType")
CreateHITWithHITType = Action("CreateHITWithHITType")
CreateQualificationType = Action("CreateQualificationType")
CreateWorkerBlock = Action("CreateWorkerBlock")
DeleteHIT = Action("DeleteHIT")
DeleteQualificationType = Action("DeleteQualificationType")
DeleteWorkerBlock = Action("DeleteWorkerBlock")
DisableHIT = Action("DisableHIT")
DisassociateQualificationFromWorker = Action("DisassociateQualificationFromWorker")
DisposeHIT = Action("DisposeHIT")
DisposeQualificationType = Action("DisposeQualificationType")
ExtendHIT = Action("ExtendHIT")
ForceExpireHIT = Action("ForceExpireHIT")
GetAccountBalance = Action("GetAccountBalance")
GetAssignment = Action("GetAssignment")
GetAssignmentsForHIT = Action("GetAssignmentsForHIT")
GetBlockedWorkers = Action("GetBlockedWorkers")
GetBonusPayments = Action("GetBonusPayments")
GetFileUploadURL = Action("GetFileUploadURL")
GetHIT = Action("GetHIT")
GetHITsForQualificationType = Action("GetHITsForQualificationType")
GetQualificationRequests = Action("GetQualificationRequests")
GetQualificationScore = Action("GetQualificationScore")
GetQualificationType = Action("GetQualificationType")
GetQualificationsForQualificationType = Action("GetQualificationsForQualificationType")
GetRequesterStatistic = Action("GetRequesterStatistic")
GetRequesterWorkerStatistic = Action("GetRequesterWorkerStatistic")
GetReviewResultsForHIT = Action("GetReviewResultsForHIT")
GetReviewableHITs = Action("GetReviewableHITs")
GrantBonus = Action("GrantBonus")
GrantQualification = Action("GrantQualification")
ListAssignmentsForHIT = Action("ListAssignmentsForHIT")
ListBonusPayments = Action("ListBonusPayments")
ListHITs = Action("ListHITs")
ListHITsForQualificationType = Action("ListHITsForQualificationType")
ListQualificationRequests = Action("ListQualificationRequests")
ListQualificationTypes = Action("ListQualificationTypes")
ListReviewPolicyResultsForHIT = Action("ListReviewPolicyResultsForHIT")
ListReviewableHITs = Action("ListReviewableHITs")
ListWorkerBlocks = Action("ListWorkerBlocks")
ListWorkersWithQualificationType = Action("ListWorkersWithQualificationType")
NotifyWorkers = Action("NotifyWorkers")
RegisterHITType = Action("RegisterHITType")
RejectAssignment = Action("RejectAssignment")
RejectQualificationRequest = Action("RejectQualificationRequest")
RevokeQualification = Action("RevokeQualification")
SearchHITs = Action("SearchHITs")
SearchQualificationTypes = Action("SearchQualificationTypes")
SendBonus = Action("SendBonus")
SendTestEventNotification = Action("SendTestEventNotification")
SetHITAsReviewing = Action("SetHITAsReviewing")
SetHITTypeNotification = Action("SetHITTypeNotification")
UnblockWorker = Action("UnblockWorker")
UpdateExpirationForHIT = Action("UpdateExpirationForHIT")
UpdateHITReviewStatus = Action("UpdateHITReviewStatus")
UpdateHITTypeOfHIT = Action("UpdateHITTypeOfHIT")
UpdateNotificationSettings = Action("UpdateNotificationSettings")
UpdateQualificationScore = Action("UpdateQualificationScore")
UpdateQualificationType = Action("UpdateQualificationType")
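# Illustrative use with the awacs policy helpers (Allow, Policy and Statement
# live in awacs.aws; this sketch is not part of the generated module):
#
#   from awacs.aws import Allow, Policy, Statement
#
#   policy = Policy(Statement=[
#       Statement(Effect=Allow, Action=[CreateHIT, ListHITs], Resource=["*"])])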
|
cloudtools/awacs
|
awacs/mechanicalturk.py
|
Python
|
bsd-2-clause
| 4,190
| 0.000716
|
# Copyright 2015 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.db.device_manager import ( # noqa
hd_models)
from networking_cisco.plugins.cisco.db.l3 import ( # noqa
ha_db)
from networking_cisco.plugins.cisco.db.l3 import ( # noqa
l3_models)
from networking_cisco.plugins.ml2.drivers.cisco.n1kv import ( # noqa
n1kv_models)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import ( # noqa
nexus_models_v2)
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import ( # noqa
ucsm_model)
def get_metadata():
return bc.model_base.BASEV2.metadata
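# Typically consumed by alembic's env.py when autogenerating migrations
# (illustrative sketch):
#
#   from networking_cisco.db.migration.models import head
#   target_metadata = head.get_metadata()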
|
Gitweijie/first_project
|
networking_cisco/db/migration/models/head.py
|
Python
|
apache-2.0
| 1,228
| 0
|
''' -- imports from python libraries -- '''
import os
import csv
import json
import ast
import time
import datetime
''' imports from installed packages '''
from django.core.management.base import BaseCommand, CommandError
from mongokit import IS
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import DATA_TYPE_CHOICES
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.models import Node
from gnowsys_ndf.ndf.models import GSystemType, AttributeType, RelationType
from gnowsys_ndf.ndf.models import GSystem, GAttribute, GRelation
from gnowsys_ndf.ndf.views.methods import create_gattribute, create_grelation, create_college_group_and_setup_data
from gnowsys_ndf.ndf.views.methods import get_student_enrollment_code
####################################################################################################################
# TODO:
# 1) Name of attributes/relation in property_order field needs to be replaced with their respective ObjectIds
# 2) The regex query needs to be modified: currently it does not handle names containing spaces
#    - it only matches terms up to the first space
SCHEMA_ROOT = os.path.join( os.path.dirname(__file__), "schema_files" )
log_list = [] # To hold intermediate errors
log_list.append("\n######### Script run on : " + time.strftime("%c") + " #########\n############################################################\n")
is_json_file_exists = False
gsystem_type_node = None
gsystem_type_id = None
gsystem_type_name = ""
home_grp = node_collection.one({'_type': "Group", 'name': "home"})
group_id = home_grp._id
user_id = 1
mis_group = node_collection.one({
'_type': "Group",
'$or': [{
'name': {'$regex': u"MIS_admin", '$options': 'i'}
}, {
'altnames': {'$regex': u"MIS_admin", '$options': 'i'}
}],
'group_type': "PRIVATE"
}, {
'created_by': 1
})
if mis_group is not None:
group_id = mis_group._id
user_id = mis_group.created_by # User who created the above private group
college_gst = node_collection.one({
"_type": "GSystemType", "name": "College"
})
college_dict = {}
college_name_dict = {}
attr_type_dict = {}
rel_type_dict = {}
create_student_enrollment_code = False
create_private_college_group = False
node_repeated = False
class Command(BaseCommand):
help = "Based on "
def handle(self, *args, **options):
try:
for file_name in args:
file_path = os.path.join(SCHEMA_ROOT, file_name)
global gsystem_type_node
global gsystem_type_id
global gsystem_type_name
gsystem_type_node = None
gsystem_type_id = None
gsystem_type_name = ""
if os.path.exists(file_path):
gsystem_type_name = os.path.basename(file_path)
gsystem_type_name = os.path.splitext(gsystem_type_name)[0]
gsystem_type_name = gsystem_type_name.replace("_", " ")
if gsystem_type_name == u"Student":
global create_student_enrollment_code
create_student_enrollment_code = True
elif gsystem_type_name == u"College":
global create_private_college_group
create_private_college_group = True
gsystem_type_node = node_collection.one({
"_type": "GSystemType",
"$or": [{
"name": {"$regex": "^"+gsystem_type_name+"$", '$options': 'i'}
}, {
"altnames": {"$regex": "^"+gsystem_type_name+"$", '$options': 'i'}
}]
})
if gsystem_type_node:
gsystem_type_id = gsystem_type_node._id
else:
error_message = "\n GSystemTypeError: This GSystemType ("+gsystem_type_name+") doesn't exists for creating it's own GSystem !!!"
log_list.append(error_message)
raise Exception(error_message)
file_extension = os.path.splitext(file_name)[1]
if "csv" in file_extension:
# Process csv file and convert it to json format at first
total_rows = 0
info_message = "\n CSVType: Following file (" + file_path + ") found!!!"
log_list.append(info_message)
try:
csv_file_path = file_path
                            json_file_name = os.path.splitext(file_name)[0] + ".json"
json_file_path = os.path.join(SCHEMA_ROOT, json_file_name)
json_file_content = ""
with open(csv_file_path, 'rb') as csv_file:
csv_file_content = csv.DictReader(csv_file, delimiter=",")
json_file_content = []
for row in csv_file_content:
total_rows += 1
json_file_content.append(row)
info_message = "\n- File '" + file_name + "' contains : " + str(total_rows) + " entries/rows (excluding top-header/column-names)."
print info_message
log_list.append(str(info_message))
with open(json_file_path, 'w') as json_file:
json.dump(json_file_content,
json_file,
indent=4,
sort_keys=False)
if os.path.exists(json_file_path):
file_path = json_file_path
is_json_file_exists = True
info_message = "\n JSONType: Following file (" + json_file_path + ") created successfully.\n"
log_list.append(info_message)
except Exception as e:
error_message = "\n CSV-JSONError: " + str(e)
log_list.append(error_message)
# End of csv-json coversion
elif "json" in file_extension:
is_json_file_exists = True
else:
error_message = "\n FileTypeError: Please choose either 'csv' or 'json' format supported files!!!\n"
log_list.append(error_message)
                        raise Exception(error_message)
if is_json_file_exists:
# Process json file and create required GSystems, GRelations, and GAttributes
info_message = "\n Task initiated: Processing json-file...\n"
log_list.append(info_message)
t0 = time.time()
parse_data_create_gsystem(file_path, file_name)
t1 = time.time()
time_diff = t1 - t0
# print time_diff
total_time_minute = round( (time_diff/60), 2) if time_diff else 0
total_time_hour = round( (time_diff/(60*60)), 2) if time_diff else 0
# End of processing json file
info_message = "\n------- Task finised: Successfully processed json-file -------\n"
info_message += "- Total time taken for the processing: \n\n\t" + str(total_time_minute) + " MINUTES\n\t=== OR ===\n\t" + str(total_time_hour) + " HOURS\n"
print info_message
log_list.append(str(info_message))
# End of processing json file
else:
error_message = "\n FileNotFound: Following path (" + file_path + ") doesn't exists!!!\n"
log_list.append(error_message)
raise Exception(error_message)
except Exception as e:
error_message = str(e)
print "\n >>> >>>> >>>>>" + error_message
finally:
if log_list:
log_list.append("\n ============================================================ End of Iteration ============================================================\n")
log_file_name = gsystem_type_name + ".log"
log_file_path = os.path.join(SCHEMA_ROOT, log_file_name)
with open(log_file_path, 'a') as log_file:
log_file.writelines(log_list)
# --- End of handle() ---
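# Illustrative invocation (the csv/json file must live under schema_files/,
# next to this command):
#
#   python manage.py data_entry Student.csv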
# -----------------------------------------------------------------------------------------------------------------
# Function that process json data according to the structure field
# -----------------------------------------------------------------------------------------------------------------
def parse_data_create_gsystem(json_file_path, file_name):
json_file_content = ""
try:
print "\n file_name == ",file_name
with open(json_file_path) as json_file:
json_file_content = json_file.read()
json_documents_list = json.loads(json_file_content)
# Process data in proper format
node = node_collection.collection.GSystem()
node_keys = node.keys()
node_structure = node.structure
json_documents_list_spaces = json_documents_list
json_documents_list = []
# Removes leading and trailing spaces from keys as well as values
for json_document_spaces in json_documents_list_spaces:
json_document = {}
for key_spaces, value_spaces in json_document_spaces.iteritems():
json_document[key_spaces.strip().lower()] = value_spaces.strip()
json_documents_list.append(json_document)
except Exception as e:
error_message = "\n While parsing the file ("+json_file_path+") got following error...\n " + str(e)
log_list.append(error_message)
print error_message
        raise Exception(error_message)
for i, json_document in enumerate(json_documents_list):
try:
if file_name == "QuizItem.csv":
print "\n\n *******************"
question_content = json_document['content']
question_content = question_content.split(' ')
question_content = question_content[:4]
question_content = ' '.join(question_content)
json_document['name'] = question_content
json_document['altnames'] = json_document['content']
group_id = ObjectId(json_document['group_id'])
group_obj = node_collection.one({'_id': group_id})
if group_obj:
group_id = group_obj._id
else:
group_id = home_grp._id
user_id = int(json_document['user_id'])
print "\n\n NAME ======= ", json_document['name'], group_id, user_id
global node_repeated
node_repeated = False
n_name = ""
if "first name" in json_document:
n_name = json_document["first name"] + " "
if json_document["middle name"]:
n_name += json_document["middle name"]
if json_document["last name"]:
n_name += " "
n_name += json_document["last name"]
json_document["name"] = n_name.title()
info_message = "\n ============ #"+ str(i+1) +" : Start of "+gsystem_type_name+"'s GSystem ("+json_document['name']+") creation/updation ============\n"
log_list.append(info_message)
parsed_json_document = {}
attribute_relation_list = []
for key in json_document.iterkeys():
# print "\n key ",key
parsed_key = key.lower()
parsed_key = parsed_key.replace(" ", "_")
if parsed_key in node_keys:
if node_structure[parsed_key] == unicode:
parsed_json_document[parsed_key] = unicode(json_document[key])
elif node_structure[parsed_key] == datetime.datetime:
parsed_json_document[parsed_key] = datetime.datetime.strptime(json_document[key], "%d/%m/%Y")
else:
parsed_json_document[parsed_key] = json_document[key]
else:
parsed_json_document[key] = json_document[key]
attribute_relation_list.append(key)
info_message = "\n Creating "+gsystem_type_name+" ("+parsed_json_document["name"]+")..."
log_list.append(info_message)
print "\n HERE == "
node = create_edit_gsystem(gsystem_type_id, gsystem_type_name, parsed_json_document, user_id)
print "\n node created === ", node._id, " === ", node.name, node.altnames
# print "attribute_relation_list == ",attribute_relation_list
if node:
if not attribute_relation_list:
# Neither possible attribute fields, nor possible relations defined for this node
info_message = "\n "+gsystem_type_name+" ("+node.name+"): Neither possible attribute fields, nor possible relations defined for this node !\n"
log_list.append(info_message)
continue
gst_possible_attributes_dict = node.get_possible_attributes(gsystem_type_id)
print "\n gsystem_type_id ===",gst_possible_attributes_dict
relation_list = []
json_document['name'] = node.name
# Write code for setting atrributes
for key in attribute_relation_list:
is_relation = True
for attr_key, attr_value in gst_possible_attributes_dict.iteritems():
# print "\n\n attr_key === ", attr_key
# print "\n\n altnames -- === ", attr_value['altnames']
if attr_value['altnames'] and key == attr_value['altnames'].lower() or key == attr_key.lower():
is_relation = False
if json_document[key]:
try:
if attr_value['data_type'] == basestring:
if u"\u2013" in json_document[key]:
json_document[key] = json_document[key].replace(u"\u2013", "-")
info_message = "\n For GAttribute parsing content | key: " + attr_key + " -- " + json_document[key]
log_list.append(info_message)
if attr_value['data_type'] == unicode:
json_document[key] = unicode(json_document[key])
elif attr_value['data_type'] == bool:
if json_document[key].lower() == "yes":
json_document[key] = True
elif json_document[key].lower() == "no":
json_document[key] = False
else:
json_document[key] = None
elif attr_value['data_type'] == datetime.datetime:
# Use small-case altnames
if key in ["dob", "date of birth", "date of registration"]:
if json_document[key]:
json_document[key] = datetime.datetime.strptime(json_document[key], "%d/%m/%Y")
else:
if json_document[key]:
json_document[key] = datetime.datetime.strptime(json_document[key], "%Y")
elif attr_value['data_type'] in [int, float, long]:
if not json_document[key]:
json_document[key] = 0
else:
if attr_value['data_type'] == int:
json_document[key] = int(json_document[key])
elif attr_value['data_type'] == float:
json_document[key] = float(json_document[key])
else:
json_document[key] = long(json_document[key])
elif type(attr_value['data_type']) == IS:
for op in attr_value['data_type']._operands:
if op.lower() == json_document[key].lower():
json_document[key] = op
elif (attr_value['data_type'] in [list, dict]) or (type(attr_value['data_type']) in [list, dict]):
if "," not in json_document[key]:
                                    # Necessary to inform perform_eval_type() that it should handle this value as a list
json_document[key] = "\"" + json_document[key] + "\", "
else:
formatted_value = ""
for v in json_document[key].split(","):
formatted_value += "\""+v.strip(" ")+"\", "
json_document[key] = formatted_value
perform_eval_type(key, json_document, "GSystem")
subject_id = node._id
attribute_type_node = None
if attr_key in attr_type_dict:
attribute_type_node = attr_type_dict[attr_key]
else:
attribute_type_node = node_collection.one({
'_type': "AttributeType",
'$or': [{
'name': {'$regex': "^" + attr_key + "$", '$options': 'i'}
}, {
'altnames': {'$regex': "^" + attr_key + "$", '$options': 'i'}
}]
})
attr_type_dict[attr_key] = attribute_type_node
object_value = json_document[key]
ga_node = None
info_message = "\n Creating GAttribute (" + node.name + " -- " + attribute_type_node.name + " -- " + str(json_document[key]) + ") ...\n"
log_list.append(info_message)
ga_node = create_gattribute(subject_id, attribute_type_node, object_value)
except Exception as e:
error_message = "\n While creating GAttribute (" + attr_key + ") for "+gsystem_type_name+"'s GSystem ("+json_document['name']+") got following error...\n " + str(e) + "\n"
log_list.append(error_message)
print error_message # Keep it!
# To break outer for loop as key found
break
else:
error_message = "\n DataNotFound: No data found for field ("+attr_key+") while creating GSystem (" + gsystem_type_name + " -- " + node.name + ") !!!\n"
log_list.append(error_message)
if is_relation:
relation_list.append(key)
if not relation_list:
# No possible relations defined for this node
info_message = "\n "+gsystem_type_name+" ("+node.name+"): No possible relations defined for this node !!!\n"
log_list.append(info_message)
else:
gst_possible_relations_dict = node.get_possible_relations(gsystem_type_id)
# Write code for setting relations
for key in relation_list:
is_relation = True
for rel_key, rel_value in gst_possible_relations_dict.iteritems():
if key == rel_value['altnames'].lower() or key == rel_key.lower():
is_relation = False
if json_document[key]:
                                # Here a semi-colon (';') is used instead of a comma (',')
                                # because one of the values may contain a comma (','), which causes problems when finding the required value in the database
try:
if ";" not in json_document[key]:
                                        # Necessary to inform perform_eval_type() that it should handle this value as a list
json_document[key] = "\""+json_document[key]+"\", "
else:
formatted_value = ""
for v in json_document[key].split(";"):
formatted_value += "\""+v.strip(" ")+"\", "
json_document[key] = formatted_value
info_message = "\n For GRelation parsing content | key: " + rel_key + " -- " + json_document[key]
log_list.append(info_message)
perform_eval_type(key, json_document, "GSystem", "GSystem")
# for right_subject_id in json_document[key]:
subject_id = node._id
                                    # Here we append the ObjectIds of the GSystemType's type_of field
                                    # along with the ObjectId of the GSystemType itself (whose GSystem is being created)
                                    # This is because some RelationTypes hold the Base class's ObjectId
                                    # and not that of the Derived ones
                                    # Deliberately keeping the GSystemType's ObjectId first in the list
                                    # And hence, the $in operator is used in the query!
rel_subject_type = []
rel_subject_type.append(gsystem_type_id)
if gsystem_type_node.type_of:
rel_subject_type.extend(gsystem_type_node.type_of)
relation_type_node = None
if rel_key in rel_type_dict:
relation_type_node = rel_type_dict[rel_key]
else:
relation_type_node = node_collection.one({
'_type': "RelationType",
'$or': [{
'name': {'$regex': "^" + rel_key + "$", '$options': 'i'}
}, {
'altnames': {'$regex': "^" + rel_key + "$", '$options': 'i'}
}],
'subject_type': {'$in': rel_subject_type}
})
rel_type_dict[rel_key] = relation_type_node
info_message = "\n Creating GRelation ("+node.name+" -- "+rel_key+" -- "+str(json_document[key])+") ...\n"
log_list.append(info_message)
gr_node = create_grelation(subject_id, relation_type_node, json_document[key])
except Exception as e:
error_message = "\n While creating GRelation (" + rel_key + ") for "+gsystem_type_name+"'s GSystem ("+json_document['name']+") got following error...\n" + str(e) + "\n"
log_list.append(error_message)
pass
if college_gst._id in relation_type_node.object_type:
# Fetch college node's group id
# Append it to node's group_set
node_group_set = node.group_set
is_group_set_changed = False
# Iterate through each college
# Find it's corresponding group's ObjectId
# Append it to node's group_set
for each in json_document[key]:
each = ObjectId(each)
each_str = str(each)
if each_str in college_dict:
college_group_id = college_dict[each_str]
if college_group_id not in node_group_set:
node_group_set.append(college_group_id)
is_group_set_changed = True
else:
# If not found in college_dict
# Then find and update college_dict
college_node = node_collection.collection.aggregate([{
"$match": {"_id": each}
}, {
"$project": {"group_id": "$relation_set.has_group"}
}])
college_node = college_node["result"]
if college_node:
college_node = college_node[0]
college_group_id = college_node["group_id"]
if college_group_id:
college_group_id = college_group_id[0][0]
college_dict[each_str] = college_group_id
node_group_set.append(college_group_id)
is_group_set_changed = True
# Update node's group_set with updated list
# if changed
if is_group_set_changed:
node_collection.collection.update({
"_id": subject_id
}, {
"$set": {"group_set": node_group_set}
},
upsert=False, multi=False
)
# To break outer for loop if key found
break
else:
error_message = "\n DataNotFound: No data found for relation ("+rel_key+") while creating GSystem ("+gsystem_type_name+" -- "+node.name+") !!!\n"
log_list.append(error_message)
# print error_message
break
# Create enrollment code (Only for Student)
if create_student_enrollment_code and not node_repeated:
enrollment_code_at = node_collection.one({
"_type": "AttributeType", "name": "enrollment_code"
})
node_exist = node_collection.one({"_id": node._id, "attribute_set.enrollment_code": {"$exists": True}})
if not node_exist:
# It means enrollment_code is not set for given student node
# Then set it
try:
college_id = None
group_id = None
for k, v in college_dict.items():
college_id = ObjectId(k)
group_id = ObjectId(v)
student_enrollment_code = get_student_enrollment_code(college_id, node._id, json_document["date of registration"], group_id)
info_message = "\n Creating GAttribute (" + node.name + " -- " + enrollment_code_at.name + " -- " + str(student_enrollment_code) + ") ...\n"
log_list.append(info_message)
ga_node = create_gattribute(node._id, enrollment_code_at, student_enrollment_code)
except Exception as e:
error_message = "\n StudentEnrollmentCreateError: " + str(e) + "!!!"
log_list.append(error_message)
elif create_private_college_group:
# Create a private group for respective college node
node_exist = node_collection.one({"_id": node._id, "relation_set.has_group": {"$exists": True}})
if not node_exist:
try:
info_message = "\n Creating private group for given college (" + node.name + ") via RelationType (has_group)...\n"
log_list.append(info_message)
college_group, college_group_gr = create_college_group_and_setup_data(node)
except Exception as e:
error_message = "\n CollegeGroupCreateError: " + str(e) + "!!!"
log_list.append(error_message)
except Exception as e:
error_message = "\n While creating "+gsystem_type_name+"'s GSystem ("+json_document['name']+") got following error...\n " + str(e)
log_list.append(error_message)
print error_message # Keep it!
import sys
print "\n ****\n"
print 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno)
def create_edit_gsystem(gsystem_type_id, gsystem_type_name, json_document, user_id):
"""Creates/Updates respective GSystem and it's related GAttribute(s)
and GRelation(s)
"""
node = None
if "(" in json_document['name'] or ")" in json_document['name']:
query = {
"_type": "GSystem",
'name': json_document['name'],
'member_of': gsystem_type_id
}
else:
query = {
"_type": "GSystem",
'$or': [{
'name': {'$regex': "^"+json_document['name']+"$", '$options': 'i'}
}, {
'altnames': {'$regex': "^"+json_document['name']+"$", '$options': 'i'}
}],
'member_of': gsystem_type_id
}
if "date of birth" in json_document:
dob = json_document["date of birth"]
if dob:
query.update({"attribute_set.dob": datetime.datetime.strptime(dob, "%d/%m/%Y")})
if "contact number (mobile)" in json_document:
mobile_number = json_document["contact number (mobile)"]
if mobile_number:
query.update({"attribute_set.mobile_number": long(mobile_number)})
if "degree name / highest degree" in json_document:
degree_name = json_document["degree name / highest degree"]
if degree_name:
query.update({"attribute_set.degree_name": degree_name})
if "year of study" in json_document:
degree_year = json_document["year of study"]
if degree_year:
query.update({"attribute_set.degree_year": degree_year})
if "college ( graduation )" in json_document:
college_name = json_document["college ( graduation )"]
if college_name not in college_name_dict:
college_node = node_collection.one({
"member_of": college_gst._id, "name": college_name
}, {
"name": 1
})
college_name_dict[college_name] = college_node
query.update({"relation_set.student_belongs_to_college": college_name_dict[college_name]._id})
info_message = "\n query for " + json_document['name'] + " : " + str(query) + "\n"
log_list.append(info_message)
if gsystem_type_name != "QuizItem":
node = node_collection.one(query)
if node is None:
try:
node = node_collection.collection.GSystem()
personal_details = []
address_details = []
details_12 = []
graduation_details = []
work_experience = []
education_details = []
tot_details = []
property_order = []
# TODO: Name of attributes/relation to be replaced with their respective ObjectIds
if gsystem_type_name in ["Student", "Voluntary Teacher"]:
personal_details = [
("first_name", "First Name"),
("middle_name", "Middle Name"),
("last_name", "Last Name"),
("gender", "Gender"),
("dob", "Date of Birth"),
("religion", "Religion"),
("languages_known", "Languages Known"),
("mobile_number", "Contact Number (Mobile)"),
("alternate_number", "Alternate Number / Landline"),
("email_id", "Email ID")
]
if gsystem_type_name in ["College", "University", "Student", "Voluntary Teacher"]:
address_details = [
("house_street", "House / Street"),
("village", "Village"),
("taluka", "Taluka"),
("town_city", "Town / City"),
("pin_code", "Pin Code")
]
if gsystem_type_name in ["Voluntary Teacher"]:
work_experience = [
("key_skills", "Key Skills"),
("profession", "Profession"),
("designation", "Profession"),
("work_exp", "Year of Experience (if Any)")
]
education_details = [
("degree_name", "Degree Name / Highest Degree"),
("degree_specialization", "Degree Specialization"),
("degree_passing_year", "Year of Passing Degree"),
("other_qualifications", "Any other Qualification")
]
tot_details = [
("trainer_of_college", "Volunteer to teach College(s) [At max. 2]"),
("trainer_of_course", "Volunteer to teach Course(s) [At max. 2]"),
("is_tot_attended", "Did you attend TOT?"),
("tot_when", "When did you attend TOT?"),
]
if gsystem_type_name in ["Student"]:
details_12 = [
("student_has_domicile", "State/Union Territory of Domicile"),
("12_passing_year", "Year of Passing XII")
]
graduation_details = [
("student_belongs_to_college", "College (Graduation)"),
("degree_name", "Degree Name / Highest Degree"),
("degree_year", "Year of Study"),
("college_enroll_num", "College Enrolment Number / Roll No"),
("student_belongs_to_university", "University"),
("is_nss_registered", "Are you registered for NSS?"),
("is_dropout_student", "Are you a dropout student?")
]
if gsystem_type_name in ["College", "University"]:
address_details.insert(4, ("organization_belongs_to_country", "Country"))
address_details.insert(4, ("organization_belongs_to_state", "State"))
address_details.insert(4, ("organization_belongs_to_district", "District"))
property_order = [
["Address", address_details]
]
if gsystem_type_name in ["University"]:
affiliated_college_details = [
("affiliated_college", "Affiliated Colleges")
]
property_order.append(["Affiliated Colleges", affiliated_college_details])
if gsystem_type_name in ["Voluntary Teacher"]:
address_details.insert(4, ("person_belongs_to_country", "Country"))
address_details.insert(4, ("person_belongs_to_state", "State"))
address_details.insert(4, ("person_belongs_to_district", "District"))
property_order = [
["Personal", personal_details],
["Address", address_details],
["Education", education_details],
["Work Experience", work_experience],
["TOT Details", tot_details],
]
if gsystem_type_name in ["Student"]:
personal_details.insert(6, ("student_of_caste_category", "Caste Category"))
address_details.insert(4, ("person_belongs_to_country", "Country"))
address_details.insert(4, ("person_belongs_to_state", "State"))
address_details.insert(4, ("person_belongs_to_district", "District"))
property_order = [
["Personal", personal_details],
["Address", address_details],
["XII", details_12],
["Graduation", graduation_details]
]
node.property_order = property_order
            # Save Node first with its basic attribute fields
for key in json_document.keys():
if node.has_key(key):
node[key] = json_document[key]
node.created_by = user_id
node.modified_by = user_id
if user_id not in node.contributors:
node.contributors.append(user_id)
node.member_of.append(gsystem_type_id)
node.group_set.append(group_id)
node.status = u"PUBLISHED"
node.save()
info_message = "\n "+gsystem_type_name+" ("+node.name+") created successfully.\n"
log_list.append(info_message)
except Exception as e:
error_message = "\n "+gsystem_type_name+"Error: Failed to create ("+json_document['name']+") as " + str(e) + "\n"
log_list.append(error_message)
raise Exception(error_message)
else:
# Code for updation
is_node_changed = False
global node_repeated
node_repeated = True
try:
for key in json_document.iterkeys():
if key in node:
if type(node[key]) == list:
if set(node[key]) != set(json_document[key]):
node[key] = json_document[key]
is_node_changed = True
elif type(node[key]) == dict:
if cmp(node[key], json_document[key]) != 0:
node[key] = json_document[key]
is_node_changed = True
else:
if node[key] != json_document[key]:
node[key] = json_document[key]
is_node_changed = True
if is_node_changed:
node.modified_by = user_id
if user_id not in node.contributors:
node.contributors.append(user_id)
node.status = u"PUBLISHED"
node.save()
info_message = "\n "+gsystem_type_name+" ("+node.name+") updated successfully.\n"
log_list.append(info_message)
else:
info_message = "\n "+gsystem_type_name+" ("+node.name+") already exists (Nothing updated) !\n"
log_list.append(info_message)
except Exception as e:
error_message = "\n "+gsystem_type_name+"Error: Failed to update ("+node.name+") as " + str(e) + "\n"
log_list.append(error_message)
raise Exception(error_message)
return node
def perform_eval_type(eval_field, json_document, type_to_create, type_convert_objectid=None):
"""Converts eval_field's data in json-type to it's corresponding python-type, and
resets eval_field with that converted data
"""
try:
json_document[eval_field] = ast.literal_eval(json_document[eval_field])
except Exception as e:
if u"\u201c" in json_document[eval_field]:
json_document[eval_field] = json_document[eval_field].replace(u"\u201c", "\"")
if u"\u201d" in json_document[eval_field]:
json_document[eval_field] = json_document[eval_field].replace(u"\u201d", "\"")
if u"\u2013" in json_document[eval_field]:
json_document[eval_field] = json_document[eval_field].replace(u"\u2013", "-")
try:
json_document[eval_field] = ast.literal_eval(json_document[eval_field])
except Exception as e:
error_message = "\n InvalidDataError: For " + type_to_create + " ("+json_document['name']+") invalid data found -- " + str(e) + "!!!\n"
log_list.append(error_message)
raise Exception(error_message)
type_list = []
for data in json_document[eval_field]:
if type_convert_objectid is None:
if eval_field == "when did you attend tot?":
type_list.append(datetime.datetime.strptime(data, "%d/%m/%Y"))
else:
type_list.append(data)
else:
if "(" in data or ")" in data:
node = node_collection.one({'_type': type_convert_objectid,
'name': data,
'group_set': group_id
},
{'_id': 1}
)
else:
node = node_collection.one({'_type': type_convert_objectid,
'$or': [{'name': {'$regex': "^"+data+"$", '$options': 'i'}},
{'altnames': {'$regex': "^"+data+"$", '$options': 'i'}}],
'group_set': group_id
},
{'_id': 1}
)
if node:
type_list.append(node._id)
else:
error_message = "\n "+type_convert_objectid+"Error ("+eval_field+"): This "+type_convert_objectid+" (" + data + ") doesn't exists for creating "+type_to_create+" (" + json_document['name'] + ") !!!\n"
log_list.append(error_message)
raise Exception(error_message)
# Sets python-type converted list
json_document[eval_field] = type_list
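# Behaviour sketch (hypothetical input): a comma-separated cell that was
# pre-quoted by the caller is parsed back into a python list:
#
#   doc = {"name": "X", "languages known": '"English", "Hindi", '}
#   perform_eval_type("languages known", doc, "GSystem")
#   # doc["languages known"] is now ['English', 'Hindi']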
|
AvadootNachankar/gstudio
|
gnowsys-ndf/gnowsys_ndf/ndf/management/commands/data_entry.py
|
Python
|
agpl-3.0
| 45,442
| 0.004577
|
from flask import Flask
from flask import make_response
from flask import request
from flask import render_template
from flask import redirect
from flask import url_for
import logging
from logging.handlers import RotatingFileHandler
app = Flask(__name__)
@app.route('/')
def index():
app.logger.info('index')
username = request.cookies.get('username')
    if username is None:
return redirect(url_for('login'))
else:
return render_template('index.html', username=username)
@app.route('/login', methods=['GET','POST'])
def login():
app.logger.info('login')
if request.method == 'POST':
if validate_credentials(request.form['username'], request.form['password']):
resp = make_response(redirect(url_for('index')))
resp.set_cookie('username', request.form['username'])
return resp
else:
return render_template('login.html', error='Invalid username or password')
else:
return render_template('login.html')
@app.route('/logout')
def logout():
app.logger.info('logout')
resp = make_response(redirect(url_for('index')))
resp.set_cookie('username', '', expires=0)
return resp
def validate_credentials(username, password):
    # Demo-only check: any username whose password equals the username is accepted.
    return username == password
if __name__ == '__main__':
handler = RotatingFileHandler('todo.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.run()
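# Illustrative manual test against the development server (assumes the
# default http://127.0.0.1:5000):
#
#   curl -i -c jar.txt -d 'username=alice' -d 'password=alice' \
#        http://127.0.0.1:5000/login
#   curl -i -b jar.txt http://127.0.0.1:5000/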
|
CoderDojoSG/todo
|
todo1/application.py
|
Python
|
apache-2.0
| 1,472
| 0.007473
|
class EmptyResult(object):
'''
Null Object pattern to prevent Null reference errors
when there is no result
'''
def __init__(self):
self.status = 0
self.body = ''
self.msg = ''
self.reason = ''
def __nonzero__(self):
return False
class HapiError(ValueError):
"""Any problems get thrown as HapiError exceptions with the relevant info inside"""
as_str_template = u'''
---- request ----
{method} {host}{url}, [timeout={timeout}]
---- body ----
{body}
---- headers ----
{headers}
---- result ----
{result_status}
---- body -----
{result_body}
---- headers -----
{result_headers}
---- reason ----
{result_reason}
---- trigger error ----
{error}
'''
    def __init__(self, result, request, err=None):
        super(HapiError, self).__init__(result and result.reason or "Unknown Reason")
        if result is None:
            self.result = EmptyResult()
        else:
            self.result = result
        if request is None:
            request = {}
        self.request = request
        self.err = err
def __str__(self):
return self.__unicode__().encode('ascii', 'replace')
def __unicode__(self):
params = {}
request_keys = ('method', 'host', 'url', 'data', 'headers', 'timeout', 'body')
result_attrs = ('status', 'reason', 'msg', 'body', 'headers')
params['error'] = self.err
for key in request_keys:
params[key] = self.request.get(key)
for attr in result_attrs:
params['result_%s' % attr] = getattr(self.result, attr, '')
params = self._dict_vals_to_unicode(params)
return self.as_str_template.format(**params)
def _dict_vals_to_unicode(self, data):
unicode_data = {}
for key, val in data.items():
if not isinstance(val, basestring):
unicode_data[key] = unicode(val)
elif not isinstance(val, unicode):
unicode_data[key] = unicode(val, 'utf8', 'ignore')
else:
unicode_data[key] = val
return unicode_data
# Create more specific error cases, to make filtering errors easier
class HapiBadRequest(HapiError):
'''Error wrapper for most 40X results and 501 results'''
class HapiNotFound(HapiError):
'''Error wrapper for 404 and 410 results'''
class HapiTimeout(HapiError):
'''Wrapper for socket timeouts, sslerror, and 504'''
class HapiUnauthorized(HapiError):
'''Wrapper for 401 Unauthorized errors'''
class HapiServerError(HapiError):
'''Wrapper for most 500 errors'''
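# A sketch of how a caller might pick the specific subclass from an HTTP
# status code (illustrative helper; not part of this module's public API):
def _error_class_for_status(status):
    """Map an HTTP status code to the matching HapiError subclass."""
    if status == 401:
        return HapiUnauthorized
    if status in (404, 410):
        return HapiNotFound
    if status == 504:
        return HapiTimeout
    if 400 <= status < 500 or status == 501:
        return HapiBadRequest
    if status >= 500:
        return HapiServerError
    return HapiError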
|
jonathan-s/happy
|
happy/error.py
|
Python
|
apache-2.0
| 2,606
| 0.005372
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from bs4 import BeautifulSoup as bs
from sasila.system_normal.spider.spider_core import SpiderCore
from sasila.system_normal.pipeline.console_pipeline import ConsolePipeline
from sasila.system_normal.processor.base_processor import BaseProcessor
from sasila.system_normal.downloader.http.spider_request import Request
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
class FirstProcessor(BaseProcessor):
spider_id = 'test'
spider_name = 'test'
allowed_domains = ['mzitu.com']
start_requests = [Request(url="http://www.mzitu.com/")]
def process(self, response):
soup = bs(response.m_response.content, 'lxml')
a_list = soup.select("a")
for a in a_list:
if "href" in a.attrs:
url = response.nice_join(a["href"])
yield {'url': url}
# if __name__ == '__main__':
# spider = SpiderCore(FirstProcessor()).set_pipeline(ConsolePipeline()).start()
|
DarkSand/Sasila
|
sasila/system_normal/processor/first_processor.py
|
Python
|
apache-2.0
| 1,026
| 0.000975
|
#############################################################################
# Copyright (c) 2010 by Casey Duncan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name(s) of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AS IS AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#############################################################################
from __future__ import division
import math
import planar
from planar.util import cached_property, assert_unorderable, cos_sin_deg
class Affine(tuple):
"""Two dimensional affine transform for linear mapping from 2D coordinates
to other 2D coordinates. Parallel lines are preserved by these
transforms. Affine transforms can perform any combination of translations,
scales/flips, shears, and rotations. Class methods are provided to
conveniently compose transforms from these operations.
Internally the transform is stored as a 3x3 transformation matrix. The
transform may be constructed directly by specifying the first two rows of
matrix values as 6 floats. Since the matrix is an affine transform, the
last row is always ``(0, 0, 1)``.
:param members: 6 floats for the first two matrix rows.
:type members: float
"""
def __new__(self, *members):
if len(members) == 6:
mat3x3 = [x * 1.0 for x in members] + [0.0, 0.0, 1.0]
return tuple.__new__(Affine, mat3x3)
else:
raise TypeError(
"Expected 6 number args, got %s" % len(members))
@classmethod
def identity(cls):
"""Return the identity transform.
:rtype: Affine
"""
return identity
@classmethod
def translation(cls, offset):
"""Create a translation transform from an offset vector.
:param offset: Translation offset.
:type offset: :class:`~planar.Vec2`
:rtype: Affine
"""
ox, oy = offset
return tuple.__new__(cls,
(1.0, 0.0, ox,
0.0, 1.0, oy,
0.0, 0.0, 1.0))
@classmethod
def scale(cls, scaling):
"""Create a scaling transform from a scalar or vector.
:param scaling: The scaling factor. A scalar value will
scale in both dimensions equally. A vector scaling
value scales the dimensions independently.
:type scaling: float or :class:`~planar.Vec2`
:rtype: Affine
"""
try:
sx = sy = float(scaling)
except TypeError:
sx, sy = scaling
return tuple.__new__(cls,
(sx, 0.0, 0.0,
0.0, sy, 0.0,
0.0, 0.0, 1.0))
@classmethod
def shear(cls, x_angle=0, y_angle=0):
"""Create a shear transform along one or both axes.
:param x_angle: Angle in degrees to shear along the x-axis.
:type x_angle: float
:param y_angle: Angle in degrees to shear along the y-axis.
:type y_angle: float
:rtype: Affine
"""
sx = math.tan(math.radians(x_angle))
sy = math.tan(math.radians(y_angle))
return tuple.__new__(cls,
(1.0, sy, 0.0,
sx, 1.0, 0.0,
0.0, 0.0, 1.0))
@classmethod
def rotation(cls, angle, pivot=None):
"""Create a rotation transform at the specified angle,
optionally about the specified pivot point.
:param angle: Rotation angle in degrees
:type angle: float
:param pivot: Point to rotate about, if omitted the
rotation is about the origin.
:type pivot: :class:`~planar.Vec2`
:rtype: Affine
"""
ca, sa = cos_sin_deg(angle)
if pivot is None:
return tuple.__new__(cls,
(ca, sa, 0.0,
-sa, ca, 0.0,
0.0, 0.0, 1.0))
else:
px, py = pivot
return tuple.__new__(cls,
(ca, sa, px - px*ca + py*sa,
-sa, ca, py - px*sa - py*ca,
0.0, 0.0, 1.0))
def __str__(self):
"""Concise string representation."""
return ("|% .2f,% .2f,% .2f|\n"
"|% .2f,% .2f,% .2f|\n"
"|% .2f,% .2f,% .2f|") % self
def __repr__(self):
"""Precise string representation."""
return ("Affine(%r, %r, %r,\n"
" %r, %r, %r)") % self[:6]
@cached_property
def determinant(self):
"""The determinant of the transform matrix. This value
is equal to the area scaling factor when the transform
is applied to a shape.
"""
a, b, c, d, e, f, g, h, i = self
return a*e - b*d
@cached_property
def is_identity(self):
"""True if this transform equals the identity matrix,
within rounding limits.
"""
return self is identity or self.almost_equals(identity)
@cached_property
def is_rectilinear(self):
"""True if the transform is rectilinear, i.e., whether a shape would
remain axis-aligned, within rounding limits, after applying the
transform.
"""
a, b, c, d, e, f, g, h, i = self
return ((abs(a) < planar.EPSILON and abs(e) < planar.EPSILON)
or (abs(d) < planar.EPSILON and abs(b) < planar.EPSILON))
@cached_property
def is_conformal(self):
"""True if the transform is conformal, i.e., if angles between points
are preserved after applying the transform, within rounding limits.
This implies that the transform has no effective shear.
"""
a, b, c, d, e, f, g, h, i = self
return abs(a*b + d*e) < planar.EPSILON
@cached_property
def is_orthonormal(self):
"""True if the transform is orthonormal, which means that the
transform represents a rigid motion, which has no effective scaling or
shear. Mathematically, this means that the axis vectors of the
transform matrix are perpendicular and unit-length. Applying an
orthonormal transform to a shape always results in a congruent shape.
"""
a, b, c, d, e, f, g, h, i = self
return (self.is_conformal
and abs(1.0 - (a*a + d*d)) < planar.EPSILON
and abs(1.0 - (b*b + e*e)) < planar.EPSILON)
@cached_property
def is_degenerate(self):
"""True if this transform is degenerate, which means that it will
collapse a shape to an effective area of zero. Degenerate transforms
cannot be inverted.
"""
return abs(self.determinant) < planar.EPSILON
@property
def column_vectors(self):
"""The values of the transform as three 2D column vectors"""
a, b, c, d, e, f, _, _, _ = self
return planar.Vec2(a, d), planar.Vec2(b, e), planar.Vec2(c, f)
def almost_equals(self, other):
"""Compare transforms for approximate equality.
:param other: Transform being compared.
:type other: Affine
        :return: True if the absolute difference between each element
            of each respective transform matrix is < ``EPSILON``.
"""
for i in (0, 1, 2, 3, 4, 5):
if abs(self[i] - other[i]) >= planar.EPSILON:
return False
return True
def __gt__(self, other):
return assert_unorderable(self, other)
__ge__ = __lt__ = __le__ = __gt__
# Override from base class. We do not support entrywise
# addition, subtraction or scalar multiplication because
# the result is not an affine transform
def __add__(self, other):
raise TypeError("Operation not supported")
__iadd__ = __add__
def __mul__(self, other):
"""Apply the transform using matrix multiplication, creating a
resulting object of the same type. A transform may be applied to
another transform, a vector, vector array, or shape.
:param other: The object to transform.
:type other: Affine, :class:`~planar.Vec2`,
:class:`~planar.Vec2Array`, :class:`~planar.Shape`
:rtype: Same as ``other``
"""
sa, sb, sc, sd, se, sf, _, _, _ = self
if isinstance(other, Affine):
oa, ob, oc, od, oe, of, _, _, _ = other
return tuple.__new__(Affine,
(sa*oa + sb*od, sa*ob + sb*oe, sa*oc + sb*of + sc,
sd*oa + se*od, sd*ob + se*oe, sd*oc + se*of + sf,
0.0, 0.0, 1.0))
elif hasattr(other, 'from_points'):
# Point/vector array
Point = planar.Point
points = getattr(other, 'points', other)
try:
return other.from_points(
Point(px*sa + py*sd + sc, px*sb + py*se + sf)
for px, py in points)
except TypeError:
return NotImplemented
else:
try:
vx, vy = other
except Exception:
return NotImplemented
return planar.Vec2(vx*sa + vy*sd + sc, vx*sb + vy*se + sf)
def __rmul__(self, other):
        # We should not be called if other is an Affine instance; this
        # assert is just a safeguard, since we could otherwise return
        # the wrong result in that case
assert not isinstance(other, Affine)
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, Affine) or isinstance(other, planar.Vec2):
return self.__mul__(other)
else:
return NotImplemented
def itransform(self, seq):
"""Transform a sequence of points or vectors in place.
:param seq: Mutable sequence of :class:`~planar.Vec2` to be
transformed.
:returns: None, the input sequence is mutated in place.
"""
if self is not identity and self != identity:
sa, sb, sc, sd, se, sf, _, _, _ = self
Vec2 = planar.Vec2
for i, (x, y) in enumerate(seq):
seq[i] = Vec2(x*sa + y*sd + sc, x*sb + y*se + sf)
def __invert__(self):
"""Return the inverse transform.
        :raises: :class:`planar.TransformNotInvertibleError` if the
            transform is degenerate.
"""
if self.is_degenerate:
raise planar.TransformNotInvertibleError(
"Cannot invert degenerate transform")
idet = 1.0 / self.determinant
sa, sb, sc, sd, se, sf, _, _, _ = self
ra = se * idet
rb = -sb * idet
rd = -sd * idet
re = sa * idet
return tuple.__new__(Affine,
(ra, rb, -sc*ra - sf*rb,
rd, re, -sc*rd - sf*re,
0.0, 0.0, 1.0))
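    # A small sanity sketch of the inverse (illustrative, not part of the
    # original API docs): the inverse of a pure scale is the reciprocal
    # scale, and a degenerate transform raises instead of inverting:
    #   assert (~Affine.scale(2.0)).almost_equals(Affine.scale(0.5))
    #   ~Affine.scale(0.0)  # raises planar.TransformNotInvertibleError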
__hash__ = tuple.__hash__ # hash is not inherited in Py 3
identity = Affine(1, 0, 0, 0, 1, 0)
"""The identity transform"""
# vim: ai ts=4 sts=4 et sw=4 tw=78
|
wrightjb/bolt-planar
|
transform.py
|
Python
|
bsd-3-clause
| 12,182
| 0.002545
|
# Author: Abhishek Divekar, Jan 2016. Licence: Creative Commons.
import os
import sqlite3
import datetime
def get_conn(db_file_name):
#makes a new file if it does not exist
    BASE_DIR = os.path.dirname(os.path.abspath(__file__)) #gets directory path in which this file is stored
db_path = os.path.join(BASE_DIR, db_file_name)
    #source for "detect_types=sqlite3.PARSE_DECLTYPES":
    #http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion
    #Note: sqlite3's "with conn:" only wraps a transaction (it does not close
    #the connection), and the trailing "return None" was unreachable, so we
    #simply connect and return.
    conn = sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES)
    print "\t\tOpened connection successfully"
    return conn
class Table:
def __init__ (self, input_attributes, input_table):
self.table=input_table
self.attributes=input_attributes
def __len__(self):
return len(self.table)
def __getitem__(self,i):
        '''
        Works for 2D, 3D, or any dimensionality.
        Given a[i][j][k], a[i] returns a tuple for the ith row; letting
        row = a[i], a[i][j][k] becomes row[j][k], so indexing simply
        recurses one level at a time.
        '''
# print type(self)
if type(i)==int:
return self.table[i]
elif type(i)==str:
#assume that they are searching by column, i.e.
#table['col_name']
#this allows access by column and then row
ind=self.attributes.index(i)
col=[]
            for row_no in range(0, len(self.table)):    #was len(self.table)-1, which skipped the last row
col.append(self.table[row_no][ind])
return tuple(col)
def build_where_clause(where_params_list, where_values_list):
if where_params_list!=None and where_values_list!=None:
where_clause=" WHERE "
where_clause+=" %s='%s' "%(str(where_params_list[0]), str(where_values_list[0]))
for i in range(1,len(where_values_list)):
where_clause+=" AND %s='%s' "%(str(where_params_list[i]), str(where_values_list[i]))
else :
where_clause=""
return where_clause
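#Illustrative example of the clause build_where_clause() produces (values are made up):
#	build_where_clause(['Name', 'Year'], ['Bob', 1999])
#	returns: " WHERE  Name='Bob'  AND Year='1999' "
#Note that values are spliced in as quoted strings, so unlike insert_table_sqlite2()
#below this offers no protection against SQL injection.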
def build_select_query(tablename, select_params_list, where_params_list=None, where_values_list=None):
select_query="SELECT "
select_query+=" %s"%select_params_list[0]
for i in range(1,len(select_params_list)):
select_query+=", %s"%select_params_list[i]
select_query+=" FROM %s "%tablename
select_query+=build_where_clause(where_params_list=where_params_list, where_values_list=where_values_list)
select_query+=";"
return select_query
def build_update_query(tablename, update_params_list, update_values_list, where_params_list=None, where_values_list=None):
update_query="UPDATE "+tablename+" SET "
update_query+=" %s='%s' "%(str(update_params_list[0]), str(update_values_list[0]))
for i in range(1,len(update_values_list)):
update_query+=", %s='%s' "%(str(update_params_list[i]), str(update_values_list[i]))
update_query+=build_where_clause(where_params_list=where_params_list, where_values_list=where_values_list)
update_query+=";"
return update_query
def build_insert_query(tablename, insert_params_list, tuple_values_list):
insert_query="INSERT INTO %s(" %tablename+"%s"%insert_params_list[0]
# print insert_query
for param in insert_params_list:
if insert_params_list[0]!= param:
insert_query+=", %s"%param
insert_query+=") VALUES "
#print insert_query
insert_query+="\n('%s'"%tuple_values_list[0][0]
for j in range(1,len(tuple_values_list[0])):
insert_query+=" ,'%s'"%tuple_values_list[0][j]
insert_query+=")"
	for i in range(1,len(tuple_values_list)):
		insert_query+=",\n('%s'"%tuple_values_list[i][0]
		for j in range(1,len(tuple_values_list[i])):
			insert_query+=" ,'%s'"%tuple_values_list[i][j]
		insert_query+=")" #close each row's value list (this was missing and produced malformed SQL)
	insert_query+=";"
# print insert_query
return insert_query
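#Illustrative example of the statement build_insert_query() produces (with the
#row-closing fix above; values are made up):
#	build_insert_query('person', ['ID', 'Name'], [(1, 'Bob'), (2, 'John')])
#	returns: INSERT INTO person(ID, Name) VALUES ('1' ,'Bob'),('2' ,'John');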
def build_date(d, m, y):
return datetime.date(y,m,d)
def build_date2(day, month, year):
return datetime.date(year,month,day)
""" <---------------THE CORRECT WAY TO HANDLE DATES IN SQLITE3 with sqliteDefaults------------------>
#Create a random table
conn.execute('''Create table if not exists person(
ID INTEGER PRIMARY KEY,
Name TEXT,
DOB DATE
);
''')
conn.commit()
#Insert values into the table in one of the accepted formats
sqliteDefaults.insert_table_sqlite(conn,
'person',
('ID', 'Name', 'DOB'),
[
(1, 'Bob', sqliteDefaults.build_date(07,10,1999) ),
(2, 'John', sqliteDefaults.build_date(y=2005,m=8,d=21) ),
(3, 'Stacy', sqliteDefaults.build_date2(month=6,day=25,year=2003)),
(4, 'Emma', datetime.date(2001, 10, 27) )
]
)
#Source: http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion
table=sqliteDefaults.verified_select_sqlite(conn,"select * from person order by DOB desc;")
for row in table:
print row
#OUTPUT:
#(2, u'John', datetime.date(2005, 8, 21))
#(3, u'Stacy', datetime.date(2003, 6, 25))
#(4, u'Emma', datetime.date(2001, 10, 27))
#(1, u'Bob', datetime.date(1999, 10, 7))
print table[2][2].day
#OUTPUT:
# 27
#We can now compare the values as we do normal datetime objects: with > and <, etc
i=1; j=2;
if table[i][2]<table[j][2]:
print "%s is older than %s"%(table[i][1], table[j][1])
elif table[j][2]<table[i][2]:
print "%s is older than %s"%(table[j][1], table[i][1])
#OUTPUT:
# Emma is older than Stacy
"""
def insert_table_sqlite(conn, tablename, insert_params_list, tuple_values_list, commit=True):
insert_query= build_insert_query(tablename=tablename, insert_params_list=insert_params_list, tuple_values_list=tuple_values_list)
# print insert_query
cursor=conn.cursor()
cursor.execute(insert_query)
if commit:
conn.commit()
# database_in_use(conn)
def insert_table_sqlite2(conn, tablename, parameters_tuple=(), tuple_values_list=[], commit=True, print_query=False):
if tuple_values_list==[]:
print("\n\tSQLiteDefaults: insert_table_sqlite() ERROR: tuple_value_list cannot be empty")
return
query=""
if parameters_tuple==():
query="INSERT INTO %s VALUES " %(tablename);
else:
query="INSERT INTO %s %s VALUES" %(tablename, parameters_tuple);
#else:
#print("\n\tSQLiteDefaults: insert_table_sqlite() ERROR: parameters_tuple must be a tuple")
query=query+"(?" + (",?"*(len(parameters_tuple)-1)) + ")" #source: https://docs.python.org/2/library/sqlite3.html
if print_query:
print query
conn.executemany(query, tuple_values_list)
if commit:
conn.commit()
def verified_select_sqlite(conn, select_query, fetch="all", printing=True):
	'''This function performs a basic sanity check that the entered query is a single SELECT
	statement (a light guard against SQL injection). If the check passes, it executes the query
	and builds the table object. It returns None (and prints an ERROR) if the table is empty;
	otherwise it returns the Table object.'''
if 'select' in select_query.lower():
temp = select_query.strip()
if not ';' in temp:
temp+=';'
# print temp
if temp.index(';') == (len(temp)-1):
cursor=conn.cursor()
cursor.execute(temp)
attributes=[]
for i in cursor.description:
attributes.append(i[0])
result_table=()
if fetch.lower()=="all":
result_table=cursor.fetchall()
elif fetch.lower()=="one":
result_table=cursor.fetchone()
else:
if printing:
print "verified_select() ERROR: Improper value '%s' passed to argument 'fetch'"%fetch
return None
			if not result_table:	#fetchall() gives [] and fetchone() gives None when empty; the old "is ()" check never matched
if printing:
print 'verified_select() ERROR: Empty table'
return None
return Table(input_table=result_table, input_attributes=attributes)
else:
if printing:
print 'verified_select() ERROR: Only one query can be fired at a time'
else:
if printing:
print 'verified_select() ERROR: Only select queries can be executed'
def print_table(conn, select_query):
table = verified_select_sqlite(conn, select_query, printing=False)
if table is not None:
print '\n\n----------------------------------------------------------------'
for row in table:
print '\n'
for i in range(0,len(row)):
print row[i],"\t\t",
print '\n\n----------------------------------------------------------------\n'
def list_all_tables(db_file_name):
conn=get_conn(db_file_name)
print_table(conn,"select name from sqlite_master where type = 'table';")
'''
print("\n\n<------------TEST CODE----------->\n")
def select_table_sqlite(conn, tablename, parameters_tuple=(), where_string="", order_by_string=""):
query=""
if parameters_tuple==():
query="SELECT * FROM %s"%(tablename)
elif type(parameters_tuple)=="tuple":
query="SELECT %s FROM %s"%(parameters_tuple, tablename)
else:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: parameters_tuple must be a tuple")
if where_string!="":
query=query+" WHERE "+where_string
elif where_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: where_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
if order_by_string!="":
query=query+" ORDER BY "+order_by_string
elif order_by_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: order_by_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
query=query+";"
table=conn.execute(query)
print type(table)
for row in table:
print type(row)
print row
print("\n<---------END OF TEST CODE-------->\n")
'''
|
ARDivekar/SearchDistribute
|
other/Legacy/sqliteDefaults.py
|
Python
|
mit
| 9,396
| 0.038314
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BusinessIdentity(Model):
"""The integration account partner's business identity.
:param qualifier: The business identity qualifier e.g. as2identity, ZZ,
ZZZ, 31, 32
:type qualifier: str
:param value: The user defined business identity value.
:type value: str
"""
_validation = {
'qualifier': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'qualifier': {'key': 'qualifier', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
    def __init__(self, qualifier, value):
        super(BusinessIdentity, self).__init__()
        self.qualifier = qualifier
        self.value = value
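# A minimal usage sketch (the values below are illustrative, not from the service):
#   identity = BusinessIdentity(qualifier='ZZ', value='ZZ001')
# Both arguments are required, as declared in _validation above.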
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-logic/azure/mgmt/logic/models/business_identity.py
|
Python
|
mit
| 1,162
| 0
|
# -*- coding: utf-8 -*-
from peewee import *
import urllib
import tempfile
import os
from contextlib import contextmanager
from sshtunnel import SSHTunnelForwarder
import traceback
db = MySQLDatabase(
"cod", host="127.0.0.1", user="cod_reader", port=3308, connect_timeout=10000
)
# Get
# ssh wout@axil1.ua.ac.be -L 3307:www.crystallography.net:3306 -N &
# python -m pwiz cod -e mysql -u cod_reader -H 127.0.0.1 -p 3307
#
# mysql -ucod_reader -h 127.0.0.1 -P 3307
# SELECT DATABASE();
# USE cod;
# SHOW TABLES;
# DESCRIBE data;
class BaseModel(Model):
class Meta:
database = db
class Data(BaseModel):
rfsqd = FloatField(db_column="RFsqd", null=True)
ri = FloatField(db_column="RI", null=True)
rall = FloatField(db_column="Rall", null=True)
robs = FloatField(db_column="Robs", null=True)
rref = FloatField(db_column="Rref", null=True)
z = IntegerField(db_column="Z", index=True, null=True)
zprime = FloatField(db_column="Zprime", index=True, null=True)
a = FloatField(index=True, null=True)
acce_code = CharField(index=True, null=True)
alpha = FloatField(index=True, null=True)
authors = TextField(null=True)
b = FloatField(index=True, null=True)
beta = FloatField(index=True, null=True)
c = FloatField(index=True, null=True)
calcformula = CharField(index=True, null=True)
cellformula = CharField(null=True)
cellpressure = FloatField(null=True)
celltemp = FloatField(null=True)
chemname = CharField(index=True, null=True)
commonname = CharField(index=True, null=True)
compoundsource = CharField(null=True)
date = DateField(index=True, null=True)
diffrpressure = FloatField(null=True)
diffrtemp = FloatField(null=True)
doi = CharField(index=True, null=True)
duplicateof = IntegerField(null=True)
file = PrimaryKeyField()
firstpage = CharField(null=True)
flags = CharField(null=True)
formula = CharField(index=True, null=True)
gamma = FloatField(index=True, null=True)
gofall = FloatField(null=True)
gofgt = FloatField(null=True)
gofobs = FloatField(null=True)
issue = CharField(null=True)
journal = CharField(index=True, null=True)
lastpage = CharField(null=True)
method = CharField(index=True, null=True)
mineral = CharField(index=True, null=True)
nel = CharField(index=True, null=True)
onhold = DateField(null=True)
optimal = IntegerField(null=True)
pressurehist = CharField(null=True)
radsymbol = CharField(db_column="radSymbol", null=True)
radtype = CharField(db_column="radType", null=True)
radiation = CharField(null=True)
sg = CharField(index=True, null=True)
sghall = CharField(db_column="sgHall", index=True, null=True)
siga = FloatField(null=True)
sigalpha = FloatField(null=True)
sigb = FloatField(null=True)
sigbeta = FloatField(null=True)
sigc = FloatField(null=True)
sigcellpressure = FloatField(null=True)
sigcelltemp = FloatField(null=True)
sigdiffrpressure = FloatField(null=True)
sigdiffrtemp = FloatField(null=True)
siggamma = FloatField(null=True)
sigvol = FloatField(null=True)
status = CharField(null=True)
svnrevision = IntegerField(index=True, null=True)
text = TextField(index=True)
thermalhist = CharField(null=True)
time = TimeField(index=True, null=True)
title = TextField(null=True)
vol = FloatField(index=True, null=True)
volume = IntegerField(null=True)
wrall = FloatField(db_column="wRall", null=True)
wrobs = FloatField(db_column="wRobs", null=True)
wrref = FloatField(db_column="wRref", null=True)
wavelength = FloatField(null=True)
year = IntegerField(null=True)
class Meta:
db_table = "data"
indexes = ((("mineral", "chemname", "commonname"), False),)
def __str__(self):
ret = "{} ({})\n".format(self.mineral, self.commonname)
ret += "{} ({})\n".format(self.formula, self.chemname)
ret += "{} ({} {} {} {} {} {})\n".format(
self.sg, self.a, self.b, self.c, self.alpha, self.beta, self.gamma
)
ret += "P = {} kPa, T = {} K\n".format(self.diffrpressure, self.diffrtemp)
ret += "P = {} kPa, T = {} K\n".format(self.cellpressure, self.celltemp)
ret += "{} ({})\n".format(self.authors, self.year)
ret += "https://doi.org/{}\n".format(self.doi)
return ret
@staticmethod
def sap(p):
if p is None:
return True
a = 0.9 # atm
b = 1.1
a *= 101.325 # kPa
b *= 101.325
return p >= a and p <= b
@staticmethod
def sat(t):
if t is None:
return True
        a = 15  # celsius
b = 30
a += 273.15 # kelvin
b += 273.15
return t >= a and t <= b
def satp(self):
return (
self.sap(self.diffrpressure)
and self.sap(self.cellpressure)
and self.sat(self.diffrtemp)
and self.sat(self.celltemp)
)
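    # satp() therefore accepts measurements inside the SATP-like window
    # 0.9-1.1 atm = 91.1925-111.4575 kPa and 15-30 degrees celsius =
    # 288.15-303.15 K; missing (None) values pass, see sap()/sat() above.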
@property
def filename(self):
return os.path.join("{}.cif".format(self.file))
@property
def path(self):
return os.path.join(tempfile.gettempdir(), "spectrocrunch", "cif")
@property
def resourcename(self):
return os.path.join(self.path, self.filename)
@property
def url(self):
return "http://www.crystallography.net/cod/{}.cif".format(self.file)
def download(self):
filename = self.resourcename
if not os.path.isfile(filename):
path = self.path
if not os.path.exists(path):
os.makedirs(path)
ciffile = urllib.URLopener()
ciffile.retrieve(self.url, filename)
@classmethod
def namequery(cls, name):
        # peewee needs bitwise "|" here: Python's "or" short-circuits on the
        # truthiness of the first expression, so only cls.mineral == name was
        # ever applied in the original.
        return (
            cls.select()
            .where(
                (cls.mineral == name)
                | (cls.commonname == name)
                | (cls.chemname == name)
            )
            .order_by(cls.year.desc())
        )
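    # Illustrative usage (the mineral name is made up; .limit() is standard
    # peewee and is shown only as a sketch):
    #   for entry in Data.namequery("quartz").limit(3):
    #       print entry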
@contextmanager
def codtunnel():
server = SSHTunnelForwarder(
ssh_address_or_host=("axil1.ua.ac.be", 22),
ssh_username="wout",
ssh_pkey="/users/denolf/.ssh/id_rsa",
remote_bind_address=("www.crystallography.net", 3306),
local_bind_address=("127.0.0.1", 3308),
)
    try:
        server.start()
        yield
    except:
        print traceback.format_exc()
    finally:
        server.stop()
if __name__ == "__main__":
with codtunnel():
query = Data.namequery("copper acetate")
# for entry in query:
# print entry
for entry in query:
if entry.satp():
print entry
entry.download()
break
|
woutdenolf/spectrocrunch
|
scraps/cod.py
|
Python
|
mit
| 6,727
| 0.000743
|
from tests.base import TestBase
from pascal.program import Program
class TestVariables(TestBase):
def test_pass_valid_var(self):
file_name = "tests/mock_pas/all_var.pas"
pascal_program = Program(file_name)
pascal_program.run()
self.assertEqual(len(pascal_program.symbol_table), 7)
self.assertEqual(pascal_program.symbol_address, 23)
def test_pass_assign(self):
file_name = "tests/mock_pas/variables.pas"
pascal_program = Program(file_name)
pascal_program.run()
|
TheLampshady/pascompiler
|
tests/test_variables.py
|
Python
|
apache-2.0
| 537
| 0.001862
|
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showlegend", parent_name="cone", **kwargs):
super(ShowlegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/cone/_showlegend.py
|
Python
|
mit
| 404
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Lucterios mailing documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 22 17:11:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Lucterios courier'
copyright = '2016, sd-libre'
author = 'sd-libre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.2.15122316'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'fr'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Lucteriosmailingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Lucteriosmailing.tex', 'Lucterios mailing Documentation',
'sd-libre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lucterioscore', 'Documentation Lucterios mailing',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Lucteriosmailing', 'Documentation Lucterios mailing',
author, 'Lucteriosmailing', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
Lucterios2/contacts
|
lucterios/mailing/docs/fr/conf.py
|
Python
|
gpl-3.0
| 9,330
| 0
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
from orthography import add_suffix
import unittest
class OrthographyTestCase(unittest.TestCase):
def test_add_suffix(self):
cases = (
('artistic', 'ly', 'artistically'),
('cosmetic', 'ly', 'cosmetically'),
('establish', 's', 'establishes'),
('speech', 's', 'speeches'),
('approach', 's', 'approaches'),
('beach', 's', 'beaches'),
('arch', 's', 'arches'),
('larch', 's', 'larches'),
('march', 's', 'marches'),
('search', 's', 'searches'),
('starch', 's', 'starches'),
('stomach', 's', 'stomachs'),
('monarch', 's', 'monarchs'),
('patriarch', 's', 'patriarchs'),
('oligarch', 's', 'oligarchs'),
('cherry', 's', 'cherries'),
('day', 's', 'days'),
('penny', 's', 'pennies'),
('pharmacy', 'ist', 'pharmacist'),
('melody', 'ist', 'melodist'),
('pacify', 'ist', 'pacifist'),
('geology', 'ist', 'geologist'),
('metallurgy', 'ist', 'metallurgist'),
('anarchy', 'ist', 'anarchist'),
('monopoly', 'ist', 'monopolist'),
('alchemy', 'ist', 'alchemist'),
('botany', 'ist', 'botanist'),
('therapy', 'ist', 'therapist'),
('theory', 'ist', 'theorist'),
('psychiatry', 'ist', 'psychiatrist'),
('lobby', 'ist', 'lobbyist'),
('hobby', 'ist', 'hobbyist'),
('copy', 'ist', 'copyist'),
('beauty', 'ful', 'beautiful'),
('weary', 'ness', 'weariness'),
('weary', 'some', 'wearisome'),
('lonely', 'ness', 'loneliness'),
('narrate', 'ing', 'narrating'),
('narrate', 'or', 'narrator'),
('generalize', 'ability', 'generalizability'),
('reproduce', 'able', 'reproducible'),
('grade', 'ations', 'gradations'),
('urine', 'ary', 'urinary'),
('achieve', 'able', 'achievable'),
('polarize', 'ation', 'polarization'),
('done', 'or', 'donor'),
('analyze', 'ed', 'analyzed'),
('narrate', 'ing', 'narrating'),
('believe', 'able', 'believable'),
('animate', 'ors', 'animators'),
('discontinue', 'ation', 'discontinuation'),
('innovate', 'ive', 'innovative'),
('future', 'ists', 'futurists'),
('illustrate', 'or', 'illustrator'),
('emerge', 'ent', 'emergent'),
('equip', 'ed', 'equipped'),
('defer', 'ed', 'deferred'),
('defer', 'er', 'deferrer'),
('defer', 'ing', 'deferring'),
('pigment', 'ed', 'pigmented'),
('refer', 'ed', 'referred'),
('fix', 'ed', 'fixed'),
('alter', 'ed', 'altered'),
('interpret', 'ing', 'interpreting'),
('wonder', 'ing', 'wondering'),
('target', 'ing', 'targeting'),
('limit', 'er', 'limiter'),
('maneuver', 'ing', 'maneuvering'),
('monitor', 'ing', 'monitoring'),
('color', 'ing', 'coloring'),
('inhibit', 'ing', 'inhibiting'),
('master', 'ed', 'mastered'),
('target', 'ing', 'targeting'),
('fix', 'ed', 'fixed'),
('scrap', 'y', 'scrappy'),
('trip', 's', 'trips'),
('equip', 's', 'equips'),
('bat', 'en', 'batten'),
('smite', 'en', 'smitten'),
('got', 'en', 'gotten'),
('bite', 'en', 'bitten'),
('write', 'en', 'written'),
('flax', 'en', 'flaxen'),
('wax', 'en', 'waxen'),
('fast', 'est', 'fastest'),
('white', 'er', 'whiter'),
('crap', 'y', 'crappy'),
('lad', 'er', 'ladder'),
)
failed = []
for word, suffix, expected in cases:
if add_suffix(word, suffix) != expected:
failed.append((word, suffix, expected))
for word, suffix, expected in failed:
            print 'add_suffix(%s, %s) is %s not %s' % (word, suffix, add_suffix(word, suffix), expected)
self.assertEqual(len(failed), 0)
if __name__ == '__main__':
unittest.main()
|
blockbomb/plover
|
plover/test_orthography.py
|
Python
|
gpl-2.0
| 4,487
| 0.002452
|
'''
chg1: first change to multi-person pose estimation
'''
from __future__ import print_function, absolute_import
import argparse
import time
import matplotlib.pyplot as plt
import os
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pose import Bar
from pose.utils.logger import Logger
from pose.utils.evaluation import accuracy, AverageMeter, final_preds
from pose.utils.misc import save_checkpoint, save_pred, LRDecay
from pose.utils.osutils import mkdir_p, isfile, isdir, join
from pose.utils.imutils import batch_with_heatmap
from pose.utils.transforms import fliplr, flip_back
import pose.models as models
import pose.datasets as datasets
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
# to calculate acc
idx = [1,2,3,4,5,6,11,12,15,16]
best_acc = 0
def main(args):
global best_acc
# create checkpoint dir
if not isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# create model
print("==> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](num_classes=16)
# multi-GPU
model = torch.nn.DataParallel(model).cuda()
# the total number of parameters
print(' Total params size: %.2fM' % (sum(para.numel() for para in model.parameters())/1000000.0))
# define criterion and optimizer
criterion = torch.nn.MSELoss(size_average=True).cuda()
optimizer = torch.optim.RMSprop(model.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay)
# optionally resume from a checkpoint
# --------
title = 'mpii-' + args.arch
if args.resume:
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
logger = Logger(join(args.checkpoint, 'log.txt'), title=title, resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            # fall back to a fresh log file so the logger calls below do not fail
            logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
            logger.set_names(['train-loss', 'val-loss', 'val-acc'])
# --------
else:
# open the log file
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
# set names of log file
logger.set_names(['train-loss', 'val-loss', 'val-acc'])
# using the fastest algorithm
cudnn.benchmark = True
# Data loading code
train_loader = torch.utils.data.DataLoader(
dataset = datasets.Mpii('data/mpii/mpii_annotations.json', args.dataPath),
batch_size = args.train_batch,
shuffle = True,
num_workers = args.workers,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(
dataset = datasets.Mpii('data/mpii/mpii_annotations.json', args.dataPath, train=False),
batch_size = args.test_batch,
shuffle = False,
num_workers = args.workers,
pin_memory=True)
if args.evaluate:
print('\nEvaluation only')
loss, acc, predictions = validate(val_loader, model, criterion, args.debug, args.flip)
save_pred(predictions, checkpoint=args.checkpoint)
return
for epoch in range(args.start_epoch, args.Epochs):
# lr decay
lr = LRDecay(optimizer, epoch, args.lr)
print('\nEpoch: %d | lr: %.8f' % (epoch, lr))
# train for one epoch
train_loss = train(train_loader, model, criterion, optimizer, epoch - 1, args.debug)
# evaluate on validation set
valid_loss, valid_acc, predictions = validate(val_loader, model, criterion, args.debug, args.flip)
# append logger file
logger.append([train_loss, valid_loss, valid_acc])
# remember best acc and save checkpoint
is_best = valid_acc > best_acc
best_acc = max(valid_acc, best_acc)
save_checkpoint({
'epoch': epoch,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, predictions, is_best, checkpoint = args.checkpoint)
logger.close()
logger.plot()
plt.savefig(os.path.join(args.checkpoint, 'log.eps'))
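# Illustrative invocation (paths and the checkpoint file name are assumptions,
# not taken from this repo's docs):
#   python example/main.py -arch hg4 -dataPath /path/to/mpii/images/ \
#       --train-batch 6 --lr 2.5e-4 -c checkpoint/hg4
# Add --resume checkpoint/hg4/checkpoint.pth.tar to continue training, or -e
# to evaluate only.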
def train(train_loader, model, criterion, optimizer, epoch, debug=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
# switch to train mode
model.train()
end = time.time()
gt_win, pred_win = None, None
bar = Bar('Processing', max=len(train_loader))
print("the length of train_loader: {}".format(len(train_loader)))
for i, (inputs, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
inputs = inputs.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(inputs)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
# Calculate intermediate loss
loss = criterion(output[0], target_var)
for j in range(1, len(output)):
loss += criterion(output[j], target_var)
if debug: # visualize groundtruth and predictions
gt_batch_img = batch_with_heatmap(inputs, target)
pred_batch_img = batch_with_heatmap(inputs, output[-1].data)
if not gt_win or not pred_win:
ax1 = plt.subplot(121)
ax1.title.set_text('Groundtruth')
gt_win = plt.imshow(gt_batch_img)
ax2 = plt.subplot(122)
ax2.title.set_text('Prediction')
pred_win = plt.imshow(pred_batch_img)
else:
gt_win.set_data(gt_batch_img)
pred_win.set_data(pred_batch_img)
plt.pause(.05)
plt.draw()
# measure accuracy and record loss
losses.update(loss.data[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f}'.format(
batch=i + 1,
size=len(train_loader),
data=data_time.val,
bt=batch_time.val,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
)
bar.next()
bar.finish()
return losses.avg
def validate(val_loader, model, criterion, debug=False, flip=True):
batch_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
# predictions
predictions = torch.Tensor(val_loader.dataset.__len__(), 16, 2)
# switch to evaluate mode
model.eval()
gt_win, pred_win = None, None
end = time.time()
bar = Bar('Processing', max=len(val_loader))
print("length of output:{}".format(len(val_loader)))
for i, (inputs, target, meta) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(inputs.cuda(), volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
# score_map: 16*64*64
score_map = output[-1].data.cpu()
if flip:
flip_input_var = torch.autograd.Variable(
torch.from_numpy(fliplr(inputs.clone().numpy())).float().cuda(),
volatile=True
)
flip_output_var = model(flip_input_var)
flip_output = flip_back(flip_output_var[-1].data.cpu())
score_map += flip_output
#print("scor")
loss = 0
for o in output:
loss += criterion(o, target_var)
# target : 16*64*64
acc = accuracy(score_map.cuda(), target, idx)
# generate predictions
preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
for n in range(score_map.size(0)):
predictions[meta['index'][n], :, :] = preds[n, :, :]
if debug:
gt_batch_img = batch_with_heatmap(inputs, target)
pred_batch_img = batch_with_heatmap(inputs, score_map)
if not gt_win or not pred_win:
plt.subplot(121)
gt_win = plt.imshow(gt_batch_img)
plt.subplot(122)
pred_win = plt.imshow(pred_batch_img)
else:
gt_win.set_data(gt_batch_img)
pred_win.set_data(pred_batch_img)
plt.pause(.05)
plt.draw()
# measure accuracy and record loss
losses.update(loss.data[0], inputs.size(0))
acces.update(acc[0], inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .9f}'.format(
batch=i + 1,
size=len(val_loader),
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
acc=acces.avg
)
bar.next()
bar.finish()
return losses.avg, acces.avg, predictions
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='hg_pytorch training')
## General options
parser.add_argument('-dataPath', default = '/home/guoqiang/hg_train/data/mpii/images/',
help = 'the path to images data')
## Model options
parser.add_argument('-arch', default = 'hg4', metavar = 'ARCH', choices = model_names,
        help = 'model architecture: '+' | '.join(model_names)+' (default: hg4)')
parser.add_argument('-j', '--workers', default = 1, type = int, metavar = 'N',
        help = 'number of data loading workers (default: 1)')
parser.add_argument('--Epochs', default = 50, type = int, metavar='EPOCH',
help = 'number of total Epochs to run')
parser.add_argument('--start-epoch', default = 1, type = int,
help = 'manual epoch number (useful for continue)')
parser.add_argument('--train-batch', default = 6, type = int,
help = 'train batchsize')
parser.add_argument('--test-batch', default = 6, type = int,
help = 'test batchsize')
parser.add_argument('--lr', default = 2.5e-4, type = float,
help = 'initial learning rate')
parser.add_argument('--momentum', default = 0, type = float,
help = 'momentum')
parser.add_argument('--weight-decay', '--wd', default = 0, type = float,
        help = 'weight decay (default: 0)')
parser.add_argument('--print-freq', '-p', default = 10, type = int,
help = 'print frequency (default: 10)')
parser.add_argument('-c', '--checkpoint', default = 'checkpoint', type = str, metavar='PATH',
help = 'path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default = '', type = str, metavar='PATH',
help = 'path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest = 'evaluate', action = 'store_true',
help = 'evaluate model on validation set')
parser.add_argument('-d', '--debug', dest = 'debug', action = 'store_true',
help = 'show intermediate results')
parser.add_argument('-f', '--flip', dest = 'flip', action = 'store_true',
help = 'flip the input during validation')
main(parser.parse_args())
|
weigq/pytorch-pose
|
example/main.py
|
Python
|
gpl-3.0
| 12,764
| 0.014964
|
#!/usr/bin/env python
from ConfigParser import ConfigParser
from ordereddict import OrderedDict
import sys
def make_parser():
parser = ConfigParser(dict_type=OrderedDict)
parser.optionxform = str
return parser
def transform(sectionName):
sectionName = sectionName.replace(",Dialog=", ", Dialog=")
if sectionName.startswith("View="):
if sectionName.endswith("Viewer"):
return "Type=Viewer, " + sectionName.split(", ")[0]
else:
parts = sectionName.split(",")
parts.reverse()
if len(parts) == 1:
parts.insert(0, "Type=View")
return ", ".join(parts)
else:
return sectionName
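# Illustrative rewrites performed by transform() (section names made up):
#   "View=Foo"            -> "Type=View, View=Foo"
#   "View=FooViewer"      -> "Type=Viewer, View=FooViewer"
#   "View=Foo,Dialog=Bar" -> " Dialog=Bar, View=Foo"
# Anything not starting with "View=" is returned unchanged; the leading space
# in the last case is kept as-is by the original logic.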
if __name__ == "__main__":
fileName = sys.argv[1]
parser = make_parser()
parser.read([ fileName ])
newParser = make_parser()
for section in parser.sections():
newSection = transform(section)
newParser.add_section(newSection)
for option, value in parser.items(section):
newParser.set(newSection, option, value)
newParser.write(open(fileName + ".tmp", "w"))
|
emilybache/texttest-runner
|
src/main/python/storytext/bin/migrate_uimap.py
|
Python
|
mit
| 1,119
| 0.004468
|
# -*- coding: utf-8 -*-
"""
"Sandbox" module for exploring API useful for digital labbooks.
Examples
--------
>>> from chempy.units import to_unitless, default_units as u
>>> s1 = Solution(0.1*u.dm3, {'CH3OH': 0.1 * u.molar})
>>> s2 = Solution(0.3*u.dm3, {'CH3OH': 0.4 * u.molar, 'Na+': 2e-3*u.molar, 'Cl-': 2e-3*u.molar})
>>> s3 = s1 + s2
>>> abs(to_unitless(s3.volume - 4e-4 * u.m**3, u.dm3)) < 1e-15
True
>>> s3.concentrations.isclose({'CH3OH': 0.325*u.molar, 'Na+': 1.5e-3*u.molar, 'Cl-': 1.5e-3*u.molar})
True
>>> s4 = s3.dissolve({'CH3OH': 1*u.gram})
>>> abs(s4.concentrations['CH3OH'] - (0.325 + 1/(12.011 + 4*1.008 + 15.999)/.4)*u.molar) < 1e-4
True
"""
import copy
from .chemistry import Substance
from .units import (
get_derived_unit,
html_of_unit,
is_unitless,
SI_base_registry,
to_unitless,
rescale,
default_units as u,
)
from .util.arithmeticdict import ArithmeticDict, _imul, _itruediv
from .printing import as_per_substance_html_table
class QuantityDict(ArithmeticDict):
def __init__(self, units, *args, **kwargs):
self.units = units
super(QuantityDict, self).__init__(lambda: 0 * self.units, *args, **kwargs)
self._check()
@classmethod
def of_quantity(cls, quantity_name, *args, **kwargs):
instance = cls(
get_derived_unit(SI_base_registry, quantity_name), *args, **kwargs
)
instance.quantity_name = quantity_name
return instance
def rescale(self, new_units):
return self.__class__(
new_units, {k: rescale(v, new_units) for k, v in self.items()}
)
def _repr_html_(self):
if hasattr(self, "quantity_name"):
header = self.quantity_name.capitalize() + " / "
else:
header = ""
header += html_of_unit(self.units)
tab = as_per_substance_html_table(to_unitless(self, self.units), header=header)
return tab._repr_html_()
def _check(self):
for k, v in self.items():
if not is_unitless(v / self.units):
raise ValueError(
"entry for %s (%s) is not compatible with %s" % (k, v, self.units)
)
def __setitem__(self, key, value):
if not is_unitless(value / self.units):
raise ValueError(
"entry for %s (%s) is not compatible with %s" % (key, value, self.units)
)
super(QuantityDict, self).__setitem__(key, value)
def copy(self):
return self.__class__(self.units, copy.deepcopy(list(self.items())))
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, repr(self.units), dict(self)
)
def __mul__(self, other):
d = dict(copy.deepcopy(list(self.items())))
_imul(d, other)
return self.__class__(self.units * getattr(other, "units", 1), d)
def __truediv__(self, other):
d = dict(copy.deepcopy(list(self.items())))
_itruediv(d, other)
return self.__class__(self.units / getattr(other, "units", 1), d)
def __floordiv__(self, other):
a = self.copy()
if getattr(other, "units", 1) != 1:
raise ValueError("Floor division with quantities not defined")
a //= other
return a
def __rtruediv__(self, other):
""" other / self """
return self.__class__(
getattr(other, "units", 1) / self.units,
{k: other / v for k, v in self.items()},
)
def __rfloordiv__(self, other):
""" other // self """
return self.__class__(
getattr(other, "units", 1) / self.units,
{k: other // v for k, v in self.items()},
)
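# A minimal sketch of QuantityDict arithmetic (assuming the default units
# imported above as ``u``; the values are illustrative):
#   c = QuantityDict(u.molar, {'Na+': 1e-3 * u.molar})
#   n = c * (0.5 * u.dm3)   # per-substance amounts, units molar * dm3
#   c2 = n / (0.5 * u.dm3)  # back to concentrations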
class AutoRegisteringSubstanceDict(object):
def __init__(self, factory=Substance.from_formula):
self.factory = factory
self._store = {}
def __getitem__(self, key):
if key not in self._store:
self._store[key] = self.factory(key)
return self._store[key]
class Solution(object):
def __init__(self, volume, concentrations, substances=None, solvent=None):
if not is_unitless(volume / u.dm3):
raise ValueError("volume need to have a unit (e.g. dm3)")
self.volume = volume
self.concentrations = QuantityDict(u.molar, concentrations)
if substances is None:
substances = AutoRegisteringSubstanceDict()
self.substances = substances
self.solvent = solvent
def __eq__(self, other):
if not isinstance(other, Solution):
return NotImplemented
return all(
[
getattr(self, k) == getattr(other, k)
for k in "volume concentrations substances solvent".split()
]
)
def __add__(self, other):
if self.solvent != other.solvent:
raise NotImplementedError(
"Mixed solvent should be represented as concentrations"
)
tot_amount = (
self.concentrations * self.volume + other.concentrations * other.volume
)
tot_vol = self.volume + other.volume
return Solution(tot_vol, tot_amount / tot_vol, self.substances, self.solvent)
def dissolve(self, masses):
contrib = QuantityDict(
u.molar,
{
k: v / self.substances[k].molar_mass() / self.volume
for k, v in masses.items()
},
)
return Solution(
self.volume, self.concentrations + contrib, self.substances, self.solvent
)
def withdraw(self, volume):
if volume > self.volume:
raise ValueError(
"Cannot withdraw a volume greater than the solution volume"
)
        if volume < volume * 0:  # "volume * 0" is a zero carrying the same units as volume
            raise ValueError("Cannot withdraw a negative volume")
self.volume -= volume
return Solution(volume, self.concentrations, self.substances, self.solvent)
|
bjodah/aqchem
|
chempy/_solution.py
|
Python
|
bsd-2-clause
| 5,994
| 0.001835
|
from django import forms
from django.contrib.auth.models import User
from django.forms.models import ModelForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import UserProfile
class UserForm(ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
#email = forms.EmailField(max_length=100, required=False)
class Meta:
model = User
#fields = ('username', 'email', 'password')
## I really don't need your email and you're safer not sharing it with me
fields = ('username', 'password')
helper = FormHelper()
helper.form_method = 'POST'
helper.add_input(Submit('post', 'post', css_class='btn-primary'))
class LoginForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'password')
class UserProfileForm(ModelForm):
class Meta:
model = UserProfile
fields = ('website', 'picture')
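# A minimal usage sketch in a view (names are illustrative, not from this app):
#   form = UserForm(request.POST or None)
#   if form.is_valid():
#       user = form.save(commit=False)
#       user.set_password(form.cleaned_data['password'])
#       user.save()
# set_password() is needed because ModelForm.save() would otherwise store the
# raw password string.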
|
kburts/django-playlist
|
django_playlist/auth/forms.py
|
Python
|
mit
| 954
| 0.008386
|
# This file is part of Rubber and thus covered by the GPL
# (c) Sebastian Reichel, 2012
"""
Dependency analysis for package 'ltxtable' in Rubber.
"""
def setup (document, context):
global doc
doc = document
doc.hook_macro('LTXtable', 'aa', hook_ltxtable)
def hook_ltxtable (loc, width, file):
# If the file name looks like it contains a control sequence or a macro
# argument, forget about this \LTXtable.
if file.find('\\') < 0 and file.find('#') < 0:
doc.add_source(file)
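# Illustrative example (file name made up): a document containing
#   \LTXtable{\textwidth}{mytable.tex}
# triggers hook_ltxtable with width='\textwidth' and file='mytable.tex',
# registering mytable.tex as a source dependency of the document.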
|
sre/rubber
|
src/latex_modules/ltxtable.py
|
Python
|
gpl-2.0
| 484
| 0.022727
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Dimension(dict):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Name':
self._name = value
elif name == 'Value':
if self._name in self:
self[self._name].append(value)
else:
self[self._name] = [value]
else:
setattr(self, name, value)
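    # Illustrative result (XML excerpt made up): parsing
    #   <member><Name>InstanceId</Name><Value>i-1234</Value></member>
    #   <member><Name>InstanceId</Name><Value>i-5678</Value></member>
    # yields {'InstanceId': ['i-1234', 'i-5678']}; repeated names append to
    # the same list, as handled in endElement above.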
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/boto/ec2/cloudwatch/dimension.py
|
Python
|
agpl-3.0
| 1,532
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_sas_logical_jbod_attachment_facts
short_description: Retrieve facts about one or more of the OneView SAS Logical JBOD Attachments.
version_added: "2.3"
description:
- Retrieve facts about one or more of the SAS Logical JBOD Attachments from OneView.
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 3.0"
author: "Abilio Parada (@abiliogp)"
options:
name:
description:
- Name of SAS Logical JBOD Attachment.
required: false
notes:
- This resource is only available on HPE Synergy
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all SAS Logical JBOD Attachment
oneview_sas_logical_jbod_attachment_facts:
config: "{{ config_path }}"
- debug: var=sas_logical_jbod_attachments
- name: Gather paginated, filtered and sorted facts about SAS Logical JBOD Attachment
oneview_sas_logical_jbod_attachment_facts:
config: "{{ config }}"
params:
start: 0
count: 2
sort: 'name:descending'
filter: "state=Deployed"
- debug: var=sas_logical_jbod_attachments
- name: Gather facts about a SAS Logical JBOD Attachment by name
oneview_sas_logical_jbod_attachment_facts:
config: "{{ config_path }}"
name: "logical-enclosure-SAS-Logical-Interconnect-Group-BDD-1-SLJA-1"
- debug: var=sas_logical_jbod_attachments
'''
RETURN = '''
sas_logical_jbod_attachments:
description: Has all the OneView facts about the SAS Logical JBOD Attachment.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class SasLogicalJbodAttachmentFactsModule(OneViewModuleBase):
def __init__(self):
argument_spec = dict(
name=dict(required=False, type='str'),
params=dict(required=False, type='dict'),
)
super(SasLogicalJbodAttachmentFactsModule, self).__init__(additional_arg_spec=argument_spec)
def execute_module(self):
if self.module.params['name']:
name = self.module.params['name']
resources = self.oneview_client.sas_logical_jbod_attachments.get_by('name', name)
else:
resources = self.oneview_client.sas_logical_jbod_attachments.get_all(**self.facts_params)
return dict(changed=False,
ansible_facts=dict(sas_logical_jbod_attachments=resources))
def main():
SasLogicalJbodAttachmentFactsModule().run()
if __name__ == '__main__':
main()
|
HewlettPackard/oneview-ansible
|
library/oneview_sas_logical_jbod_attachment_facts.py
|
Python
|
apache-2.0
| 3,330
| 0.002402
|
"""
pynipap - a Python NIPAP client library
=======================================
pynipap is a Python client library for the NIPAP IP address planning
system. It is structured as a simple ORM.
There are three ORM-classes:
* :class:`VRF`
* :class:`Pool`
* :class:`Prefix`
Each of these maps to the NIPAP objects with the same name. See the main
NIPAP API documentation for an overview of the different object types and
what they are used for.
There are also a few supporting classes:
* :class:`AuthOptions` - Authentication options.
And a bunch of exceptions:
* :class:`NipapError`
* :class:`NipapNonExistentError`
* :class:`NipapInputError`
* :class:`NipapMissingInputError`
* :class:`NipapExtraneousInputError`
* :class:`NipapNoSuchOperatorError`
* :class:`NipapValueError`
* :class:`NipapDuplicateError`
* :class:`NipapAuthError`
* :class:`NipapAuthenticationError`
* :class:`NipapAuthorizationError`
General usage
-------------
pynipap has been designed to be simple to use.
Preparations
^^^^^^^^^^^^
Make sure that pynipap is accessible in your `sys.path`, you can test it by
starting a python shell and running::
import pynipap
If that works, you are good to go!
To simplify your code slightly, you can import the individual classes into
your main namespace::
import pynipap
from pynipap import VRF, Pool, Prefix
Before you can access NIPAP you need to specify the URL to the NIPAP
XML-RPC service and the authentication options to use for your connection.
NIPAP has an authentication system which is somewhat involved; see the
main NIPAP documentation for details.
The URL, including the user credentials, is set in the pynipap module
variable `xmlrpc_uri` as so::
pynipap.xmlrpc_uri = "http://user:pass@127.0.0.1:9002"
The minimum authentication option which we need to set is the
`authoritative_source` option, which specifies what system is accessing
NIPAP. This is logged for each query which alters the NIPAP database and
attached to each prefix which is created or edited. Well-behaved clients
are required to honor this and to verify that the user really wants to
alter a prefix which was last edited by another system. The
:class:`AuthOptions` class is a class with a shared state, similar to a
singleton; that is, once a first instance is created, each subsequent
instance will be a copy of the first one. In this way the authentication
options can be accessed from all of the pynipap classes. ::
a = AuthOptions({
'authoritative_source': 'my_fancy_nipap_client'
})
After this, we are good to go!
Accessing data
^^^^^^^^^^^^^^
To fetch data from NIPAP, a set of class methods (decorated with
@classmethod) is defined in each of the ORM classes. They are:
* :func:`get` - Get a single object from its ID.
* :func:`list` - List objects matching a simple criteria.
* :func:`search` - Perform a full-blown search.
* :func:`smart_search` - Perform a magic search from a string.
Each of these functions returns either an instance of the requested class
(:py:class:`VRF`, :class:`Pool`, :class:`Prefix`) or a list of such
instances. The :func:`search` and :func:`smart_search` functions also
embed the lists in dicts which contain search metadata.
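Based on the implementations below, the returned dicts have the following
shape (:func:`smart_search` additionally includes an `interpretation` key)::
    {
        'result': [ ... ],           # list of matching objects
        'search_options': { ... }    # options echoed back by the server
    }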
The easiest way to get data out of NIPAP is to use the :func:`get`-method,
given that you know the ID of the object you want to fetch::
# Fetch VRF with ID 1 and print its name
vrf = VRF.get(1)
print(vrf.name)
To list all objects, each class has a :func:`list`-function. ::
# list all pools
pools = Pool.list()
# print the name of the pools
for p in pools:
print(p.name)
Each of the list functions can also take a `spec`-dict as an argument.
With the spec you can perform a simple search operation by specifying
object attribute values. ::
# List pools with a default type of 'assignment'
pools = Pool.list({ 'default_type': 'assignment' })
Performing searches
^^^^^^^^^^^^^^^^^^^
Searches are performed with the :func:`search` and :func:`smart_search`
class methods listed above, which are available on the :class:`VRF`,
:class:`Pool` and :class:`Prefix` classes; see the per-class documentation
below for details.
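As a minimal sketch (the query string is only an example), a smart prefix
search could look like this::
    result = Prefix.smart_search('192.0.2.0/24', {})
    for p in result['result']:
        print(p.prefix)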
Saving changes
^^^^^^^^^^^^^^
Changes made to objects are not automatically saved. To save the changes,
simply run the object's :func:`save`-method::
vrf.name = "Spam spam spam"
vrf.save()
Error handling
--------------
As is customary in Python applications, an error results in an exception
being thrown. All pynipap exceptions extend the main exception
:class:`NipapError`. A goal with the pynipap library has been to make the
XML-RPC channel to the backend as transparent as possible, so the XML-RPC
Faults which the NIPAP server returns in case of errors are converted and
re-thrown as new exceptions which also extend :class:`NipapError`; for
example, :class:`NipapDuplicateError` is thrown when a duplicate key
error occurs in NIPAP.
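For example, to handle a duplicate entry gracefully (a minimal sketch; the
attribute values are illustrative)::
    from pynipap import Prefix, NipapDuplicateError
    p = Prefix()
    p.prefix = '192.0.2.0/24'
    p.type = 'assignment'
    try:
        p.save()
    except NipapDuplicateError:
        print('prefix already exists')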
Classes
-------
"""
import sys
import logging
if sys.version_info[0] < 3:
import xmlrpclib
int = long
else:
import xmlrpc.client as xmlrpclib
__version__ = "0.28.4"
__author__ = "Kristian Larsson, Lukas Garberg"
__author_email__ = "kll@tele2.net, lukas@spritelink.net"
__copyright__ = "Copyright 2011, Kristian Larsson, Lukas Garberg"
__license__ = "MIT"
__status__ = "Development"
__url__ = "http://SpriteLink.github.com/NIPAP"
# This variable holds the URI of the NIPAP XML-RPC service which will be used.
# It must be set before pynipap can be used!
xmlrpc_uri = None
# Caching of objects is enabled by default but can be disabled for certain
# scenarios. Since there is no cache expiration, it can be useful to disable
# caching for long-running applications.
CACHE = True
class AuthOptions:
""" A global-ish authentication option container.
Note that this essentially is a global variable. If you handle multiple
queries from different users, you need to make sure that the
AuthOptions-instances are set to the current user's.
"""
__shared_state = {}
options = None
def __init__(self, options = None):
""" Create a shared option container.
The argument 'options' must be a dict containing authentication
options.
"""
self.__dict__ = self.__shared_state
if len(self.__shared_state) == 0 and options is None:
raise NipapMissingInputError("authentication options not set")
if options is not None:
self.options = options
class XMLRPCConnection:
""" Handles a shared XML-RPC connection.
"""
__shared_state = {}
connection = None
_logger = None
def __init__(self):
""" Create XML-RPC connection.
The connection will be created to the URL set in the module
            variable `xmlrpc_uri`. The instantiation will fail unless this
variable is set.
"""
if xmlrpc_uri is None:
raise NipapError('XML-RPC URI not specified')
# creating new instance
self.connection = xmlrpclib.ServerProxy(xmlrpc_uri, allow_none=True,
use_datetime=True)
self._logger = logging.getLogger(self.__class__.__name__)
class Pynipap:
""" A base class for the pynipap model classes.
        All Pynipap classes which map to data in NIPAP (:py:class:`VRF`,
        :py:class:`Pool`, :py:class:`Prefix`) extend this class.
"""
_logger = None
""" Logging instance for this object.
"""
id = None
""" Internal database ID of object.
"""
def __eq__(self, other):
""" Perform test for equality.
"""
# Only possible if we have ID numbers set
if self.id is None or other.id is None:
return False
return self.id == other.id
def __init__(self, id=None):
""" Creates logger and XML-RPC-connection.
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._auth_opts = AuthOptions()
self.id = id
class Tag(Pynipap):
""" A Tag.
"""
name = None
""" The Tag name
"""
@classmethod
def from_dict(cls, tag=None):
""" Create new Tag-object from dict.
Suitable for creating objects from XML-RPC data.
All available keys must exist.
"""
if tag is None:
tag = {}
        t = Tag()
        t.name = tag['name']
        return t
@classmethod
def search(cls, query, search_opts=None):
""" Search VRFs.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_tag(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for xml_tag in search_result['result']:
result['result'].append(Tag.from_dict(xml_tag))
return result
class VRF(Pynipap):
""" A VRF.
"""
rt = None
""" The VRF RT, as a string (x:y or x.x.x.x:y).
"""
name = None
""" The name of the VRF, as a string.
"""
description = None
""" VRF description, as a string.
"""
num_prefixes_v4 = None
""" Number of IPv4 prefixes in this VRF
"""
num_prefixes_v6 = None
""" Number of IPv6 prefixes in this VRF
"""
total_addresses_v4 = None
""" Total number of IPv4 addresses in this VRF
"""
total_addresses_v6 = None
""" Total number of IPv6 addresses in this VRF
"""
used_addresses_v4 = None
""" Number of used IPv4 addresses in this VRF
"""
used_addresses_v6 = None
""" Number of used IPv6 addresses in this VRF
"""
free_addresses_v4 = None
""" Number of free IPv4 addresses in this VRF
"""
free_addresses_v6 = None
""" Number of free IPv6 addresses in this VRF
"""
def __init__(self):
Pynipap.__init__(self)
self.tags = {}
self.avps = {}
@classmethod
def list(cls, vrf=None):
""" List VRFs.
"""
if vrf is None:
vrf = {}
xmlrpc = XMLRPCConnection()
try:
vrf_list = xmlrpc.connection.list_vrf(
{
'vrf': vrf,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for v in vrf_list:
res.append(VRF.from_dict(v))
return res
@classmethod
def from_dict(cls, parm, vrf = None):
""" Create new VRF-object from dict.
Suitable for creating objects from XML-RPC data.
All available keys must exist.
"""
if vrf is None:
vrf = VRF()
vrf.id = parm['id']
vrf.rt = parm['rt']
vrf.name = parm['name']
vrf.description = parm['description']
vrf.tags = {}
for tag_name in parm['tags']:
tag = Tag.from_dict({'name': tag_name })
vrf.tags[tag_name] = tag
vrf.avps = parm['avps']
vrf.num_prefixes_v4 = int(parm['num_prefixes_v4'])
vrf.num_prefixes_v6 = int(parm['num_prefixes_v6'])
vrf.total_addresses_v4 = int(parm['total_addresses_v4'])
vrf.total_addresses_v6 = int(parm['total_addresses_v6'])
vrf.used_addresses_v4 = int(parm['used_addresses_v4'])
vrf.used_addresses_v6 = int(parm['used_addresses_v6'])
vrf.free_addresses_v4 = int(parm['free_addresses_v4'])
vrf.free_addresses_v6 = int(parm['free_addresses_v6'])
return vrf
@classmethod
def get(cls, id):
""" Get the VRF with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['VRF']:
log.debug('cache hit for VRF %d' % id)
return _cache['VRF'][id]
log.debug('cache miss for VRF %d' % id)
try:
vrf = VRF.list({ 'id': id })[0]
except IndexError:
raise NipapNonExistentError('no VRF with ID ' + str(id) + ' found')
_cache['VRF'][id] = vrf
return vrf
@classmethod
def search(cls, query, search_opts=None):
""" Search VRFs.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_vrf(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for v in search_result['result']:
result['result'].append(VRF.from_dict(v))
return result
@classmethod
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart VRF search.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_vrf(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['result'] = list()
for v in smart_result['result']:
result['result'].append(VRF.from_dict(v))
return result
def save(self):
""" Save changes made to object to NIPAP.
"""
xmlrpc = XMLRPCConnection()
data = {
'rt': self.rt,
'name': self.name,
'description': self.description,
'tags': [],
'avps': self.avps
}
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.id is None:
# New object, create
try:
vrf = xmlrpc.connection.add_vrf(
{
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
else:
# Old object, edit
try:
vrfs = xmlrpc.connection.edit_vrf(
{
'vrf': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(vrfs) != 1:
raise NipapError('VRF edit returned %d entries, should be 1.' % len(vrfs))
vrf = vrfs[0]
# Refresh object data with attributes from add/edit operation
VRF.from_dict(vrf, self)
_cache['VRF'][self.id] = self
def remove(self):
""" Remove VRF.
"""
xmlrpc = XMLRPCConnection()
try:
xmlrpc.connection.remove_vrf(
{
'vrf': { 'id': self.id },
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if self.id in _cache['VRF']:
del(_cache['VRF'][self.id])
class Pool(Pynipap):
""" An address pool.
"""
name = None
description = None
default_type = None
ipv4_default_prefix_length = None
ipv6_default_prefix_length = None
vrf = None
member_prefixes_v4 = None
member_prefixes_v6 = None
used_prefixes_v4 = None
used_prefixes_v6 = None
free_prefixes_v4 = None
free_prefixes_v6 = None
total_prefixes_v4 = None
total_prefixes_v6 = None
total_addresses_v4 = None
total_addresses_v6 = None
used_addresses_v4 = None
used_addresses_v6 = None
free_addresses_v4 = None
free_addresses_v6 = None
def __init__(self):
Pynipap.__init__(self)
self.tags = {}
self.avps = {}
def save(self):
""" Save changes made to pool to NIPAP.
"""
xmlrpc = XMLRPCConnection()
data = {
'name': self.name,
'description': self.description,
'default_type': self.default_type,
'ipv4_default_prefix_length': self.ipv4_default_prefix_length,
'ipv6_default_prefix_length': self.ipv6_default_prefix_length,
'tags': [],
'avps': self.avps
}
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.id is None:
# New object, create
try:
pool = xmlrpc.connection.add_pool(
{
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
else:
# Old object, edit
try:
pools = xmlrpc.connection.edit_pool(
{
'pool': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(pools) != 1:
raise NipapError('Pool edit returned %d entries, should be 1.' % len(pools))
pool = pools[0]
# Refresh object data with attributes from add/edit operation
Pool.from_dict(pool, self)
_cache['Pool'][self.id] = self
def remove(self):
""" Remove pool.
"""
xmlrpc = XMLRPCConnection()
try:
xmlrpc.connection.remove_pool(
{
'pool': { 'id': self.id },
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if self.id in _cache['Pool']:
del(_cache['Pool'][self.id])
@classmethod
def get(cls, id):
""" Get the pool with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['Pool']:
log.debug('cache hit for pool %d' % id)
return _cache['Pool'][id]
log.debug('cache miss for pool %d' % id)
try:
pool = Pool.list({'id': id})[0]
except (IndexError, KeyError):
raise NipapNonExistentError('no pool with ID ' + str(id) + ' found')
_cache['Pool'][id] = pool
return pool
@classmethod
def search(cls, query, search_opts=None):
""" Search pools.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_pool(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for pool in search_result['result']:
p = Pool.from_dict(pool)
result['result'].append(p)
return result
@classmethod
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart pool search.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_pool(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['result'] = list()
for pool in smart_result['result']:
p = Pool.from_dict(pool)
result['result'].append(p)
return result
@classmethod
def from_dict(cls, parm, pool = None):
""" Create new Pool-object from dict.
Suitable for creating objects from XML-RPC data.
All available keys must exist.
"""
if pool is None:
pool = Pool()
pool.id = parm['id']
pool.name = parm['name']
pool.description = parm['description']
pool.default_type = parm['default_type']
pool.ipv4_default_prefix_length = parm['ipv4_default_prefix_length']
pool.ipv6_default_prefix_length = parm['ipv6_default_prefix_length']
for val in ('member_prefixes_v4', 'member_prefixes_v6',
'used_prefixes_v4', 'used_prefixes_v6', 'free_prefixes_v4',
'free_prefixes_v6', 'total_prefixes_v4', 'total_prefixes_v6',
'total_addresses_v4', 'total_addresses_v6', 'used_addresses_v4',
'used_addresses_v6', 'free_addresses_v4', 'free_addresses_v6'):
if parm[val] is not None:
setattr(pool, val, int(parm[val]))
pool.tags = {}
for tag_name in parm['tags']:
tag = Tag.from_dict({'name': tag_name })
pool.tags[tag_name] = tag
pool.avps = parm['avps']
# store VRF object in pool.vrf
if parm['vrf_id'] is not None:
pool.vrf = VRF.get(parm['vrf_id'])
return pool
@classmethod
    def list(cls, spec=None):
""" List pools.
"""
if spec is None:
spec = {}
xmlrpc = XMLRPCConnection()
try:
pool_list = xmlrpc.connection.list_pool(
{
'pool': spec,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for pool in pool_list:
p = Pool.from_dict(pool)
res.append(p)
return res
class Prefix(Pynipap):
""" A prefix.
"""
family = None
vrf = None
prefix = None
display_prefix = None
description = None
comment = None
node = None
pool = None
type = None
indent = None
country = None
external_key = None
order_id = None
customer_id = None
authoritative_source = None
alarm_priority = None
monitor = None
display = True
match = False
children = -2
vlan = None
added = None
last_modified = None
total_addresses = None
    used_addresses = None
    free_addresses = None
status = None
expires = None
def __init__(self):
Pynipap.__init__(self)
self.inherited_tags = {}
self.tags = {}
self.avps = {}
@classmethod
def get(cls, id):
""" Get the prefix with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['Prefix']:
log.debug('cache hit for prefix %d' % id)
return _cache['Prefix'][id]
log.debug('cache miss for prefix %d' % id)
try:
prefix = Prefix.list({'id': id})[0]
except IndexError:
raise NipapNonExistentError('no prefix with ID ' + str(id) + ' found')
_cache['Prefix'][id] = prefix
return prefix
@classmethod
def find_free(cls, vrf, args):
""" Finds a free prefix.
"""
xmlrpc = XMLRPCConnection()
q = {
'args': args,
'auth': AuthOptions().options
}
# sanity checks
if isinstance(vrf, VRF):
q['vrf'] = { 'id': vrf.id }
elif vrf is None:
q['vrf'] = None
else:
raise NipapValueError('vrf parameter must be instance of VRF class')
# run XML-RPC query
try:
find_res = xmlrpc.connection.find_free_prefix(q)
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
return find_res
@classmethod
def search(cls, query, search_opts=None):
""" Search for prefixes.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_prefix(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for prefix in search_result['result']:
p = Prefix.from_dict(prefix)
result['result'].append(p)
return result
@classmethod
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart prefix search.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_prefix(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['result'] = list()
for prefix in smart_result['result']:
p = Prefix.from_dict(prefix)
result['result'].append(p)
return result
@classmethod
def list(cls, spec=None):
""" List prefixes.
"""
if spec is None:
spec = {}
xmlrpc = XMLRPCConnection()
try:
pref_list = xmlrpc.connection.list_prefix(
{
'prefix': spec,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for pref in pref_list:
p = Prefix.from_dict(pref)
res.append(p)
return res
def save(self, args=None):
""" Save prefix to NIPAP.
"""
if args is None:
args = {}
xmlrpc = XMLRPCConnection()
data = {
'description': self.description,
'comment': self.comment,
'tags': [],
'node': self.node,
'type': self.type,
'country': self.country,
'order_id': self.order_id,
'customer_id': self.customer_id,
'external_key': self.external_key,
'alarm_priority': self.alarm_priority,
'monitor': self.monitor,
'vlan': self.vlan,
'avps': self.avps,
'expires': self.expires
}
if self.status is not None:
data['status'] = self.status
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.vrf is not None:
if not isinstance(self.vrf, VRF):
raise NipapValueError("'vrf' attribute not instance of VRF class.")
data['vrf_id'] = self.vrf.id
# Prefix can be none if we are creating a new prefix
# from a pool or other prefix!
if self.prefix is not None:
data['prefix'] = self.prefix
if self.pool is None:
data['pool_id'] = None
else:
if not isinstance(self.pool, Pool):
raise NipapValueError("'pool' attribute not instance of Pool class.")
data['pool_id'] = self.pool.id
# New object, create from scratch
if self.id is None:
# format args
x_args = {}
if 'from-pool' in args:
x_args['from-pool'] = { 'id': args['from-pool'].id }
if 'family' in args:
x_args['family'] = args['family']
if 'from-prefix' in args:
x_args['from-prefix'] = args['from-prefix']
if 'prefix_length' in args:
x_args['prefix_length'] = args['prefix_length']
try:
prefix = xmlrpc.connection.add_prefix(
{
'attr': data,
'args': x_args,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
# Old object, edit
else:
# Add authoritative source to data
data['authoritative_source'] = self.authoritative_source
try:
# save
prefixes = xmlrpc.connection.edit_prefix(
{
'prefix': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(prefixes) != 1:
raise NipapError('Prefix edit returned %d entries, should be 1.' % len(prefixes))
prefix = prefixes[0]
# Refresh object data with attributes from add/edit operation
Prefix.from_dict(prefix, self)
# update cache
_cache['Prefix'][self.id] = self
if self.pool is not None:
if self.pool.id in _cache['Pool']:
del _cache['Pool'][self.pool.id]
def remove(self, recursive = False):
""" Remove the prefix.
"""
xmlrpc = XMLRPCConnection()
try:
xmlrpc.connection.remove_prefix(
{
'prefix': { 'id': self.id },
'recursive': recursive,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
# update cache
if self.id in _cache['Prefix']:
del(_cache['Prefix'][self.id])
if self.pool is not None:
if self.pool.id in _cache['Pool']:
del _cache['Pool'][self.pool.id]
@classmethod
def from_dict(cls, pref, prefix = None):
""" Create a Prefix object from a dict.
Suitable for creating Prefix objects from XML-RPC input.
"""
if prefix is None:
prefix = Prefix()
prefix.id = pref['id']
if pref['vrf_id'] is not None: # VRF is not mandatory
prefix.vrf = VRF.get(pref['vrf_id'])
prefix.family = pref['family']
prefix.prefix = pref['prefix']
prefix.display_prefix = pref['display_prefix']
prefix.description = pref['description']
prefix.comment = pref['comment']
prefix.node = pref['node']
if pref['pool_id'] is not None: # Pool is not mandatory
prefix.pool = Pool.get(pref['pool_id'])
prefix.type = pref['type']
prefix.indent = pref['indent']
prefix.country = pref['country']
prefix.order_id = pref['order_id']
prefix.customer_id = pref['customer_id']
prefix.external_key = pref['external_key']
prefix.authoritative_source = pref['authoritative_source']
prefix.alarm_priority = pref['alarm_priority']
prefix.monitor = pref['monitor']
prefix.vlan = pref['vlan']
prefix.added = pref['added']
prefix.last_modified = pref['last_modified']
prefix.total_addresses = int(pref['total_addresses'])
prefix.used_addresses = int(pref['used_addresses'])
prefix.free_addresses = int(pref['free_addresses'])
prefix.status = pref['status']
prefix.avps = pref['avps']
prefix.expires = pref['expires']
prefix.inherited_tags = {}
for tag_name in pref['inherited_tags']:
tag = Tag.from_dict({'name': tag_name })
prefix.inherited_tags[tag_name] = tag
prefix.tags = {}
for tag_name in pref['tags']:
tag = Tag.from_dict({'name': tag_name })
prefix.tags[tag_name] = tag
if 'match' in pref:
prefix.match = pref['match']
if 'display' in pref:
prefix.display = pref['display']
if 'children' in pref:
prefix.children = pref['children']
return prefix
def nipapd_version():
""" Get version of nipapd we're connected to.
"""
xmlrpc = XMLRPCConnection()
try:
return xmlrpc.connection.version(
{
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
def nipap_db_version():
""" Get schema version of database we're connected to.
"""
xmlrpc = XMLRPCConnection()
try:
return xmlrpc.connection.db_version(
{
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
#
# Define exceptions
#
class NipapError(Exception):
""" A generic NIPAP model exception.
All errors thrown from the NIPAP model extends this exception.
"""
pass
class NipapNonExistentError(NipapError):
""" Thrown when something can not be found.
For example when a given ID can not be found in the NIPAP database.
"""
class NipapInputError(NipapError):
""" Something wrong with the input we received
A general case.
"""
pass
class NipapMissingInputError(NipapInputError):
""" Missing input
Most input is passed in dicts, this could mean a missing key in a dict.
"""
pass
class NipapExtraneousInputError(NipapInputError):
""" Extraneous input
Most input is passed in dicts, this could mean an unknown key in a dict.
"""
pass
class NipapNoSuchOperatorError(NipapInputError):
""" A non existent operator was specified.
"""
pass
class NipapValueError(NipapError):
""" Something wrong with a value we have
For example, trying to send an integer when an IP address is expected.
"""
pass
class NipapDuplicateError(NipapError):
""" A duplicate entry was encountered
"""
pass
class NipapAuthError(NipapError):
""" General NIPAP AAA error
"""
pass
class NipapAuthenticationError(NipapAuthError):
""" Authentication failed.
"""
pass
class NipapAuthorizationError(NipapAuthError):
""" Authorization failed.
"""
pass
#
# GLOBAL STUFF
#
# Simple object cache
# TODO: fix some kind of timeout
_cache = {
'Pool': {},
'Prefix': {},
'VRF': {}
}
# Map from XML-RPC Fault codes to Exception classes
_fault_to_exception_map = {
1000: NipapError,
1100: NipapInputError,
1110: NipapMissingInputError,
1120: NipapExtraneousInputError,
1200: NipapValueError,
1300: NipapNonExistentError,
1400: NipapDuplicateError,
1500: NipapAuthError,
1510: NipapAuthenticationError,
1520: NipapAuthorizationError
}
log = logging.getLogger("Pynipap")
def _fault_to_exception(f):
""" Converts XML-RPC Fault objects to Pynipap-exceptions.
        TODO: Is this one necessary? Can be done inline...
"""
e = _fault_to_exception_map.get(f.faultCode)
if e is None:
e = NipapError
return e(f.faultString)
|
ettrig/NIPAP
|
pynipap/pynipap.py
|
Python
|
mit
| 37,369
| 0.003131
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
changelog:
0.27 - 2012-08-12 - hgg
fix "global name 'js_answer' is not defined" bug
fix captcha bug #1 (failed on non-english "captcha wrong" errors)
"""
import re
from time import time
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.plugins.internal.CaptchaService import ReCaptcha
from module.common.json_layer import json_loads
class FilepostCom(SimpleHoster):
__name__ = "FilepostCom"
__type__ = "hoster"
    __pattern__ = r'https?://(?:www\.)?(?:filepost\.com/files|fp\.io)/([^/]+).*'
__version__ = "0.28"
__description__ = """Filepost.com hoster plugin"""
__author_name__ = "zoidberg"
__author_mail__ = "zoidberg@mujmail.cz"
FILE_INFO_PATTERN = r'<input type="text" id="url" value=\'<a href[^>]*>(?P<N>[^>]+?) - (?P<S>[0-9\.]+ [kKMG]i?B)</a>\' class="inp_text"/>'
OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>|<div class="file_info file_info_deleted">'
PREMIUM_ONLY_PATTERN = r'members only. Please upgrade to premium|a premium membership is required to download this file'
RECAPTCHA_KEY_PATTERN = r"Captcha.init\({\s*key:\s*'([^']+)'"
FLP_TOKEN_PATTERN = r"set_store_options\({token: '([^']+)'"
def handleFree(self):
# Find token and captcha key
file_id = re.match(self.__pattern__, self.pyfile.url).group(1)
m = re.search(self.FLP_TOKEN_PATTERN, self.html)
if m is None:
self.parseError("Token")
flp_token = m.group(1)
m = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
if m is None:
self.parseError("Captcha key")
captcha_key = m.group(1)
# Get wait time
get_dict = {'SID': self.req.cj.getCookie('SID'), 'JsHttpRequest': str(int(time() * 10000)) + '-xml'}
post_dict = {'action': 'set_download', 'token': flp_token, 'code': file_id}
wait_time = int(self.getJsonResponse(get_dict, post_dict, 'wait_time'))
if wait_time > 0:
self.wait(wait_time)
post_dict = {"token": flp_token, "code": file_id, "file_pass": ''}
if 'var is_pass_exists = true;' in self.html:
# Solve password
for file_pass in self.getPassword().splitlines():
get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
post_dict['file_pass'] = file_pass
self.logInfo("Password protected link, trying " + file_pass)
download_url = self.getJsonResponse(get_dict, post_dict, 'link')
if download_url:
break
else:
self.fail("No or incorrect password")
else:
# Solve recaptcha
recaptcha = ReCaptcha(self)
for i in xrange(5):
get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
if i:
post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field'] = recaptcha.challenge(
captcha_key)
self.logDebug(u"RECAPTCHA: %s : %s : %s" % (
captcha_key, post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field']))
download_url = self.getJsonResponse(get_dict, post_dict, 'link')
if download_url:
if i:
self.correctCaptcha()
break
elif i:
self.invalidCaptcha()
else:
self.fail("Invalid captcha")
# Download
self.download(download_url)
def getJsonResponse(self, get_dict, post_dict, field):
json_response = json_loads(self.load('https://filepost.com/files/get/', get=get_dict, post=post_dict))
self.logDebug(json_response)
        if 'js' not in json_response:
self.parseError('JSON %s 1' % field)
        # I changed js_answer to json_response['js'] since js_answer is nowhere set.
        # I don't know the JSON-HTTP specs in detail, but the previous author
        # accessed json_response['js']['error'] as well as js_answer['error'];
        # see the two lines commented out with "# ~?".
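        # For reference, the response shape implied by the checks below is an
        # inferred sketch (not taken from any official API documentation):
        #   {'js': {'error': 'download_delay', 'params': {'next_download': 60}}}
        # on a delay error, or {'js': {'answer': {'link': '...'}}} on success.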
if 'error' in json_response['js']:
if json_response['js']['error'] == 'download_delay':
self.retry(wait_time=json_response['js']['params']['next_download'])
# ~? self.retry(wait_time=js_answer['params']['next_download'])
elif 'Wrong file password' in json_response['js']['error']:
return None
elif 'You entered a wrong CAPTCHA code' in json_response['js']['error']:
return None
elif 'CAPTCHA Code nicht korrekt' in json_response['js']['error']:
return None
elif 'CAPTCHA' in json_response['js']['error']:
self.logDebug('error response is unknown, but mentions CAPTCHA -> return None')
return None
else:
self.fail(json_response['js']['error'])
# ~? self.fail(js_answer['error'])
        if 'answer' not in json_response['js'] or field not in json_response['js']['answer']:
self.parseError('JSON %s 2' % field)
return json_response['js']['answer'][field]
getInfo = create_getInfo(FilepostCom)
|
estaban/pyload
|
module/plugins/hoster/FilepostCom.py
|
Python
|
gpl-3.0
| 6,081
| 0.00296
|
"""Driver module for Newport's Spectra-Physics Quanta-Ray INDI, PRO, and LAB
Series Nd:YAG lasers.
NOTE: the watchdog parameter is important! The laser will turn off if it does
not receive a command within the watchdog time period. Therefore, it is
advised to call a command like QuantaRay().get_status() at regular intervals to
query the status of the laser during operation.
@author: Jami L Johnson
September 5, 2014
"""
import serial
class QuantaRay:
"""QuantaRay class"""
def __init__(self, portINDI='/dev/ttyUSB0', baudINDI=9600):
"""Define serial port for INDI"""
self.indi = serial.Serial(
port=portINDI,
baudrate=baudINDI,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_TWO,
bytesize=serial.EIGHTBITS
)
def open_connection(self):
""" Open serial connection to INDI"""
self.indi.close()
self.indi.open()
indi_open = self.indi.isOpen()
if indi_open is False:
raise RuntimeError('unable to connect to INDI')
def close_connection(self):
"""Close connection to INDI"""
self.indi.close()
def get_id(self):
"""Get ID"""
self.indi.write('*IDN?\r'.encode())
return self.indi.readline().decode()
def help(self):
"""Prints serial command options (operational commands)"""
self.indi.write('HELP\r'.encode())
for _ in range(1, 6):
print(self.indi.readline().decode())
def turn_on(self):
"""Turns Quanta-Ray INDI on"""
self.indi.write('ON\r'.encode())
def turn_off(self):
"""Turns Quanta-Ray INDI off"""
self.indi.write('OFF\r'.encode())
def set_lamp(self, lamp_set='FIX', lamp_pulse=''):
"""Select lamp trigger source
lamp_set:
FIX = set lamp trigger to Fixed
EXT = set lamp trigger to External Source
VAR = set lamp trigger to Variable
INH = inhibit lamp trigger
lamp_pulse = set rate of lamp (pulses/second)
"""
if lamp_pulse != '':
self.indi.write(('LAMP '+ str(lamp_set) + ' ' + str(lamp_pulse) + '\r').encode())
else:
self.indi.write(('LAMP '+ str(lamp_set) + '\r').encode())
def get_lamp(self):
""" Returns the lamp Variable Rate trigger setting """
self.indi.write('LAMP VAR?\r'.encode())
return self.indi.readline().decode()
def set(self, cmd='NORM'):
"""Set mode, type, or timing of Q-switch
cmd:
LONG = long pulse mode
EXT = external mode
NORM = normal mode
SING = single shot
FIR = fire Q-switch once
REP = repetitive shots
"""
self.indi.write(('QSW ' + str(cmd) + '\r').encode())
def single_shot(self):
"""Set single shot"""
self.set('SING')
def normal_mode(self):
"""Set normal mode"""
self.set('NORM')
def repeat_mode(self, watchdog_timeout):
"""Set repetitive shots and ensures watchdog is turned on (not disabled)
:param watchdog_timeout: seconds before laser safety shutoff
:type watchdog_timeout: int
#:raises ValueError: if watchdog is requested to be 0 (disabled)
"""
if watchdog_timeout == 0:
dummy = input('QuantaRay INDI Laser watchdog is 0 s. This will ' +
'disable watchdog and the laser will continue to run ' +
'after the experiment has finished. Continue? [ y / n ]:')
if dummy == 'n':
raise ValueError('Disabling watchdog when using repeat mode is not advised')
self.set_watchdog(watchdog_timeout)
self.set('REP')
def get(self):
"""Queries and returns the Q-switch settings."""
self.indi.write('QSW?\r'.encode())
return self.indi.readline().decode()
def set_adv(self, delay):
"""Set advanced sync delay"""
self.indi.write(('ADV ' + str(delay) + '\r').encode())
def get_adv(self):
"""Queries and returns the Q-switch Advanced Sync settings"""
self.indi.write('QSW ADV? \r'.encode())
return self.indi.readline().decode()
def set_delay(self, delay):
"""Sets delay for Q-switch delay"""
self.indi.write(('QSW DEL ' + str(delay) + '\r').encode())
def get_delay(self):
"""Queries and returns the Q-switch delay setting"""
self.indi.write('QSW DEL? \r'.encode())
return self.indi.readline().decode()
def set_echo(self, mode=0):
"""Set echo mode of INDI.
mode:
0 = show prompts
1 = laser echoes characters as received
2 = shows error messages
3 = output line feed for every command (even those that don't normally generate a response)
4 = terminate responses with <cr><lf>, rather than just <lf>
5 = use XON/XOFF handshaking for data sent to laser (not for data sent from the laser)
"""
self.indi.write(('ECH ' + str(mode) + '\r').encode())
def set_watchdog(self, time=10):
"""Set range of watchdog. If the laser does not receive communication
from the control computer within the specifiedc time, it turns off. If
disabled, the default time is zero. Time must be between 0 and 110
seconds.
"""
if time < 0 or time > 110:
raise ValueError('Invalid watchdog time. Choose value between 0 and 110 seconds.')
self.indi.write(('WATC ' + str(time) + '\r').encode())
def set_baud(self, baud_indi=9600):
"""Sets baudrate of laser. At power-up, baudrate is always 9600."""
self.indi.write(('BAUD ' + str(baud_indi) + '\r').encode())
def get_amp_setting(self):
"""Queries amplifier PFN command setting in percent"""
self.indi.write('READ:APFN?\r'.encode())
return self.indi.readline().decode()
def get_amp_power(self):
"""Queries amplifier PFN monitor in percent (what PFN power supply is actually doing)"""
self.indi.write('READ:AMON?\r'.encode())
return self.indi.readline().decode()
def get_osc_setting(self):
"""Queries oscillator PFN command setting in percent"""
self.indi.write('READ:OPFN?\r'.encode())
return self.indi.readline().decode()
def get_osc_power(self):
"""Queries oscillator PFN monitor in percent (what PFN power supply is actually doing)"""
self.indi.write('READ:OMON?\r'.encode())
return self.indi.readline().decode()
def get_qsw_adv(self):
"""Queries and returns the current Q-Switch Advanced Sync setting"""
self.indi.write('READ:QSWADV?\r'.encode())
return self.indi.readline().decode()
def get_shots(self):
"""Queries and returns the number of shots"""
self.indi.write('SHOT?\r'.encode())
return self.indi.readline().decode()
def get_trig_rate(self):
"""Queries and returns the lamp trigger rate (unless lamp trigger source is external"""
self.indi.write('READ:VAR?\r'.encode())
return self.indi.readline().decode()
def set_osc_power(self, percent=0):
"""set the Oscillator PFN voltage as a percentage of factory full scale"""
self.indi.write(('OPFN ' + str(percent) + '\r').encode())
def set_amp_power(self, percent=0):
"""set the PFN Amplifier voltage as a percentage of factory full scale"""
self.indi.write(('APFN ' + str(percent) + '\r').encode())
def get_status(self):
"""Returns the laser status.
Result is a list with entries of the form: [bit, error], where "bit" is
the bit of the status byte, and "error" is a text description of the
error.
"""
self.indi.write('*STB?\r'.encode())
stb_value = bin(int(self.indi.readline().decode()))
stb_value = stb_value[2:] # remove 0b at beginning
#print 'stb_value: ', stb_value # prints binary status byte value
error_list = list()
if stb_value[len(stb_value)-1] == '1':
bit = '0'
error = 'Laser emission can occur'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-2] == '1':
bit = '1'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-3] == '1':
bit = '2'
error = ('Data is in the error log.\n'
                    + '(use QuantaRay().get_hist() for details on the error.)')
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-4] == '1':
bit = '3'
            error = 'Check QuantaRay().get_quest() for error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-5] == '1':
bit = '4'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-6] == '1': #5 **********
bit = '5'
error = 'Check *ESR bits for error.'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-7] == '1':
bit = '6'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-8] == '1': #7 ****
bit = '7'
error = 'Check STR:OPER bits'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-9] == '1':
bit = '8'
error = 'Main contactor is energized'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-10] == '1':
bit = '9'
error = 'Oscillator simmer is on'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-11] == '1':
bit = '10'
error = 'Amplifier simmer is on'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-12] == '1':
bit = '11'
error = 'Oscillator PFN is at target'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-13] == '1':
bit = '12'
error = 'The laser has recently fired'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-14] == '1':
bit = '13'
error = '15 Vdc power supply failure'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-15] == '1':
bit = '14'
error = 'Laser cover interlock open'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-16] == '1':
bit = '15'
error = ('Interlock open: CDRH plug, power supply cover, laser '
+ 'head cover, laser head temperature, water pressure, '
+ 'or water flow')
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-17] == '1':
bit = '16'
error = 'Remote panel disconnected'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-18] == '1':
bit = '17'
error = 'Internal 208 Vac failure'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-19] == '1':
bit = '18'
error = 'CDRH enable failure'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-20] == '1':
bit = '19'
error = 'Laser ID fault'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-21] == '1':
bit = '20'
error = 'Low water fault'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-22] == '1':
bit = '21'
error = 'Interlock fault'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-23] == '1':
bit = '22'
error = 'Remote panel connected'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-24] == '1':
bit = '23'
error = 'Remote panel indicates that the computer is in control.'
stat = [bit, error]
error_list.append(stat)
if len(stb_value) > 24:
if stb_value[len(stb_value)-25] == '1':
bit = '24'
error = 'Main contactor should be on.'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-26] == '1':
bit = '25'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-27] == '1':
bit = '26'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-28] == '1':
bit = '27'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-29] == '1':
bit = '28'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-30] == '1':
bit = '29'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-31] == '1':
bit = '30'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
            if stb_value[len(stb_value)-32] == '1':
                bit = '31'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
return error_list
def get_quest(self):
"""Returns questionable condition register.
Result is a list with entries of the form: [bit, error], where "bit" is
the bit of the status byte, and "error" is a text description of the
error.
"""
self.indi.write('STAT:QUES?\r'.encode())
qb_value = bin(int(self.indi.readline().decode()))
qb_value = qb_value[3:]
error_list = list()
#print 'qb_value: ', qb_value # prints binary STAT:QUES? value
if qb_value[len(qb_value)-1] == '1':
bit = '0'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-2] == '1':
bit = '1'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-3] == '1':
bit = '2'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-4] == '1':
bit = '3'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-5] == '1':
bit = '4'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-6] == '1':
bit = '5'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-7] == '1':
bit = '6'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-8] == '1':
bit = '7'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-9] == '1':
bit = '8'
error = '-Reserved error'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-10] == '1':
bit = '9'
error = '-Oscillator HV failure'
stat = [bit, error]
error_list.append(stat)
QuantaRay().turn_off()
exit()
if qb_value[len(qb_value)-11] == '1':
bit = '10'
error = '-Amplifier HV failure'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-12] == '1':
bit = '11'
error = '-External trigger rate out of range.'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-13] == '1':
bit = '12'
error = '-De-ionized water low'
stat = [bit, error]
error_list.append(stat)
if len(qb_value) > 15:
# bits 13-15 undefined
if qb_value[len(qb_value)-17] == '1':
bit = '16'
error = '-OSC HVPS # 1 EndOfCharge'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-18] == '1':
bit = '17'
error = '-OverLoad'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-19] == '1':
bit = '18'
error = '-OverTemp'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-20] == '1':
bit = '19'
error = '-OverVolt'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-21] == '1':
bit = '20'
error = '-OSC HVPS #2 EndOfCharge'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-22] == '1':
bit = '21'
error = '-OverLoad'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-23] == '1':
bit = '22'
error = '-OverTemp'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-24] == '1':
bit = '23'
error = '-OverVolt'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-25] == '1':
bit = '24'
error = '-AMP HVPS # 1 EndOfCharge'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-26] == '1':
bit = '25'
error = '-OverLoad'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-27] == '1':
bit = '26'
error = '-OverTemp'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-28] == '1':
bit = '27'
error = '-OverVolt'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-29] == '1':
bit = '28'
error = '-AMP HVPS # 2 EndOfCharge'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-30] == '1':
bit = '29'
error = '-OverLoad'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-31] == '1':
bit = '30'
error = '-OverTemp'
stat = [bit, error]
error_list.append(stat)
if qb_value[len(qb_value)-32] == '1':
bit = '31'
error = '-OverVolt'
stat = [bit, error]
error_list.append(stat)
return error_list
def reset(self):
""" Resets the laser head PC board"""
self.indi.write('*RST?\r'.encode())
print('Laser PC board reset')
def get_hist(self):
"""Returns up to 16 status/error codes from the system history buffer.
Use if the laser has shut off or the system is behaving erratically.
The first element is the most recent.
Example output:
        1 827 # 1 error has occurred, current time is 827 sec
        301 801 # Error code 301 occurred at 801 seconds
0 0 # End of history buffer
"""
self.indi.write('READ:HIST?\r'.encode())
reply = '1'
reply_list = list()
while reply[0] != '0': #end of history buffer
reply = self.indi.readline().decode().rstrip()
reply_list.append(reply)
return reply_list
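# A minimal usage sketch (an illustrative addition; the serial port and the
# timing values are assumptions). Polling get_status() at regular intervals
# keeps the watchdog satisfied, as advised in the module docstring.
if __name__ == '__main__':
    from time import sleep
    laser = QuantaRay(portINDI='/dev/ttyUSB0')
    laser.open_connection()
    laser.set_watchdog(10)  # laser shuts off if silent for 10 s
    laser.turn_on()
    try:
        for _ in range(5):
            print(laser.get_status())  # doubles as a watchdog keep-alive
            sleep(5)                   # well within the 10 s watchdog window
    finally:
        laser.turn_off()
        laser.close_connection()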
|
PALab/PLACE
|
place/plugins/quanta_ray/qray_driver.py
|
Python
|
lgpl-3.0
| 21,278
| 0.001222
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver.by import By
from marionette_harness import MarionetteTestCase
class TestPosition(MarionetteTestCase):
def test_should_get_element_position_back(self):
test_url = self.marionette.absolute_url('rectangles.html')
self.marionette.navigate(test_url)
r2 = self.marionette.find_element(By.ID, "r2")
location = r2.rect
self.assertEqual(11, location['x'])
self.assertEqual(10, location['y'])
|
Yukarumya/Yukarum-Redfoxes
|
testing/marionette/harness/marionette_harness/tests/unit/test_position.py
|
Python
|
mpl-2.0
| 664
| 0
|
import unittest2
from zounds.util import simple_in_memory_settings
from .preprocess import MeanStdNormalization, PreprocessingPipeline
import featureflow as ff
import numpy as np
class MeanStdTests(unittest2.TestCase):
def _forward_backward(self, shape):
@simple_in_memory_settings
class Model(ff.BaseModel):
meanstd = ff.PickleFeature(
MeanStdNormalization,
store=False)
pipeline = ff.PickleFeature(
PreprocessingPipeline,
needs=(meanstd,),
store=True)
training = np.random.random_sample((100,) + shape)
_id = Model.process(meanstd=training)
model = Model(_id)
data_shape = (10,) + shape
data = np.random.random_sample(data_shape)
result = model.pipeline.transform(data)
self.assertEqual(data_shape, result.data.shape)
inverted = result.inverse_transform()
self.assertEqual(inverted.shape, data.shape)
np.testing.assert_allclose(inverted, data)
def test_can_process_1d(self):
self._forward_backward((9,))
def test_can_process_2d(self):
self._forward_backward((3, 4))
def test_can_process_3d(self):
self._forward_backward((5, 4, 7))
|
JohnVinyard/zounds
|
zounds/learn/test_meanstd.py
|
Python
|
mit
| 1,301
| 0
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
from evaluator import Evaluator
from loader import Loader
import matplotlib.pyplot as plt
from confidence_weighted import ConfidenceWeighted
def graph_plot(plt_obj, show=False):
plt_obj.ylim(0, 1)
plt_obj.xlabel("Number of trials")
plt_obj.ylabel("Accuracy")
plt_obj.legend(["CW", "CW1", "CW2"], loc="lower right")
if show is True:
plt_obj.show()
else:
plt_obj.figure()
if __name__ == '__main__':
    # construct confidence-weighted models
cw = list()
cw.append(ConfidenceWeighted(123))
cw.append(ConfidenceWeighted(123, 0.30))
cw.append(ConfidenceWeighted(123, 0.50))
# training phase
loader = Loader('a1a', 123, 30956, 1605)
y_vec, feats_vec = loader.load_train()
for i in range(len(cw)):
evaluator = Evaluator(cw[i], y_vec, feats_vec)
evaluator.update()
plt.plot(evaluator.accuracy)
graph_plot(plt)
# test phase
y_vec, feats_vec = loader.load_test()
for i in range(len(cw)):
evaluator = Evaluator(cw[i], y_vec, feats_vec)
evaluator.predict()
plt.plot(evaluator.accuracy)
graph_plot(plt, show=True)
|
smrmkt/online_learning_algorithms
|
exec_cw.py
|
Python
|
bsd-3-clause
| 1,188
| 0.000842
|
#!/usr/bin/env python
# coding: utf-8
import os
import csv
from schedule_entry import EntryStatus
from machine import MachineStatus
def dump_stat(path, data, headers):
with open(path, 'w') as out:
csv_out = csv.writer(out)
csv_out.writerow(headers)
for row in data:
csv_out.writerow(row)
class Statistics():
def __init__(self):
self.numbers = []
self.scheds = []
self.entries = []
self.durations = []
def snapshot(self, timestamp, entries, machines):
# Number of jobs in scheduled/execution
njs = len([e for e in entries if e.status == EntryStatus.scheduled])
nje = len([e for e in entries if e.status == EntryStatus.executing])
# Number of machines allocating/running
nma = len([m for m in machines if m.status == MachineStatus.allocating])
nmr = len([m for m in machines if m.status == MachineStatus.running])
self.numbers.append((timestamp, njs, nje, nma, nmr))
def schedshot(self, provisioner):
self.scheds.append((provisioner.timestamp, provisioner.budget, provisioner.cost_pred, provisioner.wf_end))
def jobs(self, entries):
d = {}
for e in entries:
            if e.host is not None:
host_id = e.host.id
condor_slot = e.host.condor_slot
else:
host_id = condor_slot = None
            if e.job is not None:
wf_id = e.job.wf_id
dag_job_id = e.job.dag_job_id
else:
wf_id = dag_job_id = None
for event in e.log.keys():
if e.log[event]:
self.entries.append((host_id, condor_slot, wf_id, dag_job_id, e.condor_id, event, e.log[event]))
if dag_job_id and 'EXECUTE' in e.log.keys() and 'JOB_TERMINATED' in e.log.keys() and 'SUBMIT' in e.log.keys():
parts = dag_job_id.split('_')
if len(parts) == 2:
jt = parts[0]
else:
jt = '_'.join(parts[:2])
                # accumulate per job type: [count, execute secs, queue secs, total secs]
                d[jt] = [
                    (d[jt][0] if jt in d.keys() else 0) + 1,
                    (d[jt][1] if jt in d.keys() else 0) + (e.log['JOB_TERMINATED'] - e.log['EXECUTE']).total_seconds(),
                    (d[jt][2] if jt in d.keys() else 0) + (e.log['EXECUTE'] - e.log['SUBMIT']).total_seconds(),
                    (d[jt][3] if jt in d.keys() else 0) + (e.log['JOB_TERMINATED'] - e.log['SUBMIT']).total_seconds(),
                ]
        # average the accumulated durations over the job count of each type
        for jt in d.keys():
            self.durations.append((jt, d[jt][1]*1.0 / d[jt][0], d[jt][2]*1.0 / d[jt][0], d[jt][3]*1.0 / d[jt][0], d[jt][0]))
def dump(self):
home = os.path.expanduser('~')
directory = os.path.join(home, '.dynamic_provisioning')
if not os.path.exists(directory):
os.makedirs(directory)
print 'Writing statistics in ' + str(directory)
path = os.path.join(directory, 'numbers.csv')
headers = ['timestamp','n_jobs_s','n_jobs_e','n_machines_a','n_machines_r']
dump_stat(path, self.numbers, headers)
path = os.path.join(directory, 'budget.csv')
headers = ['timestamp', 'budget', 'cost_prediction', 'wf_end']
dump_stat(path, self.scheds, headers)
path = os.path.join(directory, 'jobs.csv')
headers = ['host', 'slot', 'workflow', 'dag_job_id','condor_id', 'event', 'timestamp']
dump_stat(path, self.entries, headers)
path = os.path.join(directory, 'durations.csv')
headers = ['job', 'execute_time', 'queue_time', 'total_time', 'n']
dump_stat(path, self.durations, headers)
|
rika/dynamic-provisioning
|
statistics.py
|
Python
|
gpl-3.0
| 3,837
| 0.011207
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2019
# Separated test code with Python 3.6 syntax.
import typing
import decimal
from streamsx.spl.types import int64
class NTS(typing.NamedTuple):
x: int
msg: str
class NamedTupleBytesSchema(typing.NamedTuple):
idx: str
msg: bytes
flag: bool
oidx: typing.Optional[str] = None
omsg: typing.Optional[bytes] = None
oflag: typing.Optional[bool] = None
class NamedTupleNumbersSchema2(typing.NamedTuple):
i64: int
f64: float
d128: decimal.Decimal
c64: complex
si64: typing.Set[int]
oi64: typing.Optional[int] = None
of64: typing.Optional[float] = None
od128: typing.Optional[decimal.Decimal] = None
oc64: typing.Optional[complex] = None
omi64li64: typing.Optional[typing.Mapping[int,typing.List[int]]] = None
class NamedTupleNumbersSchema(typing.NamedTuple):
i64: int
f64: float
d128: decimal.Decimal
c64: complex
oi64: typing.Optional[int] = None
of64: typing.Optional[float] = None
od128: typing.Optional[decimal.Decimal] = None
oc64: typing.Optional[complex] = None
omi64li64: typing.Optional[typing.Mapping[int,typing.List[int]]] = None
#tuple<float64 start_time, float64 end_time, float64 confidence>
class SpottedSchema(typing.NamedTuple):
start_time: float
end_time: float
confidence: float
class NamedTupleSetOfListofTupleSchema(typing.NamedTuple):
slt: typing.Set[typing.List[SpottedSchema]]
#tuple<map<rstring, tuple<float64 start_time, float64 end_time, float64 confidence>> keywords_spotted>
class NamedTupleMapWithTupleSchema(typing.NamedTuple):
keywords_spotted: typing.Mapping[str,SpottedSchema]
class NamedTupleMapWithListTupleSchema(typing.NamedTuple):
keywords_spotted: typing.Mapping[str,typing.List[SpottedSchema]]
class NamedTupleListOfTupleSchema(typing.NamedTuple):
spotted: typing.List[SpottedSchema]
#tuple<rstring str, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>
class NamedTupleNestedTupleSchema(typing.NamedTuple):
key: str
spotted: SpottedSchema
#tuple<int64 i64, list<tuple<rstring str, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedList>
class NamedTupleListOfNestedTupleSchema(typing.NamedTuple):
i64: int
spottedList: typing.List[NamedTupleNestedTupleSchema]
#tuple<rstring s1, tuple<int64 i64, list<tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedList> tupleWList>
class NamedTupleNestedList2Schema(typing.NamedTuple):
s1: str
tupleWList: NamedTupleListOfNestedTupleSchema
#tuple<rstring s2, tuple<rstring s1, tuple<int64 i64, list<tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedList> tupleWList> tupleWList2>
class NamedTupleNestedList3Schema(typing.NamedTuple):
s2: str
tupleWList2: NamedTupleNestedList2Schema
#tuple<int64 i64, map<rstring, tuple<rstring str, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spotted>
class NamedTupleMapOfNestedTupleSchema(typing.NamedTuple):
i64: int
spottedMap: typing.Mapping[str,NamedTupleNestedTupleSchema]
#tuple<rstring s1, tuple<int64 i64, map<rstring, tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedMap> tupleWMap>
class NamedTupleNestedMap2Schema(typing.NamedTuple):
s1: str
tupleWMap: NamedTupleMapOfNestedTupleSchema
#tuple<rstring s2, tuple<rstring s1, tuple<int64 i64, map<rstring, tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedMap> tupleWMap> tupleWMap2>
class NamedTupleNestedMap3Schema(typing.NamedTuple):
s2: str
tupleWMap2: NamedTupleNestedMap2Schema
class TestSchema(typing.NamedTuple):
flag: bool
i64: int
class ContactsSchema(typing.NamedTuple):
mail: str
phone: str
nested_tuple: TestSchema
class AddressSchema(typing.NamedTuple):
street: str
city: str
contacts: ContactsSchema
class PersonSchema(typing.NamedTuple):
name: str
age: int
address: AddressSchema
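# Illustrative instantiation of the nested schemas above (an added sketch,
# not part of the original test module; all values are made up):
_example_person = PersonSchema(
    name='Alice',
    age=30,
    address=AddressSchema(
        street='1 Main St',
        city='Springfield',
        contacts=ContactsSchema(
            mail='alice@example.com',
            phone='555-0100',
            nested_tuple=TestSchema(flag=True, i64=42))))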
#tuple<int64 x_coord, int64 y_coord>
class Point2DSchema(typing.NamedTuple):
x_coord: int
y_coord: int
#tuple<int64 x_coord, int64 y_coord, int64 z_coord>
class Point3DSchema(typing.NamedTuple):
x_coord: int
y_coord: int
z_coord: int
#tuple<tuple<int64 x_coord, int64 y_coord> center, float64 radius>
class CircleSchema(typing.NamedTuple):
center: Point2DSchema
radius: float
#tuple<float64 radius, boolean has_rings>
class CircleRadiusSchema(typing.NamedTuple):
radius: float
has_rings: bool
#tuple<tuple<int64 x_coord, int64 y_coord, int64 z_coord> center, int64 radius , int64 radius2>
class DonutSchema(typing.NamedTuple):
center: Point3DSchema
radius: int
radius2: int
rings: typing.List[CircleRadiusSchema]
#tuple<tuple<tuple<int64 x_coord, int64 y_coord> center, float64 radius> circle,
# tuple<tuple<int64 x_coord, int64 y_coord, int64 z_coord> center, int64 radius , int64 radius2> torus>
class TripleNestedTupleAmbiguousAttrName(typing.NamedTuple):
circle: CircleSchema # contains 'center' as tuple attribute
torus: DonutSchema # contains also 'center' as a different tuple type attribute, contains 'rings' attribute
rings: typing.List[CircleSchema] # rings with nested (anonymous C++ type)
#tuple<int64 int1, map<string, tuple<int64 x_coord, int64 y_coord>> map1>
class TupleWithMapToTupleAttr1(typing.NamedTuple):
int1: int
map1: typing.Mapping[str,Point2DSchema]
#tuple<int64 int2, map<string, tuple<int64 int1, map<rstring, tuple<int64 x_coord, int64 y_coord>> map1>> map2>
# This schema contains map attributes at different nesting levels with different attribute names and different Value types
class TupleWithMapToTupleWithMap(typing.NamedTuple):
int2: int
map2: typing.Mapping[str,TupleWithMapToTupleAttr1]
#tuple<int64 int1, map<string, tuple<int64 int1, map<rstring, tuple<int64 x_coord, int64 y_coord>> map1>> map1>
# This schema contains map attributes at different nesting levels with equal map attribute name (map1), but different Value types
class TupleWithMapToTupleWithMapAmbigousMapNames(typing.NamedTuple):
int1: int
map1: typing.Mapping[str,TupleWithMapToTupleAttr1]
#tuple<int64 int1, map<string, tuple<int64 x_coord, int64 y_coord, int64 z_coord>> map1>
#class TupleWithMapToTupleAttr2(typing.NamedTuple):
# int1: int
# map1: typing.Mapping[str,Point3DSchema]
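# A minimal demonstration sketch (not part of the original schema
# definitions) showing how these classes behave as plain NamedTuples,
# including nesting one schema inside another:
if __name__ == '__main__':
    spotted = SpottedSchema(start_time=0.5, end_time=1.25, confidence=0.9)
    nested = NamedTupleNestedTupleSchema(key='hello', spotted=spotted)
    assert nested.spotted.confidence == 0.9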
|
IBMStreams/streamsx.topology
|
test/python/topology/py36_types.py
|
Python
|
apache-2.0
| 6,604
| 0.011508
|
# Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Windows shell link files"""
from lf.win.shell.link.objects import (
ShellLink, FileAttributes, LinkFlags, ShellLinkHeader, StringData,
LinkInfo, VolumeID, CNRL, ExtraDataBlock, ConsoleProps, ConsoleFEProps,
DarwinProps, ExpandableStringsDataBlock, EnvironmentProps,
IconEnvironmentProps, KnownFolderProps, PropertyStoreProps, ShimProps,
SpecialFolderProps, DomainRelativeObjId, TrackerProps,
VistaAndAboveIDListProps, TerminalBlock, ExtraDataBlockFactory,
StringDataSet
)
__docformat__ = "restructuredtext en"
__all__ = [
"ShellLink", "FileAttributes", "LinkFlags", "ShellLinkHeader",
"StringData", "LinkInfo", "VolumeID", "CNRL", "ExtraDataBlock",
"ConsoleProps", "ConsoleFEProps", "DarwinProps",
"ExpandableStringsDataBlock", "EnvironmentProps", "IconEnvironmentProps",
"KnownFolderProps", "PropertyStoreProps", "ShimProps",
"SpecialFolderProps", "DomainRelativeObjId", "TrackerProps",
"VistaAndAboveIDListProps", "TerminalBlock", "ExtraDataBlockFactory",
"StringDataSet"
]
|
306777HC/libforensics
|
code/lf/win/shell/link/__init__.py
|
Python
|
lgpl-3.0
| 1,769
| 0.000565
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, api, fields
class mrp_bom(models.Model):
_inherit = 'mrp.bom'
def _bom_explode(self, cr, uid, bom, product, factor, properties=None,
level=0, routing_id=False, previous_products=None,
master_bom=None, context=None):
res = super(mrp_bom, self)._bom_explode(
cr, uid, bom, product, factor,
properties=properties, level=level,
routing_id=routing_id,
previous_products=previous_products,
master_bom=master_bom, context=context
)
results = res[0] # product_lines
results2 = res[1] # workcenter_lines
indice = 0
for bom_line_id in bom.bom_line_ids:
line = results[indice]
line['largura'] = bom_line_id.largura
line['comprimento'] = bom_line_id.comprimento
line['unidades'] = bom_line_id.unidades
indice += 1
return results, results2
class mrp_bom_line(models.Model):
_inherit = 'mrp.bom.line'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
    @api.onchange('largura', 'comprimento', 'unidades')
    def compute_quantity(self):
        # Quantity is width (largura) x length (comprimento) x units
        # (unidades); a missing dimension defaults to 1.
        self.product_qty = (self.largura or 1) * \
            (self.comprimento or 1) * (self.unidades or 1)
class mrp_production_product_line(models.Model):
_inherit = 'mrp.production.product.line'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
class stock_move(models.Model):
_inherit = 'stock.move'
largura = fields.Float(string="Largura", digits=(16, 6))
comprimento = fields.Float(string="Comprimento", digits=(16, 6))
unidades = fields.Float(string="Unidades", digits=(16, 6))
class mrp_production(models.Model):
_inherit = 'mrp.production'
def _make_production_consume_line(self, cr, uid, line, context=None):
move_id = super(mrp_production, self)\
._make_production_consume_line(
cr, uid, line, context=context)
self.pool['stock.move'].write(cr, uid, move_id,
{'unidades': line.unidades,
'comprimento': line.comprimento,
'largura': line.largura})
return move_id
|
Trust-Code/trust-addons
|
trust_second_unit_of_measure/models/mrp_bom.py
|
Python
|
agpl-3.0
| 3,943
| 0
|
# -*- coding: utf-8 -*
import uuid
import random
import string
from test import DjangoTestCase
class Account(object):
def __init__(self, email=None, password=None):
self.email = email
self.password = password
@staticmethod
def create_email():
return u"some.one+%s@example.com" % uuid.uuid4().hex.__str__()
@staticmethod
def create_password(length=20):
return u"".join([random.choice(string.digits) for _ in range(length)])
class AccountTestCase(DjangoTestCase):
def signup(self, email=None, password=None, password_confirmation=None):
data = {}
if email is not None:
data[u"email"] = email
if password is not None:
data[u"password"] = password
if password_confirmation is not None:
data[u"password_confirmation"] = password_confirmation
response = self.http_post(u"/signup", data)
return Account(email=email, password=password), response
def login(self, email=None, password=None):
data = {}
if email is not None:
data[u"email"] = email
if password is not None:
data[u"password"] = password
return self.http_post(u"/login", data)
def logout(self, email=None, password=None):
data = {}
if email is not None:
data[u"email"] = email
if password is not None:
data[u"password"] = password
return self.http_post(u"/logout", data)
|
kaeawc/django-auth-example
|
test/account.py
|
Python
|
mit
| 1,507
| 0.000664
|
# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
from testtools import TestCase
from testtools.matchers import Equals, MatchesException, Raises
from testtools.content_type import (
ContentType,
JSON,
UTF8_TEXT,
)
class TestContentType(TestCase):
def test___init___None_errors(self):
raises_value_error = Raises(MatchesException(ValueError))
self.assertThat(lambda:ContentType(None, None), raises_value_error)
self.assertThat(lambda:ContentType(None, "traceback"),
raises_value_error)
self.assertThat(lambda:ContentType("text", None), raises_value_error)
def test___init___sets_ivars(self):
content_type = ContentType("foo", "bar")
self.assertEqual("foo", content_type.type)
self.assertEqual("bar", content_type.subtype)
self.assertEqual({}, content_type.parameters)
def test___init___with_parameters(self):
content_type = ContentType("foo", "bar", {"quux": "thing"})
self.assertEqual({"quux": "thing"}, content_type.parameters)
def test___eq__(self):
content_type1 = ContentType("foo", "bar", {"quux": "thing"})
content_type2 = ContentType("foo", "bar", {"quux": "thing"})
content_type3 = ContentType("foo", "bar", {"quux": "thing2"})
self.assertTrue(content_type1.__eq__(content_type2))
self.assertFalse(content_type1.__eq__(content_type3))
def test_basic_repr(self):
content_type = ContentType('text', 'plain')
self.assertThat(repr(content_type), Equals('text/plain'))
def test_extended_repr(self):
content_type = ContentType(
'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
self.assertThat(
repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
class TestBuiltinContentTypes(TestCase):
def test_plain_text(self):
# The UTF8_TEXT content type represents UTF-8 encoded text/plain.
self.assertThat(UTF8_TEXT.type, Equals('text'))
self.assertThat(UTF8_TEXT.subtype, Equals('plain'))
self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'}))
def test_json_content(self):
        # The JSON content type represents implicitly UTF-8 application/json.
self.assertThat(JSON.type, Equals('application'))
self.assertThat(JSON.subtype, Equals('json'))
self.assertThat(JSON.parameters, Equals({}))
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/testtools/tests/test_content_type.py
|
Python
|
agpl-3.0
| 2,543
| 0.001573
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import contextlib
import imp
import os
import sys
import inspect
# Python 3.3's importlib caches filesystem reads for faster imports in the
# general case. But sometimes it's necessary to manually invalidate those
# caches so that the import system can pick up new generated files. See
# https://github.com/astropy/astropy/issues/820
if sys.version_info[:2] >= (3, 3):
from importlib import invalidate_caches
else:
invalidate_caches = lambda: None
class _DummyFile(object):
"""A noop writeable object."""
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
if sys.platform == 'win32':
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
assert attrs != -1
result = bool(attrs & 2)
except (AttributeError, AssertionError):
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b'.')
else:
is_dotted = name.startswith('.')
return is_dotted or _has_hidden_attribute(filepath)
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter `topdown` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See also
--------
os.walk : For a description of the parameters
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror,
followlinks=followlinks):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
def write_if_different(filename, data):
"""Write `data` to `filename`, if the content of the file is different.
Parameters
----------
filename : str
The file name to be written to.
data : bytes
The data to be written to `filename`.
"""
assert isinstance(data, bytes)
if os.path.exists(filename):
with open(filename, 'rb') as fd:
original_data = fd.read()
else:
original_data = None
if original_data != data:
with open(filename, 'wb') as fd:
fd.write(data)
def import_file(filename):
"""
Imports a module from a single file as if it doesn't belong to a
particular package.
"""
# Specifying a traditional dot-separated fully qualified name here
# results in a number of "Parent module 'astropy' not found while
# handling absolute import" warnings. Using the same name, the
# namespaces of the modules get merged together. So, this
# generates an underscore-separated name which is more likely to
# be unique, and it doesn't really matter because the name isn't
# used directly here anyway.
with open(filename, 'U') as fd:
name = '_'.join(
os.path.relpath(os.path.splitext(filename)[0]).split(os.sep)[1:])
return imp.load_module(name, fd, filename, ('.py', 'U', 1))
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
"""
__import__(modname)
mod = sys.modules[modname]
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
valids = [fqn.startswith(modname) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs
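# A minimal usage sketch of find_mod_objs (the module name is arbitrary):
if __name__ == '__main__':
    localnames, fqnames, objs = find_mod_objs('os.path', onlylocals=False)
    for lnm, fqn in zip(localnames, fqnames):
        print('{0} -> {1}'.format(lnm, fqn))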
|
astrofrog/astropy-helpers
|
astropy_helpers/utils.py
|
Python
|
bsd-3-clause
| 6,917
| 0.000434
|
"""
Django settings for dfiid project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
from django.core.exceptions import ImproperlyConfigured
def get_env(setting):
""" Get the environment setting or return exception """
try:
return os.environ[setting]
except KeyError:
error_msg = 'Set the %s env variable' % setting
raise ImproperlyConfigured(error_msg)
SECRET_KEY = get_env('SECRET_KEY')
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'nocaptcha_recaptcha',
'core',
'user',
'content',
'notify',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dfiid.urls'
WSGI_APPLICATION = 'dfiid.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': get_env('DB_NAME'),
'USER': get_env('DB_USER'),
'PASSWORD': get_env('DB_PASSWORD'),
'HOST': get_env('DB_HOST'),
'PORT': get_env('DB_PORT'),
}
}
LANGUAGE_CODE = get_env('LANGUAGE')
TIME_ZONE = 'Atlantic/Canary'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/s/'
STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), )
STATIC_ROOT = os.path.join(BASE_DIR, 's')
MEDIA_URL = '/m/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'm')
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
AUTH_USER_MODEL = 'user.User'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
NORECAPTCHA_SITE_KEY = get_env('NORECAPTCHA_SITE_KEY')
NORECAPTCHA_SECRET_KEY = get_env('NORECAPTCHA_SECRET_KEY')
|
ellipticaldoor/dfiid
|
project/dfiid/settings/base.py
|
Python
|
gpl-2.0
| 2,415
| 0.015735
|
#Evaluate semantic space against MEN dataset
import sys
import utils
from scipy import stats
import numpy as np
from math import sqrt
#Note: this is scipy's spearman, without tie adjustment
def spearman(x,y):
return stats.spearmanr(x, y)[0]
def readMEN(annotation_file):
    pairs = []
    humans = []
    f = open(annotation_file, 'r')
    for l in f:
        l = l.rstrip('\n')
        items = l.split()
        pairs.append((items[0], items[1]))
        humans.append(float(items[2]))
    f.close()
    return pairs, humans
def compute_men_spearman(dm_dict, annotation_file):
    pairs, humans = readMEN(annotation_file)
    system_actual = []
    human_actual = []
    count = 0
    for i in range(len(pairs)):
        human = humans[i]
        a, b = pairs[i]
        if a in dm_dict and b in dm_dict:
            cos = utils.cosine_similarity(dm_dict[a], dm_dict[b])
            system_actual.append(cos)
            human_actual.append(human)
            count += 1
    sp = spearman(human_actual, system_actual)
    return sp, count
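# A minimal usage sketch (the annotation file name and the toy vectors
# are assumptions; dm_dict maps words to vectors accepted by
# utils.cosine_similarity):
#
#     dm_dict = {'car': np.array([0.2, 0.9]), 'cab': np.array([0.3, 0.8])}
#     rho, n = compute_men_spearman(dm_dict, 'MEN_dataset_natural_form_full')
#     print("Spearman: %.4f over %d pairs" % (rho, n))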
|
minimalparts/Tutorials
|
FruitFly/MEN.py
|
Python
|
mit
| 989
| 0.034378
|
# Copyright (C) 2007, 2011, One Laptop Per Child
# Copyright (C) 2014, Ignacio Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import statvfs
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
import cPickle
import xapian
import json
import tempfile
import shutil
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.palette import Palette
from sugar3.graphics import style
from sugar3 import env
from sugar3 import profile
from jarabe.journal import model
from jarabe.journal.misc import get_mount_icon_name
from jarabe.journal.misc import get_mount_color
from jarabe.view.palettes import VolumePalette
_JOURNAL_0_METADATA_DIR = '.olpc.store'
def _get_id(document):
"""Get the ID for the document in the xapian database."""
tl = document.termlist()
try:
term = tl.skip_to('Q').term
if len(term) == 0 or term[0] != 'Q':
return None
return term[1:]
except StopIteration:
return None
def _convert_entries(root):
"""Convert entries written by the datastore version 0.
The metadata and the preview will be written using the new
scheme for writing Journal entries to removable storage
devices.
- entries that do not have an associated file are not
converted.
- if an entry has no title we set it to Untitled and rename
the file accordingly, taking care of creating a unique
filename
"""
try:
database = xapian.Database(os.path.join(root, _JOURNAL_0_METADATA_DIR,
'index'))
except xapian.DatabaseError:
logging.exception('Convert DS-0 Journal entries: error reading db: %s',
os.path.join(root, _JOURNAL_0_METADATA_DIR, 'index'))
return
metadata_dir_path = os.path.join(root, model.JOURNAL_METADATA_DIR)
if not os.path.exists(metadata_dir_path):
try:
os.mkdir(metadata_dir_path)
except EnvironmentError:
logging.error('Convert DS-0 Journal entries: '
'error creating the Journal metadata directory.')
return
for posting_item in database.postlist(''):
try:
document = database.get_document(posting_item.docid)
except xapian.DocNotFoundError, e:
logging.debug('Convert DS-0 Journal entries: error getting '
'document %s: %s', posting_item.docid, e)
continue
_convert_entry(root, document)
def _convert_entry(root, document):
try:
metadata_loaded = cPickle.loads(document.get_data())
except cPickle.PickleError, e:
logging.debug('Convert DS-0 Journal entries: '
'error converting metadata: %s', e)
return
if not ('activity_id' in metadata_loaded and
'mime_type' in metadata_loaded and
'title' in metadata_loaded):
return
metadata = {}
uid = _get_id(document)
if uid is None:
return
for key, value in metadata_loaded.items():
metadata[str(key)] = str(value[0])
if 'uid' not in metadata:
metadata['uid'] = uid
filename = metadata.pop('filename', None)
if not filename:
return
if not os.path.exists(os.path.join(root, filename)):
return
if not metadata.get('title'):
metadata['title'] = _('Untitled')
fn = model.get_file_name(metadata['title'],
metadata['mime_type'])
new_filename = model.get_unique_file_name(root, fn)
os.rename(os.path.join(root, filename),
os.path.join(root, new_filename))
filename = new_filename
preview_path = os.path.join(root, _JOURNAL_0_METADATA_DIR,
'preview', uid)
if os.path.exists(preview_path):
preview_fname = filename + '.preview'
new_preview_path = os.path.join(root,
model.JOURNAL_METADATA_DIR,
preview_fname)
if not os.path.exists(new_preview_path):
shutil.copy(preview_path, new_preview_path)
metadata_fname = filename + '.metadata'
metadata_path = os.path.join(root, model.JOURNAL_METADATA_DIR,
metadata_fname)
if not os.path.exists(metadata_path):
(fh, fn) = tempfile.mkstemp(dir=root)
os.write(fh, json.dumps(metadata))
os.close(fh)
os.rename(fn, metadata_path)
logging.debug('Convert DS-0 Journal entries: entry converted: '
'file=%s metadata=%s',
os.path.join(root, filename), metadata)
class VolumesToolbar(Gtk.Toolbar):
__gtype_name__ = 'VolumesToolbar'
__gsignals__ = {
'volume-changed': (GObject.SignalFlags.RUN_FIRST, None,
([str])),
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self):
Gtk.Toolbar.__init__(self)
self._mount_added_hid = None
self._mount_removed_hid = None
button = JournalButton()
button.connect('toggled', self._button_toggled_cb)
self.insert(button, 0)
button.show()
self._volume_buttons = [button]
self.connect('destroy', self.__destroy_cb)
GLib.idle_add(self._set_up_volumes)
def __destroy_cb(self, widget):
volume_monitor = Gio.VolumeMonitor.get()
volume_monitor.disconnect(self._mount_added_hid)
volume_monitor.disconnect(self._mount_removed_hid)
def _set_up_volumes(self):
self._set_up_documents_button()
volume_monitor = Gio.VolumeMonitor.get()
self._mount_added_hid = volume_monitor.connect('mount-added',
self.__mount_added_cb)
self._mount_removed_hid = volume_monitor.connect(
'mount-removed',
self.__mount_removed_cb)
for mount in volume_monitor.get_mounts():
self._add_button(mount)
def _set_up_documents_button(self):
documents_path = model.get_documents_path()
if documents_path is not None:
button = DocumentsButton(documents_path)
button.props.group = self._volume_buttons[0]
button.set_palette(Palette(_('Documents')))
button.connect('toggled', self._button_toggled_cb)
button.show()
position = self.get_item_index(self._volume_buttons[-1]) + 1
self.insert(button, position)
self._volume_buttons.append(button)
self.show()
def __mount_added_cb(self, volume_monitor, mount):
self._add_button(mount)
def __mount_removed_cb(self, volume_monitor, mount):
self._remove_button(mount)
def _add_button(self, mount):
logging.debug('VolumeToolbar._add_button: %r', mount.get_name())
if os.path.exists(os.path.join(mount.get_root().get_path(),
_JOURNAL_0_METADATA_DIR)):
logging.debug('Convert DS-0 Journal entries: starting conversion')
GLib.idle_add(_convert_entries, mount.get_root().get_path())
button = VolumeButton(mount)
button.props.group = self._volume_buttons[0]
button.connect('toggled', self._button_toggled_cb)
button.connect('volume-error', self.__volume_error_cb)
position = self.get_item_index(self._volume_buttons[-1]) + 1
self.insert(button, position)
button.show()
self._volume_buttons.append(button)
if len(self.get_children()) > 1:
self.show()
def __volume_error_cb(self, button, strerror, severity):
self.emit('volume-error', strerror, severity)
def _button_toggled_cb(self, button):
if button.props.active:
self.emit('volume-changed', button.mount_point)
def _get_button_for_mount(self, mount):
mount_point = mount.get_root().get_path()
for button in self.get_children():
if button.mount_point == mount_point:
return button
        logging.error("Couldn't find button with mount_point %r", mount_point)
return None
def _remove_button(self, mount):
button = self._get_button_for_mount(mount)
self._volume_buttons.remove(button)
self.remove(button)
self.get_children()[0].props.active = True
if len(self.get_children()) < 2:
self.hide()
def set_active_volume(self, mount):
button = self._get_button_for_mount(mount)
button.props.active = True
class BaseButton(RadioToolButton):
__gsignals__ = {
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, mount_point):
RadioToolButton.__init__(self)
self.mount_point = mount_point
self.drag_dest_set(Gtk.DestDefaults.ALL,
[Gtk.TargetEntry.new('journal-object-id', 0, 0)],
Gdk.DragAction.COPY)
self.connect('drag-data-received', self._drag_data_received_cb)
def _drag_data_received_cb(self, widget, drag_context, x, y,
selection_data, info, timestamp):
object_id = selection_data.get_data()
metadata = model.get(object_id)
file_path = model.get_file(metadata['uid'])
if not file_path or not os.path.exists(file_path):
logging.warn('Entries without a file cannot be copied.')
self.emit('volume-error',
_('Entries without a file cannot be copied.'),
_('Warning'))
return
try:
model.copy(metadata, self.mount_point)
except IOError, e:
logging.exception('Error while copying the entry. %s', e.strerror)
self.emit('volume-error',
_('Error while copying the entry. %s') % e.strerror,
_('Error'))
class VolumeButton(BaseButton):
def __init__(self, mount):
self._mount = mount
mount_point = mount.get_root().get_path()
BaseButton.__init__(self, mount_point)
self.props.icon_name = get_mount_icon_name(mount,
Gtk.IconSize.LARGE_TOOLBAR)
# TODO: retrieve the colors from the owner of the device
self.props.xo_color = get_mount_color(self._mount)
def create_palette(self):
palette = VolumePalette(self._mount)
# palette.props.invoker = FrameWidgetInvoker(self)
# palette.set_group_id('frame')
return palette
class JournalButton(BaseButton):
def __init__(self):
BaseButton.__init__(self, mount_point='/')
self.props.icon_name = 'activity-journal'
self.props.xo_color = profile.get_color()
def create_palette(self):
palette = JournalButtonPalette(self)
return palette
class JournalButtonPalette(Palette):
def __init__(self, mount):
Palette.__init__(self, _('Journal'))
grid = Gtk.Grid(orientation=Gtk.Orientation.VERTICAL,
margin=style.DEFAULT_SPACING,
row_spacing=style.DEFAULT_SPACING)
self.set_content(grid)
grid.show()
self._progress_bar = Gtk.ProgressBar()
grid.add(self._progress_bar)
self._progress_bar.show()
self._free_space_label = Gtk.Label()
self._free_space_label.set_alignment(0.5, 0.5)
grid.add(self._free_space_label)
self._free_space_label.show()
self.connect('popup', self.__popup_cb)
def __popup_cb(self, palette):
stat = os.statvfs(env.get_profile_path())
free_space = stat[statvfs.F_BSIZE] * stat[statvfs.F_BAVAIL]
total_space = stat[statvfs.F_BSIZE] * stat[statvfs.F_BLOCKS]
fraction = (total_space - free_space) / float(total_space)
self._progress_bar.props.fraction = fraction
self._free_space_label.props.label = _('%(free_space)d MB Free') % \
{'free_space': free_space / (1024 * 1024)}
class DocumentsButton(BaseButton):
def __init__(self, documents_path):
BaseButton.__init__(self, mount_point=documents_path)
self.props.icon_name = 'user-documents'
self.props.xo_color = profile.get_color()
|
icarito/sugar
|
src/jarabe/journal/volumestoolbar.py
|
Python
|
gpl-3.0
| 13,211
| 0
|
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
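# A minimal usage sketch (assumes a readable source file named
# 'example.py'; not part of the original module):
#
#     with open('example.py') as f:
#         for tok_type, tok_str, start, end, line in generate_tokens(f.readline):
#             print('%s %r %s-%s' % (tok_name[tok_type], tok_str, start, end))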
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, start, end, line): # for testing
    (srow, scol) = start
    (erow, ecol) = end
    print("%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
|
timm/timmnix
|
pypy3-v5.5.0-linux64/lib-python/3/lib2to3/pgen2/tokenize.py
|
Python
|
mit
| 19,320
| 0.001967
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hack_plot', '0005_auto_20150505_1940'),
]
operations = [
migrations.AddField(
model_name='sshhackip',
name='located',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
|
hellsgate1001/graphs
|
hack_plot/migrations/0006_sshhackip_located.py
|
Python
|
mit
| 444
| 0
|
from trackers.fitbit_tracker import FitbitTracker
__author__ = 'doughyde'
# FitBit connection
f = FitbitTracker()
f.authenticate()
f.get_devices()
|
doughyde/fitbit-cal-sync
|
fitbit-cal-sync.py
|
Python
|
gpl-2.0
| 149
| 0.006711
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from st2common.content.loader import ContentPackLoader
from st2common.exceptions.content import ParseException
from st2common.bootstrap.aliasesregistrar import AliasesRegistrar
from st2common.models.utils.action_alias_utils import extract_parameters_for_action_alias_db
from st2common.models.utils.action_alias_utils import extract_parameters
from st2tests.pack_resource import BasePackResourceTestCase
__all__ = [
'BaseActionAliasTestCase'
]
class BaseActionAliasTestCase(BasePackResourceTestCase):
"""
Base class for testing action aliases.
"""
action_alias_name = None
action_alias_db = None
def setUp(self):
super(BaseActionAliasTestCase, self).setUp()
if not self.action_alias_name:
raise ValueError('"action_alias_name" class attribute needs to be provided')
self.action_alias_db = self._get_action_alias_db_by_name(name=self.action_alias_name)
def assertCommandMatchesExactlyOneFormatString(self, format_strings, command):
"""
Assert that the provided command matches exactly one format string from the provided list.
"""
matched_format_strings = []
for format_string in format_strings:
try:
extract_parameters(format_str=format_string,
param_stream=command)
except ParseException:
continue
matched_format_strings.append(format_string)
if len(matched_format_strings) == 0:
msg = ('Command "%s" didn\'t match any of the provided format strings' % (command))
raise AssertionError(msg)
elif len(matched_format_strings) > 1:
msg = ('Command "%s" matched multiple format strings: %s' %
(command, ', '.join(matched_format_strings)))
raise AssertionError(msg)
def assertExtractedParametersMatch(self, format_string, command, parameters):
"""
Assert that the provided command matches the format string.
In addition to that, also assert that the parameters which have been extracted from the
user input (command) also match the provided parameters.
"""
extracted_params = extract_parameters_for_action_alias_db(
action_alias_db=self.action_alias_db,
format_str=format_string,
param_stream=command)
if extracted_params != parameters:
msg = ('Extracted parameters from command string "%s" against format string "%s"'
' didn\'t match the provided parameters: ' % (command, format_string))
            # Note: We intercept the exception so we can include a diff of the dictionaries
try:
self.assertEqual(extracted_params, parameters)
except AssertionError as e:
msg += str(e)
raise AssertionError(msg)
def _get_action_alias_db_by_name(self, name):
"""
Retrieve ActionAlias DB object for the provided alias name.
"""
base_pack_path = self._get_base_pack_path()
_, pack = os.path.split(base_pack_path)
pack_loader = ContentPackLoader()
registrar = AliasesRegistrar(use_pack_cache=False)
aliases_path = pack_loader.get_content_from_pack(pack_dir=base_pack_path,
content_type='aliases')
aliases = registrar._get_aliases_from_pack(aliases_dir=aliases_path)
for alias_path in aliases:
action_alias_db = registrar._get_action_alias_db(pack=pack,
action_alias=alias_path)
if action_alias_db.name == name:
return action_alias_db
raise ValueError('Alias with name "%s" not found' % (name))
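# A minimal usage sketch (the pack, alias name, format string and
# extracted parameters are hypothetical; they come from the alias
# metadata of the pack under test, and reading them via
# action_alias_db.formats assumes the standard alias schema):
#
#     class GreetActionAliasTestCase(BaseActionAliasTestCase):
#         action_alias_name = 'greet'
#
#         def test_greet_command(self):
#             format_string = self.action_alias_db.formats[0]
#             command = 'greet Alice'
#             self.assertExtractedParametersMatch(format_string, command,
#                                                 {'name': 'Alice'})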
|
lakshmi-kannan/st2
|
st2tests/st2tests/action_aliases.py
|
Python
|
apache-2.0
| 4,628
| 0.003025
|
from sft.runner.Trainer import Trainer
import sft.config.exp
if __name__ == "__main__":
Trainer().run(sft.config.exp)
|
kevinkepp/look-at-this
|
run_trainer.py
|
Python
|
mit
| 121
| 0.008264
|
import time
import sqlite3
from base_model import BaseModel
from datetime import datetime
from contextlib import contextmanager
class SSIDTrafficHistory(BaseModel):
def __init__(self, dbfile, table_name, time_limit):
super(SSIDTrafficHistory, self).__init__(dbfile, table_name)
self.time_limit = time_limit
def init_db(self):
with self.db_cursor() as c:
c.execute('''
CREATE TABLE IF NOT EXISTS {} (
timestamp integer,
adapter text,
ssid text,
rx integer,
tx integer,
PRIMARY KEY (timestamp, adapter, ssid)
)
'''.format(self.table_name))
    def truncate_time(self, timestamp):
        # Bucketing hook: subclasses round a timestamp down to the start
        # of their aggregation window.
        raise NotImplementedError
def query(self, adapter, ssid, timestamp=None):
if not timestamp:
timestamp = time.time()
with self.db_cursor(commit=False) as c:
query = '''
SELECT timestamp, adapter, ssid, rx, tx
FROM {}
WHERE adapter=? AND ssid=? AND timestamp=?;
'''.format(self.table_name)
c.execute(query, (adapter, ssid, self.truncate_time(timestamp)))
result = c.fetchone()
            if result is None:
result = (self.truncate_time(timestamp), adapter, ssid, 0, 0)
return {
'timestamp': self.truncate_time(timestamp),
'adapter': adapter,
'ssid': ssid,
'rx': result[3],
'tx': result[4]
}
def query_all(self, start_time=None, end_time=None, timestamp=None):
if not timestamp:
timestamp = time.time()
if not end_time:
end_time = timestamp
if not start_time:
start_time = self.truncate_time(end_time)
with self.db_cursor(commit=False) as c:
query = '''
SELECT timestamp, adapter, ssid, sum(rx), sum(tx)
FROM {}
WHERE timestamp >= ? AND timestamp <= ?
GROUP BY adapter, ssid
ORDER BY adapter, ssid;
'''.format(self.table_name)
c.execute(query, (start_time, end_time))
results = c.fetchall()
query_result = {}
for r in results:
ts, adapter, ssid, rx, tx = r
if adapter not in query_result:
query_result[adapter] = []
query_result[adapter].append({
'timestamp': ts,
'adapter': adapter,
'ssid': ssid,
'rx': rx,
'tx': tx
})
return query_result
def update(self, adapter, ssid, rx, tx, timestamp=None):
if not timestamp:
timestamp = time.time()
with self.db_cursor() as c:
query = '''
INSERT OR REPLACE INTO {} (timestamp, adapter, ssid, rx, tx)
VALUES ( ?, ?, ?, ?, ? );
'''.format(self.table_name)
c.execute(query, (self.truncate_time(timestamp), adapter, ssid, rx, tx))
def add(self, adapter, ssid, delta_rx, delta_tx, timestamp=None):
if not timestamp:
timestamp = time.time()
prev = self.query(adapter, ssid, timestamp=timestamp)
self.update(
adapter, ssid,
prev['rx']+delta_rx, prev['tx']+delta_tx,
timestamp=timestamp
)
self.clear(timestamp=timestamp)
def clear(self, timestamp=None):
if not timestamp:
timestamp = time.time()
with self.db_cursor() as c:
query = '''
DELETE FROM {}
WHERE timestamp < ?;
'''.format(self.table_name)
c.execute(query, (timestamp - self.time_limit, ))
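# A minimal sketch of a concrete subclass (the one-hour bucket width is
# an assumption; any deterministic rounding works):
class HourlySSIDTrafficHistory(SSIDTrafficHistory):
    def truncate_time(self, timestamp):
        # Round down to the start of the hour so every sample taken in
        # the same hour maps to one (timestamp, adapter, ssid) row.
        return int(timestamp) - (int(timestamp) % 3600)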
|
putrasattvika/ssidstat
|
ssidstat/common/models/ssid_traffic_history.py
|
Python
|
apache-2.0
| 3,044
| 0.031866
|
'''
Created on 28/Sep/2014
@author: Vincenzo Pirrone <pirrone.v@gmail.com>
'''
import serial, time
class Connector:
def readline(self):
pass
def writeline(self, line):
pass
def close(self):
pass
class FakeSerial(Connector):
def __init__(self, port):
print 'opening fake serial on %s' % port
def readline(self):
time.sleep(2)
return 'TIME:%d' % int(time.time())
def writeline(self, line):
print 'FAKE SERIAL: ' + line
def close(self):
print 'closing fake serial'
class Serial(Connector, serial.Serial):
def __init__(self,
port=None,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None,
xonxoff=False,
rtscts=False,
writeTimeout=None,
dsrdtr=False,
interCharTimeout=None):
        serial.Serial.__init__(
            self, port=port, baudrate=baudrate, bytesize=bytesize,
            parity=parity, stopbits=stopbits, timeout=timeout,
            xonxoff=xonxoff, rtscts=rtscts, writeTimeout=writeTimeout,
            dsrdtr=dsrdtr, interCharTimeout=interCharTimeout)
    def readline(self):
        # Delegate explicitly to the pyserial base class; calling
        # Serial.readline(self) here would recurse into this method
        # forever.
        return serial.Serial.readline(self)
    def writeline(self, line):
        serial.Serial.write(self, line + '\n')
    def close(self):
        serial.Serial.close(self)
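# A minimal usage sketch (the port name is an assumption; FakeSerial
# lets the caller run without hardware attached):
#
#     conn = FakeSerial('/dev/ttyUSB0')
#     print conn.readline()        # e.g. 'TIME:1411900000'
#     conn.writeline('ACK')
#     conn.close()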
|
sp4x/osnf
|
osnf/connectors.py
|
Python
|
apache-2.0
| 1,423
| 0.026704
|
# Generated by Django 1.11.15 on 2018-08-08 18:28
import django.db.models.deletion
import django_extensions.db.fields
import stdimage.models
from course_discovery.apps.course_metadata.utils import UploadToFieldNamePath
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0096_degree_lead_capture_list_name'),
]
operations = [
migrations.AddField(
model_name='degree',
name='lead_capture_image',
field=stdimage.models.StdImageField(blank=True, help_text='Please provide an image file for the lead capture banner.', null=True, upload_to=UploadToFieldNamePath('uuid', path='media/degree_marketing/lead_capture_images/')),
),
]
|
edx/course-discovery
|
course_discovery/apps/course_metadata/migrations/0097_degree_lead_capture_image.py
|
Python
|
agpl-3.0
| 772
| 0.001295
|
import os.path
from subprocess import call
class InstallerTools(object):
@staticmethod
def update_environment(file_path,environment_path):
update_file = open(file_path, 'r')
original_lines = update_file.readlines()
original_lines[0] = environment_path+'\n'
update_file.close()
update_file = open(file_path, 'w')
for lines in original_lines:
update_file.write(lines)
update_file.close()
@staticmethod
def fix_migrate(base_directory):
print "\nFixing the migrate bug \n"
buggy_path = os.path.join(base_directory,
'env/lib/python2.7/site-packages/migrate/versioning/schema.py')
buggy_file = open(buggy_path,'r')
original_lines = buggy_file.readlines()
original_lines[9] = "from sqlalchemy import exc as sa_exceptions\n"
buggy_file.close()
update_file = open(buggy_path,'w')
for lines in original_lines:
update_file.write(lines)
update_file.close()
@staticmethod
def refresh_environment(framework_config):
InstallerTools.update_environment(framework_config.yard_path,framework_config.environment_path)
InstallerTools.update_environment(framework_config.blow_path,framework_config.environment_path)
InstallerTools.update_environment(framework_config.try_path,framework_config.environment_path)
@staticmethod
def change_permissions(framework_config):
call(['chmod', 'a+x', framework_config.yard_path])
call(['chmod', 'a+x', framework_config.blow_path])
call(['chmod', 'a+x', framework_config.try_path])
@staticmethod
def create_db_directory(base_directory):
if not os.path.exists(os.path.join(base_directory, 'storage/')):
os.makedirs(os.path.join(base_directory, 'storage/'))
@staticmethod
def create_virtual_environment(framework_config):
call(['python', framework_config.v_path, framework_config.environment_name])
InstallerTools.refresh_environment(framework_config)
InstallerTools.change_permissions(framework_config)
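A hedged usage sketch; FrameworkConfig below is a hypothetical stand-in for whatever configuration object actually drives this installer, assuming only the attribute names used above:

from collections import namedtuple

FrameworkConfig = namedtuple('FrameworkConfig', [
    'v_path', 'environment_name', 'environment_path',
    'yard_path', 'blow_path', 'try_path'])

config = FrameworkConfig(
    v_path='virtualenv.py',          # script handed to `python` to build the env
    environment_name='env',
    environment_path='#!/opt/app/env/bin/python',  # new first line for the scripts
    yard_path='bin/yard',
    blow_path='bin/blow',
    try_path='bin/try')
InstallerTools.create_virtual_environment(config)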
|
femmerling/backyard
|
builder/installer_tools.py
|
Python
|
mit
| 1,906
| 0.028856
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RProtgenerics(RPackage):
"""S4 generic functions needed by Bioconductor proteomics packages."""
homepage = "https://bioconductor.org/packages/ProtGenerics/"
url = "https://git.bioconductor.org/packages/ProtGenerics"
list_url = homepage
version('1.8.0', git='https://git.bioconductor.org/packages/ProtGenerics', commit='b2b3bb0938e20f58fca905f6870de7dbc9dfd7a3')
depends_on('r@3.4.0:3.4.9', when='@1.8.0')
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-protgenerics/package.py
|
Python
|
lgpl-2.1
| 1,699
| 0.001177
|
# Input:
# 2
# 5
# 1 2 3 4 5
# 6
# 2 4 6 7 5 1
#
# Output:
# 3
# 7
def findMid(head):
# Fast/slow two-pointer scan: fast advances two nodes per step, slow one,
# so slow sits at the middle when fast runs out of list.
if head is None:
return -1
fast, slow = head, head
while fast.next is not None and fast.next.next is not None:
fast = fast.next.next
slow = slow.next
if fast.next is not None:
return slow.next
return slow
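A quick sanity check against the Input/Output comment above; Node here is a hypothetical singly linked list cell (findMid itself only touches `.next`):

class Node(object):
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    head = None
    for v in reversed(values):
        node = Node(v)
        node.next = head
        head = node
    return head

print(findMid(build([1, 2, 3, 4, 5])).val)     # 3
print(findMid(build([2, 4, 6, 7, 5, 1])).val)  # 7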
|
HoussemCharf/FunUtils
|
linked_lists/1_finding_middle_element_in_a_linked_list.py
|
Python
|
mit
| 335
| 0.01194
|
from tests.util.base import event
def test_invite_generation(event, default_account):
from inbox.events.ical import generate_icalendar_invite
event.sequence_number = 1
event.participants = [{'email': 'helena@nylas.com'},
{'email': 'myles@nylas.com'}]
cal = generate_icalendar_invite(event)
assert cal['method'] == 'REQUEST'
for component in cal.walk():
if component.name == "VEVENT":
assert component.get('summary') == event.title
assert int(component.get('sequence')) == event.sequence_number
assert component.get('location') == event.location
attendees = component.get('attendee', [])
# the iCalendar python module doesn't return a list when
# there's only one attendee. Go figure.
if not isinstance(attendees, list):
attendees = [attendees]
for attendee in attendees:
email = unicode(attendee)
# strip mailto: if it exists
if email.lower().startswith('mailto:'):
email = email[7:]
assert email in ['helena@nylas.com', 'myles@nylas.com']
def test_message_generation(event, default_account):
from inbox.events.ical import generate_invite_message
event.title = 'A long walk on the beach'
event.participants = [{'email': 'helena@nylas.com'}]
msg = generate_invite_message('empty', event, default_account)
# Check that we have an email with an HTML part, a plain text part, a
# text/calendar with METHOD=REQUEST and an attachment.
count = 0
for mimepart in msg.walk(with_self=msg.content_type.is_singlepart()):
format_type = mimepart.content_type.format_type
subtype = mimepart.content_type.subtype
if (format_type, subtype) in [('text', 'plain'), ('text', 'html'),
('text', 'calendar; method=request'),
('application', 'ics')]:
count += 1
assert count == 3
|
Eagles2F/sync-engine
|
tests/events/test_inviting.py
|
Python
|
agpl-3.0
| 2,071
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource2 as resource
class ServiceProfile(resource.Resource):
resource_key = 'service_profile'
resources_key = 'service_profiles'
base_path = '/service_profiles'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'description', 'driver',
is_enabled='enabled',
project_id='tenant_id'
)
# Properties
#: Description of the service flavor profile.
description = resource.Body('description')
#: Provider driver for the service flavor profile
driver = resource.Body('driver')
#: Sets enabled flag
is_enabled = resource.Body('enabled', type=bool)
#: Metainformation of the service flavor profile
meta_info = resource.Body('metainfo')
#: The owner project ID
project_id = resource.Body('tenant_id')
|
briancurtin/python-openstacksdk
|
openstack/network/v2/service_profile.py
|
Python
|
apache-2.0
| 1,571
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for ASL securityd log file."""
from plaso.formatters import interface
class MacSecuritydLogFormatter(interface.ConditionalEventFormatter):
"""Formatter for ASL Securityd file."""
DATA_TYPE = 'mac:asl:securityd:line'
FORMAT_STRING_PIECES = [
u'Sender: {sender}',
u'({sender_pid})',
u'Level: {level}',
u'Facility: {facility}',
u'Text: {message}']
FORMAT_STRING_SHORT_PIECES = [u'Text: {message}']
SOURCE_LONG = 'Mac ASL Securityd Log'
SOURCE_SHORT = 'LOG'
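For intuition, a conditional formatter emits only the pieces whose placeholders are present in the event values; a plain-Python analogue (not plaso's actual rendering code) behaves like this:

import re

def render(pieces, values):
    # Keep a piece only if every {placeholder} it names is available.
    out = []
    for piece in pieces:
        keys = re.findall(r'{(\w+)}', piece)
        if all(k in values for k in keys):
            out.append(piece.format(**values))
    return u' '.join(out)

pieces = [u'Sender: {sender}', u'({sender_pid})', u'Level: {level}',
          u'Facility: {facility}', u'Text: {message}']
print(render(pieces, {'sender': u'secd', 'sender_pid': 123, 'message': u'ok'}))
# Sender: secd (123) Text: ok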
|
cvandeplas/plaso
|
plaso/formatters/mac_securityd.py
|
Python
|
apache-2.0
| 1,215
| 0.005761
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and contributors
# License: MIT. See LICENSE
# import frappe
from frappe.model.document import Document
class UserDocumentType(Document):
pass
|
frappe/frappe
|
frappe/core/doctype/user_document_type/user_document_type.py
|
Python
|
mit
| 212
| 0.009434
|
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import taskqueue
import json
import endpoints
import urllib2
import logging, os
import settings
import inspect
from seqid import SeqidIssuer, seqid2str
from endpointshelper import EndpointsHelper
from logger import Logger
from geofeed import GeoFeed
package = 'GeoFeedAPI'
"""GeoFeed API
"""
class SeqidResponse(messages.Message):
"""Response message for geofeed.seqid method"""
status = messages.StringField(1)
series = messages.StringField(2)
seqid_datetime = messages.StringField(3)
seqid_int = messages.IntegerField(4)
class TestRequest(messages.Message):
"""request message for taskqueue.test"""
message = messages.StringField(1)
class TestResponse(messages.Message):
"""response message for taskqueue.test"""
status = messages.StringField(1)
message = messages.StringField(2)
info = messages.StringField(3)
class FeedItem(messages.Message):
topic = messages.StringField(1, required=True)
key = messages.StringField(2, required=True)
url = messages.StringField(3)
latitude = messages.FloatField(4)
longitude = messages.FloatField(5)
content = messages.StringField(6)
published = messages.StringField(7)
class PublishResponse(messages.Message):
"""response message for geofeed.publish"""
status = messages.StringField(1)
class ListRequest(messages.Message):
"""message for retrieving a list of feed items"""
topic = messages.StringField(1, required=True)
class ListResponse(messages.Message):
"""response message for geofeed.list"""
status = messages.StringField(1)
items = messages.MessageField(FeedItem, 2, repeated=True)
class GetRequest(messages.Message):
"""message for retrieving a single feed items"""
topic = messages.StringField(1, required=True)
key = messages.StringField(2, required=True)
class GetResponse(messages.Message):
"""response message for geofeed.get"""
status = messages.StringField(1)
item = messages.MessageField(FeedItem, 2)
@endpoints.api(name='geofeed', version='v1.0', allowed_client_ids=['314157906781-5k944tnd2e4hvcf0nrc4dl93kgdaqnam.apps.googleusercontent.com'])
#@hub_api.api_class(resource_name='geofeed')
class GeoFeedApi(remote.Service):
"""GeoFeed API
"""
SEQUENCE_RESOURCE = endpoints.ResourceContainer(
message_types.VoidMessage,
series=messages.StringField(1))
@endpoints.method(SEQUENCE_RESOURCE, SeqidResponse,
path='seqid/{series}', http_method='GET',
name='seqid')
def seqid(self, request):
"""Get a new seqid from the specified series
"""
response = SeqidResponse(status='OK')
try:
EndpointsHelper.authenticate()
issuer = SeqidIssuer(series=request.series)
seqid = issuer.issueSeqids()[0]
response.series = issuer.series
response.seqid_int = seqid
response.seqid_datetime = seqid2str(seqid)
except Exception, err:
response.status=str(err)
return response
@endpoints.method(FeedItem, PublishResponse,
path='publish', http_method='POST',
name='publish')
def publish(self, request):
"""Publish a new item to a feed.
"""
response = PublishResponse(status='OK')
try:
EndpointsHelper.authenticate()
GeoFeed.publish(**EndpointsHelper.message2dict(request))
except Exception, err:
response.status=str(err)
return response
@endpoints.method(ListRequest, ListResponse,
path='list', http_method='POST',
name='list')
def list(self, request):
"""Retrieve a list of recent items in a feed
"""
response = ListResponse(status='OK')
try:
EndpointsHelper.authenticate()
response.items = [FeedItem(**item) for item in GeoFeed.list(topic=request.topic)]
except Exception, err:
response.status=str(err)
return response
@endpoints.method(GetRequest, GetResponse,
path='get', http_method='POST',
name='get')
def get(self, request):
"""Retrieve a specified feed item
"""
response = GetResponse(status='OK')
try:
EndpointsHelper.authenticate()
item = GeoFeed.get(request.topic, request.key)
if item:
response.item = FeedItem(**item)
else:
response.status='NOT FOUND'
except Exception, err:
response.status=str(err)
return response
@endpoints.method(TestRequest, TestResponse,
path='test', http_method='POST',
name='test')
def test(self, request):
"""Test method for debugging conncection and auth issues
This method will return to the caller whatever string is supplied in the 'message' field
The info field in the response contains some debug information
"""
response = TestResponse(message=request.message, status='OK')
response.info = "USER: %s" % endpoints.get_current_user()
try:
EndpointsHelper.authenticate()
Logger.log(op='test')
except Exception, err:
response.status=str(err)
return response
#app = endpoints.api_server([hub_api])
#app = endpoints.api_server([GeoFeedApi])
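Cloud Endpoints conventionally serves an API named `geofeed` at version `v1.0` under `/_ah/api/geofeed/v1.0/...`, so the `test` method above could be exercised roughly as follows (Python 2 sketch; the host name is hypothetical, and real calls additionally need OAuth credentials):

import json
import urllib2

req = urllib2.Request(
    'https://your-app.appspot.com/_ah/api/geofeed/v1.0/test',
    json.dumps({'message': 'hello'}),
    {'Content-Type': 'application/json'})
print urllib2.urlopen(req).read()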
|
SkyTruth/skytruth-automation-hub
|
gae/geofeed_api.py
|
Python
|
mit
| 5,854
| 0.007345
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Toshio Kuratomi <a.badger@gmail.com>, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
.. warning:: This module_util is currently an internal implementation.
We want to evaluate this code for stability and API suitability before
making backwards compatibility guarantees. The API may change between
releases. Do not use this unless you are willing to port your module code.
"""
import codecs
from ansible.module_utils.six import PY3, text_type, binary_type
try:
codecs.lookup_error('surrogateescape')
HAS_SURROGATEESCAPE = True
except LookupError:
HAS_SURROGATEESCAPE = False
_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_escape',
'surrogate_or_strict',
'surrogate_then_replace'))
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
"""Make sure that a string is a byte string
:arg obj: An object to make sure is a byte string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a text string to
a byte string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the text string is not
encodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. There are three additional error strategies
specifically aimed at helping people to port code. The first two are:
:surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``strict``
:surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``replace``.
Because ``surrogateescape`` was added in Python3 this usually means that
Python3 will use ``surrogateescape`` and Python2 will use the fallback
error handler. Note that the code checks for ``surrogateescape`` when the
module is imported. If you have a backport of ``surrogateescape`` for
Python2, be sure to register the error handler prior to importing this
module.
The last error handler is:
:surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
handler. If encoding with ``surrogateescape`` would traceback,
surrogates are first replaced with a replacement characters
and then the string is encoded using ``replace`` (which replaces
the rest of the nonencodable bytes). If ``surrogateescape`` is
not present it will simply use ``replace``. (Added in Ansible 2.3)
This strategy is designed to never traceback when it attempts
to encode a string.
The default until Ansible-2.2 was ``surrogate_or_replace``.
From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the bytes version of that string.
:empty: Return an empty byte string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a byte string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a text string.
.. note:: If passed a byte string, this function does not check that the
string is valid in the specified encoding. If it's important that the
byte string is in the specified encoding do::
encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
.. versionchanged:: 2.3
Added the ``surrogate_then_replace`` error handler and made it the default error handler.
"""
if isinstance(obj, binary_type):
return obj
# We're given a text string
# If it has surrogates, we know because it will decode
original_errors = errors
if errors in _COMPOSED_ERROR_HANDLERS:
if HAS_SURROGATEESCAPE:
errors = 'surrogateescape'
elif errors == 'surrogate_or_strict':
errors = 'strict'
else:
errors = 'replace'
if isinstance(obj, text_type):
try:
# Try this first as it's the fastest
return obj.encode(encoding, errors)
except UnicodeEncodeError:
if original_errors in (None, 'surrogate_then_replace'):
# Slow but works
return_string = obj.encode('utf-8', 'surrogateescape')
return_string = return_string.decode('utf-8', 'replace')
return return_string.encode(encoding, 'replace')
raise
# Note: We do these last even though we have to call to_bytes again on the
# value because we're optimizing the common case
if nonstring == 'simplerepr':
try:
value = str(obj)
except UnicodeError:
try:
value = repr(obj)
except UnicodeError:
# Giving up
return to_bytes('')
elif nonstring == 'passthru':
return obj
elif nonstring == 'empty':
# python2.4 doesn't have b''
return to_bytes('')
elif nonstring == 'strict':
raise TypeError('obj must be a string type')
else:
raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
return to_bytes(value, encoding, errors)
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
"""Make sure that a string is a text string
:arg obj: An object to make sure is a text string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a byte string to
a text string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the byte string is not
decodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. We support three additional error strategies
specifically aimed at helping people to port code:
:surrogate_or_strict: Will use surrogateescape if it is a valid
handler, otherwise it will use strict
:surrogate_or_replace: Will use surrogateescape if it is a valid
handler, otherwise it will use replace.
:surrogate_then_replace: Does the same as surrogate_or_replace but
was added for symmetry with the error handlers in
:func:`ansible.module_utils._text.to_bytes` (Added in Ansible 2.3)
Because surrogateescape was added in Python3 this usually means that
Python3 will use `surrogateescape` and Python2 will use the fallback
error handler. Note that the code checks for surrogateescape when the
module is imported. If you have a backport of `surrogateescape` for
python2, be sure to register the error handler prior to importing this
module.
The default until Ansible-2.2 was `surrogate_or_replace`.
In Ansible-2.3 this defaults to `surrogate_then_replace` for symmetry
with :func:`ansible.module_utils._text.to_bytes` .
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the text version of that string.
:empty: Return an empty text string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a text string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a byte string.
From Ansible-2.3 onwards, the default is `surrogate_then_replace`.
.. versionchanged:: 2.3
Added the surrogate_then_replace error handler and made it the default error handler.
"""
if isinstance(obj, text_type):
return obj
if errors in _COMPOSED_ERROR_HANDLERS:
if HAS_SURROGATEESCAPE:
errors = 'surrogateescape'
elif errors == 'surrogate_or_strict':
errors = 'strict'
else:
errors = 'replace'
if isinstance(obj, binary_type):
# Note: We don't need special handling for surrogate_then_replace
# because all bytes will either be made into surrogates or are valid
# to decode.
return obj.decode(encoding, errors)
# Note: We do these last even though we have to call to_text again on the
# value because we're optimizing the common case
if nonstring == 'simplerepr':
try:
value = str(obj)
except UnicodeError:
try:
value = repr(obj)
except UnicodeError:
# Giving up
return u''
elif nonstring == 'passthru':
return obj
elif nonstring == 'empty':
return u''
elif nonstring == 'strict':
raise TypeError('obj must be a string type')
else:
raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)
return to_text(value, encoding, errors)
#: :py:func:`to_native`
#: Transform a variable into the native str type for the python version
#:
#: On Python2, this is an alias for
#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
#: :func:`~ansible.module_utils.to_text`. It makes it easier to
#: transform a variable into the native str type for the python version
#: the code is running on. Use this when constructing the message to
#: send to exceptions or when dealing with an API that needs to take
#: a native string. Example::
#:
#: try:
#: 1//0
#: except ZeroDivisionError as e:
#: raise MyException('Encountered an error: %s' % to_native(e))
if PY3:
to_native = to_text
else:
to_native = to_bytes
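A short behavioral sketch of the three helpers, using only the functions defined above:

# Round-trip between text and bytes with the default error handling.
data = to_bytes(u'caf\xe9')   # b'caf\xc3\xa9' under UTF-8
text = to_text(data)          # back to u'caf\xe9'
assert text == u'caf\xe9'
# Nonstring inputs fall back to the 'simplerepr' strategy by default.
assert to_text(42) == u'42'
assert to_bytes(None, nonstring='empty') == b''
# to_native picks whichever str type is native to the running interpreter.
msg = to_native(u'caf\xe9')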
|
Slezhuk/ansible
|
lib/ansible/module_utils/_text.py
|
Python
|
gpl-3.0
| 12,325
| 0.00211
|
# -*- coding: utf-8 -*-
# Aualé oware graphic user interface.
# Copyright (C) 2014-2020 Joan Sala Soler <contact@joansala.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
import random
import struct
from game import Match
from game import Oware
from uci import Strength
from .constants import COEFFICIENTS
class OpeningBook(object):
"""Opening book implementation"""
__MARGIN = 42
def __init__(self, path):
self._scores = []
self._header = dict()
self._min_score = self.__MARGIN
self._load_opening_book(path)
def set_strength(self, strength):
"""Sets the playing strength of the book"""
margin = self.__MARGIN
factor = 1 - strength.strength_factor
self._min_score = margin + (.25 * margin * factor) ** 2
def pick_best_move(self, match):
"""Choose a best move from the book"""
moves = self.find_best_moves(match)
choice = random.choice(moves) if moves else None
return choice
def find_best_moves(self, match):
"""Obtain the best moves from the book"""
moves = list()
game = match.get_game()
turn = match.get_turn()
scores = self._get_move_scores(match)
max_score = max(scores) if scores else -math.inf
min_score = max(max_score - self._min_score, -self._min_score)
offset = 0 if turn == game.SOUTH else 6
for move, score in enumerate(scores, offset):
if score >= min_score or score >= max_score:
moves.append(move)
return moves
def _get_move_scores(self, match):
"""Scores for the given match position"""
code = self._compute_hash_code(match)
scores = self._scores.get(code, [])
return scores
def _load_opening_book(self, path):
"""Loads an opening book from a file"""
with open(path, 'rb') as file:
self._header = self._read_header(file)
self._scores = self._read_scores(file)
def _read_header(self, file):
"""Reads the header fields from an open file"""
header = dict()
signature = file.readline()
while True:
field = file.readline()
if not field or field == b'\x00\n': break
values = field.decode('utf-8').split(':', 1)
header.setdefault(*values)
return header
def _read_scores(self, file):
"""Reads position scores from an open file"""
scores = dict()
while True:
entry = file.read(20)
if not entry: break
code, *values = struct.unpack('>q6h', entry)
scores.setdefault(code, values)
return scores
def _compute_hash_code(self, match):
"""Hash code for the current match position"""
game = match.get_game()
turn = match.get_turn()
board = match.get_board()
code = 0x80000000000 if turn == game.SOUTH else 0x00
seeds = board[13]
for house in range(12, -1, -1):
if seeds >= 48: break
code += COEFFICIENTS[seeds][house]
seeds += board[house]
return code
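Each score entry on disk is a fixed 20-byte record, a big-endian 64-bit position hash followed by six 16-bit move scores, as the '>q6h' format in `_read_scores` implies. A minimal sketch of packing and unpacking one such entry:

import struct

code = 0x80000000000          # south-to-move flag plus position coefficients
scores = [120, -35, 0, 42, -8, 7]
entry = struct.pack('>q6h', code, *scores)
assert len(entry) == 20
unpacked_code, *unpacked_scores = struct.unpack('>q6h', entry)
assert unpacked_code == code
assert unpacked_scores == scores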
|
joansalasoler/auale
|
src/auale/book/opening_book.py
|
Python
|
gpl-3.0
| 3,781
| 0.000794
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for interacting with MapReduce-like backends.
This package contains libraries for using TFF in backend systems that offer
MapReduce-like capabilities, i.e., systems that can perform parallel processing
on a set of clients, and then aggregate the results of such processing on the
server. Systems of this type do not support the full expressiveness of TFF, but
they are common enough in practice to warrant a dedicated set of libraries, and
many examples of TFF computations, including those constructed by
`tff.learning`, can be compiled by TFF into a form that can be deployed on such
systems.
This package defines a data structure `MapReduceForm`, parameterized by
TensorFlow functions, which expresses the logic of a single MapReduce-style
round (plus initialization) and serves as a target for TFF's compiler pipeline.
`MapReduceForm` serves as the conceptual core of this package, and represents
a manner of specifying a round of federated computation quite distinct from
TFF's usual `computation.proto`. However, as `MapReduceForm` can express only a
strict subset of the logic expressible via `computation.proto`, we discuss the
mapping between the two here.
Instead of `computation.proto` directly, we standardize on
`tff.templates.IterativeProcess` as the basis for targeting the canonical
mapreduce representation, as this type of processing is most common in federated
learning scenarios, where different rounds typically involve different subsets
of a potentially very large number of participating clients. The iterative
aspect of the computation allows for it to not only model processes that evolve
over time, but also ones that might involve a very large client population in
which not all participants (clients, data shards, etc.) may be present at the
same time, and the iterative approach may instead be dictated by data
availability or scalability considerations. Related to the above, because in
practical scenarios the set of clients involved in a federated computation
will often vary from round to round, the server state is necessary to connect
subsequent rounds into a single contiguous logical sequence.
Conceptually, `next`, the iterator part of an iterative process, is modeled
in the same way as any stateful computation in TFF. I.e., one that takes the
server state as the first component of the input, and returns updated server
state as the first component of the output. If there is no need for server
state, the input/output state should be modeled as an empty tuple.
In addition to updating state, `next` additionally takes client-side data as
input, and can produce results on server side in addition to state intended to
be passed to the next round. As is the case for the server state, if this
is undesired it should be modeled as an empty tuple.
The type signature of `next`, in the concise TFF type notation (as defined in
TFF's `computation.proto`), is as follows:
```python
(<S@SERVER,{D}@CLIENTS> -> <S@SERVER,X@SERVER>)
```
The above type signature involves the following abstract types:
* `S` is the type of the state that is passed at the server between rounds of
processing. For example, in the context of federated training, the server
state would typically include the weights of the model being trained. The
weights would be updated in each round as the model is trained on more and
more of the clients' data, and hence the server state would evolve as well.
Note: This is also the type of the output of the `initialize` that produces
the server state to feed into the first round.
* `D` represents the type of per-client units of data that serve as the input
to the computation. Often, this would be a sequence type, i.e., a dataset
in TensorFlow's parlance, although strictly speaking this does not have to
always be the case.
* `X` represents the type of server-side outputs generated by the server after
each round.
One can think of the process based on this representation as being equivalent
to the following pseudocode loop:
```python
client_data = ...
server_state = initialize()
while True:
server_state, server_outputs = next(server_state, client_data)
```
The logic of `next` in `MapReduceForm` is factored into seven
variable components `prepare`, `work`, `zero`, `accumulate`, `merge`,
`report`, and `update` (in addition to `initialize` that produces the server
state component for the initial round and `bitwidth` that specifies runtime
parameters for `federated_secure_sum_bitwidth`). The pseudocode below uses
common syntactic shortcuts (such as implicit zipping) for brevity.
For a concise representation of the logic embedded in the discussion below,
specifying the manner in which an instance `mrf` of `MapReduceForm` maps to
a single federated round, see the definitions of `init_computation` and
`next_computation` in
`form_utils.get_iterative_process_for_map_reduce_form`.
```python
@tff.federated_computation
def next(server_state, client_data):
# The server prepares an input to be broadcast to all clients that controls
# what will happen in this round.
client_input = (
tff.federated_broadcast(tff.federated_map(prepare, server_state)))
# The clients all independently do local work and produce updates, plus the
# optional client-side outputs.
client_updates = tff.federated_map(work, [client_data, client_input])
# `client_updates` is a two-tuple, whose first index should be aggregated
# with TFF's `federated_aggregate` and whose second index should be passed
# to TFF's `federated_secure_sum_bitwidth`. The updates are aggregated
# across the system into a single global update at the server.
simple_agg = (
tff.federated_aggregate(client_updates[0], zero(), accumulate, merge,
report))
secure_agg = tff.federated_secure_sum_bitwidth(client_updates[1], bitwidth())
global_update = [simple_agg, secure_agg]
# Finally, the server produces a new state as well as server-side output to
# emit from this round.
new_server_state, server_output = (
tff.federated_map(update, [server_state, global_update]))
# The updated server state, server- and client-side outputs are returned as
# results of this round.
return new_server_state, server_output
```
The above characterization of `next` forms the relationship between
`MapReduceForm` and `tff.templates.IterativeProcess`. It depends on the seven
components named earlier plus `bitwidth`, each defined as pure TensorFlow
logic as follows. Please also consult the
documentation for related federated operators for more detail (particularly
the `tff.federated_aggregate()`, as several of the components below correspond
directly to the parameters of that operator).
* `prepare` represents the preparatory steps taken by the server to generate
inputs that will be broadcast to the clients and that, together with the
client data, will drive the client-side work in this round. It takes the
initial state of the server, and produces the input for use by the clients.
Its type signature is `(S -> C)`.
* `work` represents the totality of client-side processing, again all as a
single section of TensorFlow code. It takes a tuple of client data and
client input that was broadcasted by the server, and returns a two-tuple
containing the client update to be aggregated (across all the clients). The
first index of this two-tuple will be passed to an aggregation parameterized
by the blocks of TensorFlow below (`zero`, `accumulate`, `merge`, and
`report`), and the second index will be passed to
`federated_secure_sum_bitwidth`. Its type signature is `(<D,C> -> <U,V>)`.
* `bitwidth` is the TensorFlow computation that produces an integer specifying
the bitwidth for inputs to secure sum. `bitwidth` will be used by the system
to compute appropriate parameters for the secure sum protocol. Exactly how
this computation is performed is left to the runtime implementation of
`federated_secure_sum_bitwidth`.
* `zero` is the TensorFlow computation that produces the initial state of
accumulators that are used to combine updates collected from subsets of the
client population. In some systems, all accumulation may happen at the
server, but for scalability reasons, it is often desirable to structure
aggregation in multiple tiers. Its type signature is `A`, or when
represented as a `tff.Computation` in Python, `( -> A)`.
* `accumulate` is the TensorFlow computation that updates the state of an
update accumulator (initialized with `zero` above) with a single client's
update. Its type signature is `(<A,U> -> A)`. Typically, a single accumulator
would be used to combine the updates from multiple clients, but this does
not have to be the case (it's up to the target deployment platform to choose
how to use this logic in a particular deployment scenario).
* `merge` is the TensorFlow computation that merges two accumulators holding
the results of aggregation over two disjoint subsets of clients. Its type
signature is `(<A,A> -> A)`.
* `report` is the TensorFlow computation that transforms the state of the
top-most accumulator (after accumulating updates from all clients and
merging all the resulting accumulators into a single one at the top level
of the system hierarchy) into the final result of aggregation. Its type
signature is `(A -> R)`.
* `update` is the TensorFlow computation that applies the aggregate of all
clients' updates (the output of `report`), also referred to above as the
global update, to the server state, to produce a new server state to feed
into the next round, and that additionally outputs a server-side output,
to be reported externally as one of the results of this round. In federated
learning scenarios, the server-side outputs might include things like loss
and accuracy metrics, and the server state to be carried over, as noted
above, may include the model weights to be trained further in a subsequent
round. The type signature of this computation is `(<S,R> -> <S,X>)`.
The above TensorFlow computations' type signatures involves the following
abstract types in addition to those defined earlier:
* `C` is the type of the inputs for the clients, to be supplied by the server
at the beginning of each round (or an empty tuple if not needed).
* `U` is the type of the per-client update to be produced in each round and
fed into the cross-client federated aggregation protocol.
* `V` is the type of the per-client update to be produced in each round and
fed into the cross-client secure aggregation protocol.
* `A` is the type of the accumulators used to combine updates from subsets of
clients.
* `R` is the type of the final result of aggregating all client updates, the
global update to be incorporated into the server state at the end of the
round (and to produce the server-side output).
"""
# TODO(b/138261370): Cover this in the general set of guidelines for deployment.
from tensorflow_federated.python.core.backends.mapreduce.form_utils import check_iterative_process_compatible_with_map_reduce_form
from tensorflow_federated.python.core.backends.mapreduce.form_utils import get_broadcast_form_for_computation
from tensorflow_federated.python.core.backends.mapreduce.form_utils import get_computation_for_broadcast_form
from tensorflow_federated.python.core.backends.mapreduce.form_utils import get_iterative_process_for_map_reduce_form
from tensorflow_federated.python.core.backends.mapreduce.form_utils import get_map_reduce_form_for_iterative_process
from tensorflow_federated.python.core.backends.mapreduce.forms import BroadcastForm
from tensorflow_federated.python.core.backends.mapreduce.forms import MapReduceForm
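To make the factoring concrete without pulling in TFF, here is a plain-Python analogue of a single round under toy assumptions (state and updates are ints, clients are a list); it mirrors the pseudocode for `next` above but runs eagerly:

def prepare(state): return state                       # S -> C
def work(data, client_input): return (data + client_input, data % 2)  # <D,C> -> <U,V>
def zero(): return 0                                   # -> A
def accumulate(acc, u): return acc + u                 # <A,U> -> A
def merge(a, b): return a + b                          # <A,A> -> A
def report(acc): return acc                            # A -> R
def update(state, global_update): return state + 1, global_update  # <S,R> -> <S,X>

def next_round(server_state, client_data):
    client_input = prepare(server_state)
    client_updates = [work(d, client_input) for d in client_data]
    # federated_aggregate analogue over the first update component.
    acc = zero()
    for u, _ in client_updates:
        acc = accumulate(acc, u)
    simple_agg = report(merge(acc, zero()))
    # secure-sum analogue over the second component.
    secure_agg = sum(v for _, v in client_updates)
    return update(server_state, [simple_agg, secure_agg])

server_state = 0
server_state, server_output = next_round(server_state, [1, 2, 3])
print(server_state, server_output)  # 1 [6, 2]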
|
tensorflow/federated
|
tensorflow_federated/python/core/backends/mapreduce/__init__.py
|
Python
|
apache-2.0
| 12,271
| 0.000733
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy as np
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.io.test.common import compute_mulliken_charges, compute_hf_energy
def test_load_wfn_low_he_s():
fn_wfn = context.get_fn('test/he_s_orbital.wfn')
title, numbers, coordinates, centers, type_assignment, exponents, \
mo_count, occ_num, mo_energy, coefficients, energy = load_wfn_low(fn_wfn)
assert title == 'He atom - decontracted 6-31G basis set'
assert numbers.shape == (1,)
assert numbers == [2]
assert coordinates.shape == (1, 3)
assert (coordinates == [0.00, 0.00, 0.00]).all()
assert centers.shape == (4,)
assert (centers == [0, 0, 0, 0]).all()
assert type_assignment.shape == (4,)
assert (type_assignment == [1, 1, 1, 1]).all()
assert exponents.shape == (4,)
assert (exponents == [0.3842163E+02, 0.5778030E+01, 0.1241774E+01, 0.2979640E+00]).all()
assert mo_count.shape == (1,)
assert mo_count == [1]
assert occ_num.shape == (1,)
assert occ_num == [2.0]
assert mo_energy.shape == (1,)
assert mo_energy == [-0.914127]
assert coefficients.shape == (4, 1)
expected = np.array([0.26139500E+00, 0.41084277E+00, 0.39372947E+00, 0.14762025E+00])
assert (coefficients == expected.reshape(4, 1)).all()
assert abs(energy - (-2.855160426155)) < 1.e-5
def test_load_wfn_low_h2o():
fn_wfn = context.get_fn('test/h2o_sto3g.wfn')
title, numbers, coordinates, centers, type_assignment, exponents, \
mo_count, occ_num, mo_energy, coefficients, energy = load_wfn_low(fn_wfn)
assert title == 'H2O Optimization'
assert numbers.shape == (3,)
assert (numbers == np.array([8, 1, 1])).all()
assert coordinates.shape == (3, 3)
assert (coordinates[0] == [-4.44734101, 3.39697999, 0.00000000]).all()
assert (coordinates[1] == [-2.58401495, 3.55136194, 0.00000000]).all()
assert (coordinates[2] == [-4.92380519, 5.20496220, 0.00000000]).all()
assert centers.shape == (21,)
assert (centers[:15] == np.zeros(15, int)).all()
assert (centers[15:] == np.array([1, 1, 1, 2, 2, 2])).all()
assert type_assignment.shape == (21,)
assert (type_assignment[:6] == np.ones(6)).all()
assert (type_assignment[6:15] == np.array([2, 2, 2, 3, 3, 3, 4, 4, 4])).all()
assert (type_assignment[15:] == np.ones(6)).all()
assert exponents.shape == (21,)
assert (exponents[:3] == [0.1307093E+03, 0.2380887E+02, 0.6443608E+01]).all()
assert (exponents[5:8] == [0.3803890E+00, 0.5033151E+01, 0.1169596E+01]).all()
assert (exponents[13:16] == [0.1169596E+01, 0.3803890E+00, 0.3425251E+01]).all()
assert exponents[-1] == 0.1688554E+00
assert mo_count.shape == (5,)
assert (mo_count == [1, 2, 3, 4, 5]).all()
assert occ_num.shape == (5,)
assert np.sum(occ_num) == 10.0
assert (occ_num == [2.0, 2.0, 2.0, 2.0, 2.0]).all()
assert mo_energy.shape == (5,)
assert (mo_energy == np.sort(mo_energy)).all()
assert (mo_energy[:3] == [-20.251576, -1.257549, -0.593857]).all()
assert (mo_energy[3:] == [-0.459729, -0.392617]).all()
assert coefficients.shape == (21, 5)
expected = [0.42273517E+01, -0.99395832E+00, 0.19183487E-11, 0.44235381E+00, -0.57941668E-14]
assert (coefficients[0] == expected).all()
assert coefficients[6, 2] == 0.83831599E+00
assert coefficients[10, 3] == 0.65034846E+00
assert coefficients[17, 1] == 0.12988055E-01
assert coefficients[-1, 0] == -0.46610858E-03
assert coefficients[-1, -1] == -0.33277355E-15
assert abs(energy - (-74.965901217080)) < 1.e-6
def test_get_permutation_orbital():
assert (get_permutation_orbital(np.array([1, 1, 1])) == [0, 1, 2]).all()
assert (get_permutation_orbital(np.array([1, 1, 2, 3, 4])) == [0, 1, 2, 3, 4]).all()
assert (get_permutation_orbital(np.array([2, 3, 4])) == [0, 1, 2]).all()
assert (get_permutation_orbital(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
assign = np.array([1, 1, 2, 2, 3, 3, 4, 4, 1])
expect = [0, 1, 2, 4, 6, 3, 5, 7, 8]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
expect = [0, 1, 2, 3, 4, 5, 6, 7]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10])
expect = [0, 2, 4, 6, 8, 10, 1, 3, 5, 7, 9, 11]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10])
expect = [0, 1, 3, 5, 2, 4, 6, 7, 8, 9, 10, 11, 12]
assert (get_permutation_orbital(assign) == expect).all()
# f orbitals
assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
assert (get_permutation_orbital(assign) == range(10)).all()
# g orbitals
assign = np.array([23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21])
assert (get_permutation_orbital(assign) == range(15)).all()
# h orbitals
assert (get_permutation_orbital(np.arange(36, 57)) == range(21)).all()
assign = np.array([1, 1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
assert (get_permutation_orbital(assign) == range(12)).all()
assign = np.array([2, 3, 4, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1, 1])
assert (get_permutation_orbital(assign) == range(15)).all()
def test_get_permutation_basis():
assert (get_permutation_basis(np.array([1, 1, 1])) == [0, 1, 2]).all()
assert (get_permutation_basis(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
assert (get_permutation_basis(np.array([1, 2, 3, 4, 1])) == [0, 1, 2, 3, 4]).all()
assert (get_permutation_basis(np.array([5, 6, 7, 8, 9, 10])) == [0, 3, 4, 1, 5, 2]).all()
assign = np.repeat([5, 6, 7, 8, 9, 10], 2)
expect = [0, 6, 8, 2, 10, 4, 1, 7, 9, 3, 11, 5]
assert (get_permutation_basis(assign) == expect).all()
assert (get_permutation_basis(np.arange(1, 11)) == [0, 1, 2, 3, 4, 7, 8, 5, 9, 6]).all()
assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
expect = [0, 1, 4, 5, 2, 6, 3, 7]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
expect = [0, 4, 5, 3, 9, 6, 1, 8, 7, 2]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1])
expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 2, 3, 3, 4, 4])
expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 13, 15, 12, 14, 16]
assert (get_permutation_basis(assign) == expect).all()
assign = [1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expect = np.array([0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 12, 13, 14, 17, 18, 15, 19, 16])
assert (get_permutation_basis(np.array(assign)) == expect).all()
assert (get_permutation_basis(np.arange(36, 57)) == np.arange(21)[::-1]).all()
assign = [23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21]
expect = [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
assert (get_permutation_basis(np.array(assign)) == expect).all()
assert (get_permutation_basis(np.arange(36, 57)) == range(21)[::-1]).all()
def test_get_mask():
assert (get_mask(np.array([2, 3, 4])) == [True, False, False]).all()
expected = [True, True, False, False, True, True, False, False]
assert (get_mask(np.array([1, 2, 3, 4, 1, 2, 3, 4])) == expected).all()
expected = [True, False, False, False, False, False]
assert (get_mask(np.array([5, 6, 7, 8, 9, 10])) == expected).all()
expected = [True, False, False, True, True, False, False, False, False, False]
assert (get_mask(np.array([2, 3, 4, 1, 5, 6, 7, 8, 9, 10])) == expected).all()
expected = [True, False, False, False, False, False, False, False, False, False]
assert (get_mask(np.arange(11, 21)) == expected).all()
assert (get_mask(np.array([21, 24, 25])) == [True, False, False]).all()
assert (get_mask(np.array([11, 21, 36, 1])) == [True, True, True, True]).all()
def check_load_wfn(name):
# system out of *.wfn file
mol1 = IOData.from_file(context.get_fn('test/%s.wfn' % name))
# system out of *.fchk file
mol2 = IOData.from_file(context.get_fn('test/%s.fchk' % name))
# Coordinates check:
assert (abs(mol1.coordinates - mol2.coordinates) < 1e-6).all()
# Numbers check
numbers1 = mol1.numbers
numbers2 = mol2.numbers
assert (numbers1 == numbers2).all()
# Basis Set check:
obasis1 = mol1.obasis
obasis2 = mol2.obasis
assert obasis1.nbasis == obasis2.nbasis
assert (obasis1.shell_map == obasis2.shell_map).all()
assert (obasis1.shell_types == obasis2.shell_types).all()
assert (obasis1.nprims == obasis2.nprims).all()
assert (abs(obasis1.alphas - obasis2.alphas) < 1.e-4).all()
# Comparing MOs (*.wfn might not contain virtual orbitals):
n_mo = mol1.orb_alpha.nfn
assert (abs(mol1.orb_alpha.energies - mol2.orb_alpha.energies[:n_mo]) < 1.e-5).all()
assert (mol1.orb_alpha.occupations == mol2.orb_alpha.occupations[:n_mo]).all()
assert (abs(mol1.orb_alpha.coeffs - mol2.orb_alpha.coeffs[:, :n_mo]) < 1.e-7).all()
# Check overlap
olp1 = obasis1.compute_overlap()
olp2 = obasis2.compute_overlap()
obasis2.compute_overlap(olp2)
assert (abs(olp1 - olp2) < 1e-6).all()
# Check normalization
mol1.orb_alpha.check_normalization(olp1, 1e-5)
# Check charges
dm_full1 = mol1.get_dm_full()
charges1 = compute_mulliken_charges(obasis1, numbers1, dm_full1)
dm_full2 = mol2.get_dm_full()
charges2 = compute_mulliken_charges(obasis2, numbers2, dm_full2)
assert (abs(charges1 - charges2) < 1e-6).all()
# Check energy
energy1 = compute_hf_energy(mol1)
energy2 = compute_hf_energy(mol2)
# check loaded & computed energy from wfn file
assert abs(energy1 - mol1.energy) < 1.e-5
assert abs(energy1 - energy2) < 1e-5
return energy1, charges1
def test_load_wfn_he_s_virtual():
energy, charges = check_load_wfn('he_s_virtual')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855160426155)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_s():
energy, charges = check_load_wfn('he_s_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855160426155)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_sp():
energy, charges = check_load_wfn('he_sp_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.859895424589)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spd():
energy, charges = check_load_wfn('he_spd_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855319016184)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdf():
energy, charges = check_load_wfn('he_spdf_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.100269433080)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdfgh():
energy, charges = check_load_wfn('he_spdfgh_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.048675168346)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdfgh_virtual():
energy, charges = check_load_wfn('he_spdfgh_virtual')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.048675168346)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def check_wfn(fn_wfn, restricted, nbasis, energy, charges):
fn_wfn = context.get_fn(fn_wfn)
mol = IOData.from_file(fn_wfn)
assert mol.obasis.nbasis == nbasis
olp = mol.obasis.compute_overlap()
if restricted:
mol.orb_alpha.check_normalization(olp, 1e-5)
assert not hasattr(mol, 'orb_beta')
else:
mol.orb_alpha.check_normalization(olp, 1e-5)
mol.orb_beta.check_normalization(olp, 1e-5)
if energy is not None:
assert abs(energy - mol.energy) < 1.e-5
myenergy = compute_hf_energy(mol)
assert abs(energy - myenergy) < 1e-5
dm_full = mol.get_dm_full()
mycharges = compute_mulliken_charges(mol.obasis, mol.numbers, dm_full)
assert (abs(charges - mycharges) < 1e-5).all()
orb_beta = getattr(mol, 'orb_beta', None)
return mol.obasis, mol.coordinates, mol.numbers, dm_full, mol.orb_alpha, orb_beta, mol.energy
def test_load_wfn_h2o_sto3g_decontracted():
check_wfn(
'test/h2o_sto3g_decontracted.wfn',
True, 21, -75.162231674351,
np.array([-0.546656, 0.273328, 0.273328]),
)
def test_load_wfn_h2_ccpvqz_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/h2_ccpvqz.wfn',
True, 74, -1.133504568400,
np.array([0.0, 0.0]),
)
expect = [82.64000, 12.41000, 2.824000, 0.7977000, 0.2581000]
assert (abs(obasis.alphas[:5] - expect) < 1.e-5).all()
expect = [-0.596838, 0.144565, 0.209605, 0.460401, 0.460401]
assert (orb_alpha.energies[:5] == expect).all()
expect = [12.859067, 13.017471, 16.405834, 25.824716, 26.100443]
assert (orb_alpha.energies[-5:] == expect).all()
assert (orb_alpha.occupations[:5] == [1.0, 0.0, 0.0, 0.0, 0.0]).all()
assert abs(orb_alpha.occupations.sum() - 1.0) < 1.e-6
def test_load_wfn_h2o_sto3g():
check_wfn(
'test/h2o_sto3g.wfn',
True, 21, -74.965901217080,
np.array([-0.330532, 0.165266, 0.165266])
)
def test_load_wfn_li_sp_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/li_sp_virtual.wfn',
False, 8, -3.712905542719,
np.array([0.0, 0.0])
)
assert abs(orb_alpha.occupations.sum() - 2.0) < 1.e-6
assert abs(orb_beta.occupations.sum() - 1.0) < 1.e-6
assert (orb_alpha.occupations == [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).all()
assert (orb_beta.occupations == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).all()
expect = [-0.087492, -0.080310, 0.158784, 0.158784, 1.078773, 1.090891, 1.090891, 49.643670]
assert (abs(orb_alpha.energies - expect) < 1.e-6).all()
expect = [-0.079905, 0.176681, 0.176681, 0.212494, 1.096631, 1.096631, 1.122821, 49.643827]
assert (abs(orb_beta.energies - expect) < 1.e-6).all()
assert orb_alpha.coeffs.shape == (8, 8)
assert orb_beta.coeffs.shape == (8, 8)
def test_load_wfn_li_sp():
fn_wfn = context.get_fn('test/li_sp_orbital.wfn')
mol = IOData.from_file(fn_wfn)
assert mol.title == 'Li atom - using s & p orbitals'
assert mol.orb_alpha.nfn == 2
assert mol.orb_beta.nfn == 1
assert abs(mol.energy - (-3.712905542719)) < 1.e-5
def test_load_wfn_o2():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/o2_uhf.wfn',
False, 72, -149.664140769678,
np.array([0.0, 0.0]),
)
assert orb_alpha.nfn == 9
assert orb_beta.nfn == 7
def test_load_wfn_o2_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/o2_uhf_virtual.wfn',
False, 72, -149.664140769678,
np.array([0.0, 0.0]),
)
assert abs(orb_alpha.occupations.sum() - 9.0) < 1.e-6
assert abs(orb_beta.occupations.sum() - 7.0) < 1.e-6
assert orb_alpha.occupations.shape == (44,)
assert orb_beta.occupations.shape == (44,)
assert (orb_alpha.occupations[:9] == np.ones(9)).all()
assert (orb_beta.occupations[:7] == np.ones(7)).all()
assert (orb_alpha.occupations[9:] == np.zeros(35)).all()
assert (orb_beta.occupations[7:] == np.zeros(37)).all()
assert orb_alpha.energies.shape == (44,)
assert orb_beta.energies.shape == (44,)
assert orb_alpha.energies[0] == -20.752000
assert orb_alpha.energies[10] == 0.179578
assert orb_alpha.energies[-1] == 51.503193
assert orb_beta.energies[0] == -20.697027
assert orb_beta.energies[15] == 0.322590
assert orb_beta.energies[-1] == 51.535258
assert orb_alpha.coeffs.shape == (72, 44)
assert orb_beta.coeffs.shape == (72, 44)
def test_load_wfn_lif_fci():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/lif_fci.wfn',
True, 44, None,
np.array([-0.645282, 0.645282]),
)
assert orb_alpha.occupations.shape == (18,)
assert abs(orb_alpha.occupations.sum() - 6.0) < 1.e-6
assert orb_alpha.occupations[0] == 2.00000000 / 2
assert orb_alpha.occupations[10] == 0.00128021 / 2
assert orb_alpha.occupations[-1] == 0.00000054 / 2
assert orb_alpha.energies.shape == (18,)
assert orb_alpha.energies[0] == -26.09321253
assert orb_alpha.energies[15] == 1.70096290
assert orb_alpha.energies[-1] == 2.17434072
assert orb_alpha.coeffs.shape == (44, 18)
kin = obasis.compute_kinetic()
expected_kin = 106.9326884815 # FCI kinetic energy
expected_nn = 9.1130265227
assert abs(np.einsum('ab,ba', kin, dm_full) - expected_kin) < 1.e-6
assert abs(compute_nucnuc(coordinates, numbers.astype(float)) - expected_nn) < 1.e-6
points = np.array([[0.0, 0.0, -0.17008], [0.0, 0.0, 0.0], [0.0, 0.0, 0.03779]])
density = np.zeros(3)
obasis.compute_grid_density_dm(dm_full, points, density)
assert (abs(density - [0.492787, 0.784545, 0.867723]) < 1.e-4).all()
assert abs(energy - (-107.0575700853)) < 1.e-5 # FCI energy
def test_load_wfn_lih_cation_fci():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/lih_cation_fci.wfn',
True, 26, None,
np.array([0.913206, 0.086794]),
)
assert (numbers == [3, 1]).all()
expected_kin = 7.7989675958 # FCI kinetic energy
expected_nn = 0.9766607347
kin = obasis.compute_kinetic()
assert abs(np.einsum('ab,ba', kin, dm_full) - expected_kin) < 1.e-6
assert abs(compute_nucnuc(coordinates, numbers.astype(float)) - expected_nn) < 1.e-6
assert orb_alpha.occupations.shape == (11,)
assert abs(orb_alpha.occupations.sum() - 1.5) < 1.e-6
assert abs(energy - (-7.7214366383)) < 1.e-5 # FCI energy
|
QuantumElephant/horton
|
horton/io/test/test_wfn.py
|
Python
|
gpl-3.0
| 19,258
| 0.002181
|
# -*- coding: utf-8 -*-
#
# clopath_synapse_spike_pairing.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Spike pairing experiment
--------------------------------------
This script simulates one ``aeif_psc_delta_clopath`` neuron connected via a
Clopath synapse [1]_. The synapse receives pairs of a presynaptic and a
postsynaptic spike separated by either 10 ms (pre before post) or -10 ms
(post before pre). The change in synaptic weight is measured after five such
pairs. The experiment is repeated five times with different repetition rates
of the spike-pair sequence: 10 Hz, 20 Hz, 30 Hz, 40 Hz, and 50 Hz.
References
~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import numpy as np
import matplotlib.pyplot as plt
import nest
##############################################################################
# First we specify the neuron parameters. To enable the voltage-dependent
# prefactor ``A_LTD(u_bar_bar)``, add ``A_LTD_const: False`` to the dictionary.
nrn_params = {'V_m': -70.6,
'E_L': -70.6,
'C_m': 281.0,
'theta_minus': -70.6,
'theta_plus': -45.3,
'A_LTD': 14.0e-5,
'A_LTP': 8.0e-5,
'tau_u_bar_minus': 10.0,
'tau_u_bar_plus': 7.0,
'delay_u_bars': 4.0,
'a': 4.0,
'b': 0.0805,
'V_reset': -70.6 + 21.0,
'V_clamp': 33.0,
't_clamp': 2.0,
't_ref': 0.0,
}
##############################################################################
# Hardcoded spike times of presynaptic spike generator
spike_times_pre = [
# Presynaptic spike before the postsynaptic
[ 20.0, 120.0, 220.0, 320.0, 420.0], # noqa
[ 20.0, 70.0, 120.0, 170.0, 220.0], # noqa
[ 20.0, 53.3, 86.7, 120.0, 153.3], # noqa
[ 20.0, 45.0, 70.0, 95.0, 120.0], # noqa
[ 20.0, 40.0, 60.0, 80.0, 100.0], # noqa
# Presynaptic spike after the postsynaptic
[120.0, 220.0, 320.0, 420.0, 520.0, 620.0], # noqa
[ 70.0, 120.0, 170.0, 220.0, 270.0, 320.0], # noqa
[ 53.3, 86.6, 120.0, 153.3, 186.6, 220.0], # noqa
[ 45.0, 70.0, 95.0, 120.0, 145.0, 170.0], # noqa
[ 40.0, 60.0, 80.0, 100.0, 120.0, 140.0]] # noqa
##############################################################################
# Hardcoded spike times of postsynaptic spike generator
spike_times_post = [
[ 10.0, 110.0, 210.0, 310.0, 410.0], # noqa
[ 10.0, 60.0, 110.0, 160.0, 210.0], # noqa
[ 10.0, 43.3, 76.7, 110.0, 143.3], # noqa
[ 10.0, 35.0, 60.0, 85.0, 110.0], # noqa
[ 10.0, 30.0, 50.0, 70.0, 90.0], # noqa
[130.0, 230.0, 330.0, 430.0, 530.0, 630.0], # noqa
[ 80.0, 130.0, 180.0, 230.0, 280.0, 330.0], # noqa
[ 63.3, 96.6, 130.0, 163.3, 196.6, 230.0], # noqa
[ 55.0, 80.0, 105.0, 130.0, 155.0, 180.0], # noqa
[ 50.0, 70.0, 90.0, 110.0, 130.0, 150.0]] # noqa
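##############################################################################
# The hardcoded tables above follow a simple rule: each sequence of pairs
# repeats at a rate rho, i.e. with an inter-pair interval of 1000/rho ms.
# A minimal sketch (illustration only, not used below) that reproduces the
# pre-before-post presynaptic times, rounded to one decimal as in the table:
#
#   for rho in [10.0, 20.0, 30.0, 40.0, 50.0]:
#       interval = 1000.0 / rho
#       print([round(20.0 + i * interval, 1) for i in range(5)])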
init_w = 0.5
syn_weights = []
resolution = 0.1
##############################################################################
# Loop over pairs of spike trains
for s_t_pre, s_t_post in zip(spike_times_pre, spike_times_post):
nest.ResetKernel()
nest.resolution = resolution
# Create one neuron
nrn = nest.Create("aeif_psc_delta_clopath", 1, nrn_params)
# We need a parrot neuron since spike generators can only
# be connected with static connections
prrt_nrn = nest.Create("parrot_neuron", 1)
# Create and connect spike generators
spike_gen_pre = nest.Create("spike_generator", {"spike_times": s_t_pre})
nest.Connect(spike_gen_pre, prrt_nrn,
syn_spec={"delay": resolution})
spike_gen_post = nest.Create("spike_generator", {"spike_times": s_t_post})
nest.Connect(spike_gen_post, nrn, syn_spec={"delay": resolution, "weight": 80.0})
# Create weight recorder
wr = nest.Create('weight_recorder')
# Create Clopath connection with weight recorder
nest.CopyModel("clopath_synapse", "clopath_synapse_rec",
{"weight_recorder": wr})
syn_dict = {"synapse_model": "clopath_synapse_rec",
"weight": init_w, "delay": resolution}
nest.Connect(prrt_nrn, nrn, syn_spec=syn_dict)
# Simulation
simulation_time = (10.0 + max(s_t_pre[-1], s_t_post[-1]))
nest.Simulate(simulation_time)
# Extract and save synaptic weights
weights = wr.get("events", "weights")
syn_weights.append(weights[-1])
syn_weights = np.array(syn_weights)
# Scale the weights so that they are comparable to [1].
syn_weights = 100.0 * 15.0 * (syn_weights - init_w) / init_w + 100.0
# Plot results
fig, ax = plt.subplots(1, sharex=False)
ax.plot([10., 20., 30., 40., 50.], syn_weights[5:], color='b', lw=2.5, ls='-',
label="pre-post pairing")
ax.plot([10., 20., 30., 40., 50.], syn_weights[:5], color='g', lw=2.5, ls='-',
label="post-pre pairing")
ax.set_ylabel("normalized weight change")
ax.set_xlabel("rho (Hz)")
ax.legend()
ax.set_title("synaptic weight")
plt.show()
|
heplesser/nest-simulator
|
pynest/examples/clopath_synapse_spike_pairing.py
|
Python
|
gpl-2.0
| 6,051
| 0.003636
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.sprite import Sprite
import pyglet
import random
class TestLayer(cocos.layer.Layer):
def __init__(self):
        super(TestLayer, self).__init__()
        x, y = director.get_window_size()
        self.sprite = Sprite('grossini.png')
        self.sprite.position = x / 2, y / 2
        self.add(self.sprite)
self.schedule( self.change_x )
self.schedule_interval( self.change_y, 1 )
def change_x(self, dt):
self.sprite.x = random.random()*director.get_window_size()[0]
def change_y(self, dt):
self.sprite.y = random.random()*director.get_window_size()[1]
if __name__ == "__main__":
director.init()
    test_layer = TestLayer()
    main_scene = cocos.scene.Scene(test_layer)
    director.run(main_scene)
|
adamwiggins/cocos2d
|
test/test_schedule.py
|
Python
|
bsd-3-clause
| 1,034
| 0.024178
|
import tornado.web
import json
from tornado_cors import CorsMixin
from common import ParameterFormat, EnumEncoder
class DefaultRequestHandler(CorsMixin, tornado.web.RequestHandler):
CORS_ORIGIN = '*'
def initialize(self):
self.default_format = self.get_argument("format", "json", True)
self.show_about = self.get_argument("show_about", True, True)
self.pg_version = self.get_argument("pg_version", 9.6, True)
self.version = "2.0 beta"
def write_about_stuff(self, format_type="alter_system"):
default_comment = "--"
if format_type == "conf":
default_comment = "#"
self.write("{} Generated by PGConfig {}\n".format(default_comment,
self.version))
self.write("{} http://pgconfig.org\n\n".format(default_comment * 2))
def write_comment(self, format_type, comment):
default_comment = "--"
if format_type == "conf":
default_comment = "#"
if comment != "NONE":
self.write("\n{} {}\n".format(default_comment, comment))
def write_config(self, output_data):
if self.show_about is True:
self.write_about_stuff("conf")
for category in output_data:
self.write("# {}\n".format(category["description"]))
for parameter in category["parameters"]:
config_value = parameter.get("config_value", "NI")
value_format = parameter.get("format", ParameterFormat.NONE)
if value_format in (ParameterFormat.String,
ParameterFormat.Time):
config_value = "'{}'".format(config_value)
parameter_comment = parameter.get("comment", "NONE")
if parameter_comment != "NONE":
self.write_comment("conf", parameter_comment)
self.write("{} = {}\n".format(parameter["name"], config_value))
self.write("\n")
def write_alter_system(self, output_data):
if float(self.pg_version) <= 9.3:
self.write("-- ALTER SYSTEM format it's only supported on version 9.4 and higher. Use 'conf' format instead.")
else:
if self.show_about is True:
self.write_about_stuff()
for category in output_data:
self.write("-- {}\n".format(category["description"]))
for parameter in category["parameters"]:
config_value = parameter.get("config_value", "NI")
parameter_comment = parameter.get("comment", "NONE")
self.write_comment("alter_system", parameter_comment)
self.write("ALTER SYSTEM SET {} TO '{}';\n".format(parameter[
"name"], config_value))
self.write("\n")
def write_plain(self, message=list()):
if len(message) == 1:
self.write(message[0])
else:
for line in message:
self.write(line + '\n')
def write_bash(self, message=list()):
bash_script = """
#!/bin/bash
"""
self.write(bash_script)
if len(message) == 1:
self.write('SQL_QUERY="{}"\n'.format(message[0]))
self.write('psql -c "${SQL_QUERY}"\n')
else:
for line in message:
self.write('SQL_QUERY="{}"\n'.format(line))
self.write('psql -c "${SQL_QUERY}"\n\n')
def write_json_api(self, message):
self.set_header('Content-Type', 'application/vnd.api+json')
_document = {}
_document["data"] = message
_meta = {}
_meta["copyright"] = "PGConfig API"
_meta["version"] = self.version
_meta["arguments"] = self.request.arguments
_document["meta"] = _meta
_document["jsonapi"] = {"version": "1.0"}
full_url = self.request.protocol + "://" + self.request.host + self.request.uri
_document["links"] = {"self": full_url}
self.write(
json.dumps(
_document,
sort_keys=True,
separators=(',', ': '),
cls=EnumEncoder))
def write_json(self, message=list()):
self.set_header('Content-Type', 'application/json')
if len(message) == 1:
self.write("{ \"output\": \"" + message[0] + "\"}")
else:
new_output = "{ \"output\": ["
first_line = True
for line in message:
if not first_line:
new_output += ","
else:
first_line = False
new_output += "\"{}\"".format(line)
new_output += "] } "
self.write(new_output)
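    # NOTE: write_json builds the JSON string by hand and will produce invalid
    # output if a line contains quotes or backslashes; json.dumps({"output":
    # message}) would be a safer alternative.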
    def return_output(self, message=list()):
        # Convert string input into a list (to handle multiline strings).
        process_data = []
        if not isinstance(message, list):
            process_data.insert(0, message)
        else:
            process_data = message
        if self.default_format == "json":
            self.write_json_api(process_data)
        elif self.default_format == "bash":
            self.write_bash(process_data)
        elif self.default_format == "conf":
            self.write_config(process_data)
        elif self.default_format == "alter_system":
            self.write_alter_system(process_data)
        else:
            self.write_plain(process_data)
class GeneratorRequestHandler(DefaultRequestHandler):
pass
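# A minimal usage sketch (the route and port are assumptions, not part of this
# project): handlers derived from DefaultRequestHandler are mounted like any
# other tornado handler.
#
#   import tornado.ioloop
#   app = tornado.web.Application([(r"/v1/generator", GeneratorRequestHandler)])
#   app.listen(8888)
#   tornado.ioloop.IOLoop.current().start()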
|
sebastianwebber/pgconfig-api
|
common/util.py
|
Python
|
bsd-2-clause
| 5,642
| 0.001241
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\github repos\Python\A05_SimplyGame\Binaries\MyView.ui'
#
# Created: Tue Oct 25 22:22:12 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(808, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget = QtGui.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(240, 110, 561, 281))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setHorizontalSpacing(7)
self.gridLayout.setVerticalSpacing(9)
self.gridLayout.setObjectName("gridLayout")
self.pushButton_5 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout.addWidget(self.pushButton_5, 0, 4, 1, 1)
self.pushButton_1 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_1.setObjectName("pushButton_1")
self.gridLayout.addWidget(self.pushButton_1, 0, 0, 1, 1)
self.pushButton_9 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_9.setObjectName("pushButton_9")
self.gridLayout.addWidget(self.pushButton_9, 1, 3, 1, 1)
self.pushButton_6 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_6.setObjectName("pushButton_6")
self.gridLayout.addWidget(self.pushButton_6, 1, 0, 1, 1)
self.pushButton_10 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_10.setObjectName("pushButton_10")
self.gridLayout.addWidget(self.pushButton_10, 1, 4, 1, 1)
self.pushButton_15 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_15.setObjectName("pushButton_15")
self.gridLayout.addWidget(self.pushButton_15, 2, 4, 1, 1)
self.pushButton_4 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_4.setObjectName("pushButton_4")
self.gridLayout.addWidget(self.pushButton_4, 0, 3, 1, 1)
self.pushButton_11 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_11.setObjectName("pushButton_11")
self.gridLayout.addWidget(self.pushButton_11, 2, 0, 1, 1)
self.pushButton_12 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_12.setObjectName("pushButton_12")
self.gridLayout.addWidget(self.pushButton_12, 2, 1, 1, 1)
self.pushButton_7 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_7.setObjectName("pushButton_7")
self.gridLayout.addWidget(self.pushButton_7, 1, 1, 1, 1)
self.pushButton_3 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout.addWidget(self.pushButton_3, 0, 2, 1, 1)
self.pushButton_13 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_13.setObjectName("pushButton_13")
self.gridLayout.addWidget(self.pushButton_13, 2, 2, 1, 1)
self.pushButton_8 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_8.setObjectName("pushButton_8")
self.gridLayout.addWidget(self.pushButton_8, 1, 2, 1, 1)
self.pushButton_14 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_14.setObjectName("pushButton_14")
self.gridLayout.addWidget(self.pushButton_14, 2, 3, 1, 1)
self.pushButton_2 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 0, 1, 1, 1)
self.formLayoutWidget = QtGui.QWidget(self.centralwidget)
self.formLayoutWidget.setGeometry(QtCore.QRect(50, 70, 191, 481))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.label = QtGui.QLabel(self.formLayoutWidget)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.label_2 = QtGui.QLabel(self.formLayoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_2)
self.label_3 = QtGui.QLabel(self.formLayoutWidget)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_3)
self.label_4 = QtGui.QLabel(self.formLayoutWidget)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(9, QtGui.QFormLayout.LabelRole, self.label_4)
self.label_5 = QtGui.QLabel(self.formLayoutWidget)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(12, QtGui.QFormLayout.LabelRole, self.label_5)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(1, QtGui.QFormLayout.LabelRole, spacerItem)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(5, QtGui.QFormLayout.LabelRole, spacerItem1)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(8, QtGui.QFormLayout.LabelRole, spacerItem2)
spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(11, QtGui.QFormLayout.LabelRole, spacerItem3)
spacerItem4 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(2, QtGui.QFormLayout.LabelRole, spacerItem4)
spacerItem5 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(4, QtGui.QFormLayout.LabelRole, spacerItem5)
spacerItem6 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(7, QtGui.QFormLayout.LabelRole, spacerItem6)
spacerItem7 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(10, QtGui.QFormLayout.LabelRole, spacerItem7)
self.label_6 = QtGui.QLabel(self.formLayoutWidget)
self.label_6.setObjectName("label_6")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.label_6)
self.label_7 = QtGui.QLabel(self.formLayoutWidget)
self.label_7.setObjectName("label_7")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.label_7)
self.label_8 = QtGui.QLabel(self.formLayoutWidget)
self.label_8.setObjectName("label_8")
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.label_8)
self.label_9 = QtGui.QLabel(self.formLayoutWidget)
self.label_9.setObjectName("label_9")
self.formLayout.setWidget(9, QtGui.QFormLayout.FieldRole, self.label_9)
self.label_10 = QtGui.QLabel(self.formLayoutWidget)
self.label_10.setObjectName("label_10")
self.formLayout.setWidget(12, QtGui.QFormLayout.FieldRole, self.label_10)
self.gridLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(240, 390, 561, 161))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem8, 1, 2, 1, 1)
self.pushButton_24 = QtGui.QPushButton(self.gridLayoutWidget_2)
self.pushButton_24.setObjectName("pushButton_24")
self.gridLayout_2.addWidget(self.pushButton_24, 1, 1, 1, 1)
self.pushButton_25 = QtGui.QPushButton(self.gridLayoutWidget_2)
self.pushButton_25.setObjectName("pushButton_25")
self.gridLayout_2.addWidget(self.pushButton_25, 1, 3, 1, 1)
spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem9, 1, 4, 1, 1)
spacerItem10 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem10, 1, 0, 1, 1)
self.label_11 = QtGui.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(240, 0, 561, 111))
self.label_11.setObjectName("label_11")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 808, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.pushButton_25, QtCore.SIGNAL("clicked()"), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_5.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_1.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_9.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_6.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_10.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_15.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_4.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_11.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_12.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_7.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_3.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_13.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_8.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_14.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "offen:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "korrket:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "falsch:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "gesamt:", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("MainWindow", "Spiele:", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("MainWindow", "offenAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("MainWindow", "korrektAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("MainWindow", "falschAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("MainWindow", "gesamtAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("MainWindow", "spieleAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_24.setText(QtGui.QApplication.translate("MainWindow", "Neu", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_25.setText(QtGui.QApplication.translate("MainWindow", "Ende", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:14pt; font-weight:600;\">Drücken Sie die Buttons in aufsteigender Reihenfolge</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
|
lzuba-tgm/A05_SimplyGame
|
Ui_MainWindow.py
|
Python
|
gpl-3.0
| 13,842
| 0.003396
|
# ----------------------------------------------------------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Edson de Lima Barros 1715310043
# Tiago Ferreira Aranha 1715310047
# Vitor Simôes Azevedo 1715310025
# Roberta de Oliveira da Cruz 0825070169
# Uriel Brito Barros 1515120558
#
# 16. Faça um procedimento que recebe, por parâmetro,
# 2 vetores de 10 elementos inteiros e que calcule e retorne,
# também por parâmetro, o vetor intersecção dos dois primeiros.
from lista08.ipc import vetor
vetor1 = vetor.cria_vetor(10)
vetor2 = vetor.cria_vetor(10)
vetor_interseccao = vetor.vetor_interseccao(vetor1, vetor2)
print(vetor_interseccao)
|
jucimarjr/IPC_2017-1
|
lista08/lista08_lista02_questao16.py
|
Python
|
apache-2.0
| 799
| 0
|
import mcpi.minecraft as minecraft
import mcpi.block as Block
import serial
import time
# The location where redstone torch needs to spawn.
a0 = (-112, 0, 62) # <- YOU MUST SET THIS VALUE (x,y,z)
"""
Helper method: get_pin(pin)
Returns whether the minecraft pin is turned on or off (based on redstone torch type)
Block(76, 1) -> Redstone Toch ON
Block(75, 1) -> Redstone Toch OFF
"""
def get_pin(pin):
block = mc.getBlockWithData(pin)
print(block)
if block.id == 76:
return 1
elif block.id == 75:
return 0
else:
return -1
if __name__ == "__main__":
# My espruino was COM23, and I had to use value 22 here.
    port = 22
old_val = 0
ser = serial.Serial(port, timeout=1) # open first serial port
    print(ser.portstr)  # check which port was really used
# Create mc object.
mc = minecraft.Minecraft.create()
# Main loop
try:
while True:
# Read the minecraft pin
cur_val = get_pin(a0)
if cur_val != old_val:
# write the result to the LED1 on Espruino
if int(cur_val):
# turn LED on
ser.write("digitalWrite(LED1, 1)\n")
else:
# turn LED off
ser.write("digitalWrite(LED1, 0)\n")
old_val = cur_val
time.sleep(.5) # small sleep
except KeyboardInterrupt:
print("stopped")
ser.close()
|
FoamyGuy/mcpi_with_espruino
|
examples/example2_led/example2_led.py
|
Python
|
unlicense
| 1,489
| 0.003358
|
#!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud command line tool."""
import time
START_TIME = time.time()
# pylint:disable=g-bad-import-order
# pylint:disable=g-import-not-at-top, We want to get the start time first.
import os
import signal
import sys
from googlecloudsdk.calliope import backend
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import cli
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.updater import local_state
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import platforms
import surface
# Disable stack traces when people kill a command.
def CTRLCHandler(unused_signal, unused_frame):
"""Custom SIGNINT handler.
Signal handler that doesn't print the stack trace when a command is
killed by keyboard interupt.
"""
try:
log.err.Print('\n\nCommand killed by keyboard interrupt\n')
except NameError:
sys.stderr.write('\n\nCommand killed by keyboard interrupt\n')
# Kill ourselves with SIGINT so our parent can detect that we exited because
# of a signal. SIG_DFL disables further KeyboardInterrupt exceptions.
signal.signal(signal.SIGINT, signal.SIG_DFL)
os.kill(os.getpid(), signal.SIGINT)
# Just in case the kill failed ...
sys.exit(1)
signal.signal(signal.SIGINT, CTRLCHandler)
# Enable normal UNIX handling of SIGPIPE to play nice with grep -q, head, etc.
# See https://mail.python.org/pipermail/python-list/2004-June/273297.html and
# http://utcc.utoronto.ca/~cks/space/blog/python/SignalExceptionSurprise
# for more details.
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _DoStartupChecks():
if not platforms.PythonVersion().IsCompatible():
sys.exit(1)
_DoStartupChecks()
if not config.Paths().sdk_root:
# Don't do update checks if there is no install root.
properties.VALUES.component_manager.disable_update_check.Set(True)
def UpdateCheck(command_path, **unused_kwargs):
try:
update_manager.UpdateManager.PerformUpdateCheck(command_path=command_path)
# pylint:disable=broad-except, We never want this to escape, ever. Only
# messages printed should reach the user.
except Exception:
log.debug('Failed to perform update check.', exc_info=True)
def CreateCLI(surfaces):
"""Generates the gcloud CLI from 'surface' folder with extra surfaces.
Args:
surfaces: list(tuple(dot_path, dir_path)), extra commands or subsurfaces
to add, where dot_path is calliope command path and dir_path
path to command group or command.
Returns:
calliope cli object.
"""
def VersionFunc():
generated_cli.Execute(['version'])
pkg_root = os.path.dirname(os.path.dirname(surface.__file__))
loader = cli.CLILoader(
name='gcloud',
command_root_directory=os.path.join(pkg_root, 'surface'),
allow_non_existing_modules=True,
version_func=VersionFunc)
loader.AddReleaseTrack(base.ReleaseTrack.ALPHA,
os.path.join(pkg_root, 'surface', 'alpha'),
component='alpha')
loader.AddReleaseTrack(base.ReleaseTrack.BETA,
os.path.join(pkg_root, 'surface', 'beta'),
component='beta')
for dot_path, dir_path in surfaces:
loader.AddModule(dot_path, dir_path, component=None)
# Check for updates on shutdown but not for any of the updater commands.
loader.RegisterPostRunHook(UpdateCheck,
exclude_commands=r'gcloud\.components\..*')
generated_cli = loader.Generate()
return generated_cli
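# A minimal sketch (the dot path and directory below are hypothetical) of
# extending the generated CLI with an extra surface via CreateCLI:
#
#   extra_surfaces = [('mycompany.tools', '/path/to/mycompany/tools')]
#   gcloud_cli = CreateCLI(extra_surfaces)
#   gcloud_cli.Execute(['version'])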
def _PrintSuggestedAction(err, err_string):
"""Print the best action for the user to take, given the error."""
if (isinstance(err, backend.CommandLoadFailure) and
type(err.root_exception) is ImportError):
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
else:
log.error('gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__),
err_string))
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
def main(gcloud_cli=None):
metrics.Started(START_TIME)
# TODO(user): Put a real version number here
metrics.Executions(
'gcloud',
local_state.InstallationState.VersionForInstalledComponent('core'))
if gcloud_cli is None:
gcloud_cli = CreateCLI([])
try:
gcloud_cli.Execute()
except Exception as err: # pylint:disable=broad-except
# We want this to be parsable by `gcloud feedback`, so we print the
# stacktrace with a nice recognizable string
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
_PrintSuggestedAction(err, gcloud_cli.SafeExceptionToString(err))
if properties.VALUES.core.print_unhandled_tracebacks.GetBool():
# We want to see the traceback as normally handled by Python
raise
else:
# This is the case for most non-Cloud SDK developers. They shouldn't see
# the full stack trace, but just the nice "gcloud crashed" message.
sys.exit(1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
CTRLCHandler(None, None)
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py
|
Python
|
bsd-3-clause
| 7,025
| 0.00911
|
DEBUG = False
USERNAME = 'hikaru'
CHANNEL = 'random'
VOCAB = {
'RANDOM': ['random', ':troll:', ':trollface:'],
'PASS': ['pass', 'skip'],
'RESIGN': ['resign', 'give up'],
'VOTE': ['vote', 'move', 'play'],
'VOTES': ['votes', 'moves', 'voted', 'chance'],
'CAPTURES': ['captures'],
'SHOW': ['show', 'board'],
'YES': ['yes', 'yeah', 'ya', 'y', 'ja', 'please', 'ok', 'yep'],
'NO': ['no', 'nope', 'n', 'nee', "don't", 'cancel'],
}
RESPONSES = {
'RESIGN_CONFIRMATION': [
'Are you sure you want to resign?',
'Sure?',
],
'RESIGN_CANCELLED': [
'Ok.',
'Resignation cancelled.',
],
'UNKNOWN': [
"I don't know.",
'What do you mean?',
"That doesn't make any sense.",
"I'm just a bot.",
],
}
# How often to play moves. See `man crontab` for format information.
if DEBUG:
CRON = '*/2 * * * *' # Every two minutes.
else:
CRON = '0 9-18 * * 1-5' # Hourly between 9:00 and 18:00 on weekdays.
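# For reference, '0 9-18 * * 1-5' reads: minute 0 of every hour from 09:00
# through 18:00, any day of the month, any month, Monday through Friday.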
|
shobute/go-slack
|
config.py
|
Python
|
isc
| 1,009
| 0.001982
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Transformer model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.nlp.transformer import model_params
from official.transformer.v2 import transformer
class TransformerV2Test(tf.test.TestCase):
def setUp(self):
self.params = params = model_params.TINY_PARAMS
params["batch_size"] = params["default_batch_size"] = 16
params["use_synthetic_data"] = True
params["hidden_size"] = 12
params["num_hidden_layers"] = 2
params["filter_size"] = 14
params["num_heads"] = 2
params["vocab_size"] = 41
params["extra_decode_length"] = 2
params["beam_size"] = 3
params["dtype"] = tf.float32
def test_create_model_train(self):
model = transformer.create_model(self.params, True)
inputs, outputs = model.inputs, model.outputs
self.assertEqual(len(inputs), 2)
self.assertEqual(len(outputs), 1)
self.assertEqual(inputs[0].shape.as_list(), [None, None])
self.assertEqual(inputs[0].dtype, tf.int64)
self.assertEqual(inputs[1].shape.as_list(), [None, None])
self.assertEqual(inputs[1].dtype, tf.int64)
self.assertEqual(outputs[0].shape.as_list(), [None, None, 41])
self.assertEqual(outputs[0].dtype, tf.float32)
def test_create_model_not_train(self):
model = transformer.create_model(self.params, False)
inputs, outputs = model.inputs, model.outputs
self.assertEqual(len(inputs), 1)
self.assertEqual(len(outputs), 2)
self.assertEqual(inputs[0].shape.as_list(), [None, None])
self.assertEqual(inputs[0].dtype, tf.int64)
self.assertEqual(outputs[0].shape.as_list(), [None, None])
self.assertEqual(outputs[0].dtype, tf.int32)
self.assertEqual(outputs[1].shape.as_list(), [None])
self.assertEqual(outputs[1].dtype, tf.float32)
if __name__ == "__main__":
tf.compat.v1.enable_v2_behavior()
tf.test.main()
|
alexgorban/models
|
official/transformer/v2/transformer_test.py
|
Python
|
apache-2.0
| 2,619
| 0.001909
|
from . import mne # noqa
from .mne.spectral import TFRmorlet # noqa
|
neuropycon/ephypype
|
ephypype/interfaces/__init__.py
|
Python
|
bsd-3-clause
| 70
| 0.028571
|
# Copyright (C) 2010-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from snf_django.management.commands import SynnefoCommand
from synnefo.db.models import Backend
from synnefo.logic import backend as backend_mod
from synnefo.db import transaction
HELP_MSG = """Query Ganeti backends and update the status of backend in DB.
This command updates:
* the list of the enabled disk-templates
* the available resources (disk, memory, CPUs)
"""
class Command(SynnefoCommand):
help = HELP_MSG
@transaction.atomic
def handle(self, **options):
for backend in Backend.objects.select_for_update()\
.filter(offline=False):
backend_mod.update_backend_disk_templates(backend)
backend_mod.update_backend_resources(backend)
self.stdout.write("Successfully updated backend '%s'\n" % backend)
|
grnet/synnefo
|
snf-cyclades-app/synnefo/logic/management/commands/backend-update-status.py
|
Python
|
gpl-3.0
| 1,489
| 0
|
"""Defines all search scopes used in this project."""
from os import path
ROOT_PATH = path.abspath('/')
class TreeSearchScope:
"""Encapsulation of a search scope to search up the tree."""
def __init__(self,
from_folder=ROOT_PATH,
to_folder=ROOT_PATH):
"""Initialize the search scope."""
self.from_folder = from_folder
self.to_folder = to_folder
@property
def from_folder(self):
"""Get the starting folder."""
return self._from_folder
@from_folder.setter
def from_folder(self, folder):
"""Set the last folder in search."""
self._from_folder = folder
self._current_folder = self._from_folder
@property
def to_folder(self):
"""Get the end of search folder."""
return self._to_folder
@to_folder.setter
def to_folder(self, folder):
"""Set the last folder in search."""
self._to_folder = folder
self._one_past_last = path.dirname(self._to_folder)
def __bool__(self):
"""Check if the search scope is empty."""
return self.from_folder != ROOT_PATH
def __iter__(self):
"""Make this an iterator."""
self._current_folder = self._from_folder
return self
def __next__(self):
"""Get next folder to search in."""
current_folder = self._current_folder
self._current_folder = path.dirname(self._current_folder)
scope_end_reached = current_folder == self._one_past_last
root_reached = current_folder == self._current_folder
if root_reached or scope_end_reached:
raise StopIteration
else:
return current_folder
def __repr__(self):
"""Return search scope as a printable string."""
return 'SearchScope: from_folder: {}, to_folder: {}'.format(
self._from_folder, self._to_folder)
class ListSearchScope:
"""Encapsulation of a search scope to search in a list."""
    def __init__(self, paths=()):
"""Initialize the search scope."""
self.folders = paths
@property
def folders(self):
"""Get the starting folder."""
return self._folders
@folders.setter
def folders(self, paths):
"""Set the folders."""
self._folders = [f for f in paths if path.isdir(f)]
self._iter = iter(self._folders)
def __bool__(self):
"""Check if the search scope is not empty."""
return len(self._folders) > 0
def __iter__(self):
"""Make this an iterator."""
self._iter = iter(self._folders)
return self._iter
def __next__(self):
"""Get next folder to search in."""
return next(self._iter)
def __repr__(self):
"""Return search scope as a printable string."""
return 'SearchScope: folders: {}'.format(self._folders)
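# Example usage (the paths are hypothetical): TreeSearchScope walks from the
# start folder up the directory tree until the end folder has been passed.
#
#   scope = TreeSearchScope(from_folder='/home/user/project/src',
#                           to_folder='/home/user/project')
#   for folder in scope:
#       print(folder)  # '/home/user/project/src', then '/home/user/project'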
|
niosus/EasyClangComplete
|
plugin/utils/search_scope.py
|
Python
|
mit
| 2,889
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_stack
----------------------------------
Tests for `python_algorithms.stack` module.
"""
import unittest
from python_algorithms.basic.stack import Stack
class TestStack(unittest.TestCase):
def setUp(self):
self.empty_stack = Stack()
self.stack = Stack()
self.seq = [0, 2, 4, 6, 8]
for x in self.seq:
self.stack.push(x)
def test_push_to_empty_stack(self):
self.empty_stack.push(0)
self.assertEqual(self.empty_stack.peek(), 0)
def test_push_to_stack(self):
self.stack.push(10)
self.assertEqual(self.stack.peek(), 10)
def test_pop_from_empty_stack(self):
self.assertRaises(IndexError, self.empty_stack.pop)
def test_pop_from_stack(self):
self.assertEqual(self.stack.pop(), self.seq[-1])
def test_size_of_empty_stack(self):
self.assertEqual(self.empty_stack.size, 0)
def test_size_of_stack(self):
self.assertEqual(self.stack.size, len(self.seq))
def test_peek_at_empty_stack(self):
self.assertRaises(IndexError, self.empty_stack.peek)
def test_peek_at_stack(self):
self.assertEqual(self.stack.peek(), self.seq[-1])
    def test_iterate_empty_stack(self):
        for curr in self.empty_stack:
            self.fail("iterating an empty stack should yield no items")
def test_iterate_stack(self):
iter_seq = []
for curr in self.stack:
iter_seq.append(curr)
iter_seq.reverse()
self.assertEqual(iter_seq, self.seq)
def test_str_empty_stack(self):
self.assertEqual(str(self.empty_stack), "")
def test_str_stack(self):
self.assertEqual(str(self.stack), " ".join([str(x) for x in self.seq]))
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
cjauvin/python_algorithms
|
tests/basic/test_stack.py
|
Python
|
bsd-3-clause
| 1,843
| 0.000543
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle.core.delegate import search_backend
from pootle.core.plugin import getter
from pootle_project.models import Project
from pootle_statistics.models import Submission, SubmissionTypes
from pootle_store.getters import get_search_backend
from pootle_store.constants import FUZZY, TRANSLATED, UNTRANSLATED
from pootle_store.models import Suggestion, Unit
from pootle_store.unit.filters import (
FilterNotFound, UnitChecksFilter, UnitContributionFilter, UnitSearchFilter,
UnitStateFilter, UnitTextSearch)
from pootle_store.unit.search import DBSearchBackend
def _expected_text_search_words(text, exact):
if exact:
return [text]
return [t.strip() for t in text.split(" ") if t.strip()]
def _expected_text_search_results(qs, words, search_fields):
def _search_field(k):
subresult = qs.all()
for word in words:
subresult = subresult.filter(
**{("%s__icontains" % k): word})
return subresult
result = qs.none()
for k in search_fields:
result = result | _search_field(k)
return list(result.order_by("pk"))
def _expected_text_search_fields(sfields):
search_fields = set()
for field in sfields:
if field in UnitTextSearch.search_mappings:
search_fields.update(UnitTextSearch.search_mappings[field])
else:
search_fields.add(field)
return search_fields
def _test_units_checks_filter(qs, check_type, check_data):
result = UnitChecksFilter(qs, **{check_type: check_data}).filter("checks")
for item in result:
assert item in qs
assert result.count() == result.distinct().count()
if check_type == "checks":
for item in result:
assert any(
qc in item.qualitycheck_set.values_list("name", flat=True)
for qc
in check_data)
assert(
list(result)
== list(
qs.filter(
qualitycheck__false_positive=False,
qualitycheck__name__in=check_data).distinct()))
else:
for item in result:
item.qualitycheck_set.values_list("category", flat=True)
assert(
list(result)
== list(
qs.filter(
qualitycheck__false_positive=False,
qualitycheck__category=check_data).distinct()))
def _test_units_contribution_filter(qs, user, unit_filter):
result = UnitContributionFilter(qs, user=user).filter(unit_filter)
for item in result:
assert item in qs
assert result.count() == result.distinct().count()
user_subs_overwritten = [
"my_submissions_overwritten",
"user_submissions_overwritten"]
if unit_filter == "suggestions":
assert (
result.count()
== qs.filter(
suggestion__state__name="pending").distinct().count())
return
elif not user:
assert result.count() == 0
return
elif unit_filter in ["my_suggestions", "user_suggestions"]:
expected = qs.filter(
suggestion__state__name="pending",
suggestion__user=user).distinct()
elif unit_filter == "user_suggestions_accepted":
expected = qs.filter(
suggestion__state__name="accepted",
suggestion__user=user).distinct()
elif unit_filter == "user_suggestions_rejected":
expected = qs.filter(
suggestion__state__name="rejected",
suggestion__user=user).distinct()
elif unit_filter in ["my_submissions", "user_submissions"]:
expected = qs.filter(submitted_by=user)
elif unit_filter in user_subs_overwritten:
        # Let's calculate this longhand.
        # First, submissions that were added with no suggestion:
user_edit_subs = Submission.objects.filter(
type__in=SubmissionTypes.EDIT_TYPES).filter(
suggestion__isnull=True).filter(
submitter=user).values_list("unit_id", flat=True)
        # Next, the accepted suggestions made by this user:
user_suggestions = Suggestion.objects.filter(
state__name="accepted",
user=user).values_list("unit_id", flat=True)
expected = qs.filter(
id__in=(
set(user_edit_subs)
| set(user_suggestions))).exclude(submitted_by=user)
assert (
list(expected.order_by("pk"))
== list(result.order_by("pk")))
def _test_unit_text_search(qs, text, sfields, exact, empty=True):
unit_search = UnitTextSearch(qs)
result = unit_search.search(text, sfields, exact).order_by("pk")
words = unit_search.get_words(text, exact)
fields = unit_search.get_search_fields(sfields)
# ensure result meets our expectation
assert (
list(result)
== _expected_text_search_results(qs, words, fields))
# ensure that there are no dupes in result qs
assert list(result) == list(result.distinct())
if not empty:
assert result.count()
for item in result:
# item is in original qs
assert item in qs
for word in words:
searchword_found = False
for field in fields:
if word.lower() in getattr(item, field).lower():
# one of the items attrs matches search
searchword_found = True
break
assert searchword_found
def _test_units_state_filter(qs, unit_filter):
result = UnitStateFilter(qs).filter(unit_filter)
for item in result:
assert item in qs
assert result.count() == result.distinct().count()
if unit_filter == "all":
assert list(result) == list(qs)
return
elif unit_filter == "translated":
states = [TRANSLATED]
elif unit_filter == "untranslated":
states = [UNTRANSLATED]
elif unit_filter == "fuzzy":
states = [FUZZY]
elif unit_filter == "incomplete":
states = [UNTRANSLATED, FUZZY]
assert all(
state in states
for state
in result.values_list("state", flat=True))
assert (
qs.filter(state__in=states).count()
== result.count())
@pytest.mark.django_db
def test_get_units_text_search(units_text_searches):
search = units_text_searches
sfields = search["sfields"]
fields = _expected_text_search_fields(sfields)
words = _expected_text_search_words(search['text'], search["exact"])
# ensure the fields parser works correctly
assert (
UnitTextSearch(Unit.objects.all()).get_search_fields(sfields)
== fields)
# ensure the text tokeniser works correctly
assert (
UnitTextSearch(Unit.objects.all()).get_words(
search['text'], search["exact"])
== words)
assert isinstance(words, list)
    # Run the all-units test first and check it is not empty if it shouldn't be.
_test_unit_text_search(
Unit.objects.all(),
search["text"], search["sfields"], search["exact"],
search["empty"])
for qs in [Unit.objects.none(), Unit.objects.live()]:
# run tests against different qs
_test_unit_text_search(
qs, search["text"], search["sfields"], search["exact"])
@pytest.mark.django_db
def test_units_contribution_filter_none(units_contributor_searches):
unit_filter = units_contributor_searches
user = None
qs = Unit.objects.all()
if not hasattr(UnitContributionFilter, "filter_%s" % unit_filter):
with pytest.raises(FilterNotFound):
UnitContributionFilter(qs, user=user).filter(unit_filter)
return
test_qs = [
qs,
qs.none(),
qs.filter(
store__translation_project__project=Project.objects.first())]
for _qs in test_qs:
_test_units_contribution_filter(_qs, user, unit_filter)
@pytest.mark.django_db
def test_units_contribution_filter(units_contributor_searches, site_users):
unit_filter = units_contributor_searches
user = site_users["user"]
qs = Unit.objects.all()
if not hasattr(UnitContributionFilter, "filter_%s" % unit_filter):
with pytest.raises(FilterNotFound):
UnitContributionFilter(qs, user=user).filter(unit_filter)
return
test_qs = [
qs,
qs.none(),
qs.filter(
store__translation_project__project=Project.objects.first())]
for _qs in test_qs:
_test_units_contribution_filter(_qs, user, unit_filter)
@pytest.mark.django_db
def test_units_state_filter(units_state_searches):
unit_filter = units_state_searches
qs = Unit.objects.all()
if not hasattr(UnitStateFilter, "filter_%s" % unit_filter):
with pytest.raises(FilterNotFound):
UnitStateFilter(qs).filter(unit_filter)
return
test_qs = [
qs,
qs.none(),
qs.filter(
store__translation_project__project=Project.objects.first())]
for _qs in test_qs:
_test_units_state_filter(_qs, unit_filter)
@pytest.mark.django_db
def test_units_checks_filter(units_checks_searches):
check_type, check_data = units_checks_searches
qs = Unit.objects.all()
test_qs = [
qs,
qs.none(),
qs.filter(
store__translation_project__project=Project.objects.first())]
for _qs in test_qs:
_test_units_checks_filter(_qs, check_type, check_data)
@pytest.mark.django_db
def test_units_checks_filter_bad():
qs = Unit.objects.all()
with pytest.raises(FilterNotFound):
UnitChecksFilter(qs).filter("BAD")
    # If you don't supply a check/category, you get an empty queryset.
assert not UnitChecksFilter(qs).filter("checks").count()
@pytest.mark.django_db
def test_units_filters():
qs = Unit.objects.all()
assert UnitSearchFilter().filter(qs, "FOO").count() == 0
@pytest.mark.django_db
def test_unit_search_backend():
assert search_backend.get() is None
assert search_backend.get(Unit) is DBSearchBackend
@pytest.mark.django_db
def test_unit_search_backend_custom():
class CustomSearchBackend(DBSearchBackend):
pass
# add a custom getter, simulating adding before pootle_store
# in INSTALLED_APPS
# disconnect the default search_backend
search_backend.disconnect(get_search_backend, sender=Unit)
@getter(search_backend, sender=Unit)
def custom_get_search_backend(**kwargs):
return CustomSearchBackend
# reconnect the default search_backend
search_backend.connect(get_search_backend, sender=Unit)
assert search_backend.get(Unit) is CustomSearchBackend
|
claudep/pootle
|
tests/search/units.py
|
Python
|
gpl-3.0
| 10,939
| 0
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from django.conf import settings
from polycommon.options.exceptions import OptionException
from polycommon.options.feature import Feature
from polycommon.options.option import NAMESPACE_DB_OPTION_MARKER, OptionStores
class DummyFeature(Feature):
pass
class TestFeature(TestCase):
def test_feature_default_store(self):
assert DummyFeature.store == OptionStores(settings.STORE_OPTION)
def test_feature_marker(self):
assert DummyFeature.get_marker() == NAMESPACE_DB_OPTION_MARKER
    def test_parse_key_wrong_namespace(self):
DummyFeature.key = "FOO"
with self.assertRaises(OptionException):
DummyFeature.parse_key()
DummyFeature.key = "FOO:BAR"
with self.assertRaises(OptionException):
DummyFeature.parse_key()
def test_parse_key_without_namespace(self):
DummyFeature.key = "FEATURES:FOO"
assert DummyFeature.parse_key() == (None, "FOO")
def test_parse_key_with_namespace(self):
DummyFeature.key = "FEATURES:FOO:BAR"
assert DummyFeature.parse_key() == ("FOO", "BAR")
|
polyaxon/polyaxon
|
platform/polycommon/tests/test_options/test_feature.py
|
Python
|
apache-2.0
| 1,739
| 0
|
import sqlalchemy as sa
from sqlalchemy_utils.functions.sort_query import make_order_by_deterministic
from tests import assert_contains, TestCase
class TestMakeOrderByDeterministic(TestCase):
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode)
email = sa.Column(sa.Unicode, unique=True)
email_lower = sa.orm.column_property(
sa.func.lower(name)
)
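            # NOTE: intentionally lowercases ``name`` rather than ``email``;
            # test_column_property below asserts 'lower("user".name)'.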
class Article(self.Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
author_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
author = sa.orm.relationship(User)
User.article_count = sa.orm.column_property(
sa.select([sa.func.count()], from_obj=Article)
.where(Article.author_id == User.id)
.label('article_count')
)
self.User = User
self.Article = Article
def test_column_property(self):
query = self.session.query(self.User).order_by(self.User.email_lower)
query = make_order_by_deterministic(query)
assert_contains('lower("user".name), "user".id ASC', query)
def test_unique_column(self):
query = self.session.query(self.User).order_by(self.User.email)
query = make_order_by_deterministic(query)
assert str(query).endswith('ORDER BY "user".email')
def test_non_unique_column(self):
query = self.session.query(self.User).order_by(self.User.name)
query = make_order_by_deterministic(query)
assert_contains('ORDER BY "user".name, "user".id ASC', query)
def test_descending_order_by(self):
query = self.session.query(self.User).order_by(
sa.desc(self.User.name)
)
query = make_order_by_deterministic(query)
assert_contains('ORDER BY "user".name DESC, "user".id DESC', query)
def test_ascending_order_by(self):
query = self.session.query(self.User).order_by(
sa.asc(self.User.name)
)
query = make_order_by_deterministic(query)
assert_contains('ORDER BY "user".name ASC, "user".id ASC', query)
def test_string_order_by(self):
query = self.session.query(self.User).order_by('name')
query = make_order_by_deterministic(query)
assert_contains('ORDER BY name, "user".id ASC', query)
def test_annotated_label(self):
query = self.session.query(self.User).order_by(self.User.article_count)
query = make_order_by_deterministic(query)
assert_contains('article_count, "user".id ASC', query)
def test_annotated_label_with_descending_order(self):
query = self.session.query(self.User).order_by(
sa.desc(self.User.article_count)
)
query = make_order_by_deterministic(query)
assert_contains('ORDER BY article_count DESC, "user".id DESC', query)
def test_query_without_order_by(self):
query = self.session.query(self.User)
query = make_order_by_deterministic(query)
assert 'ORDER BY "user".id' in str(query)
def test_alias(self):
alias = sa.orm.aliased(self.User.__table__)
query = self.session.query(alias).order_by(alias.c.name)
query = make_order_by_deterministic(query)
assert str(query).endswith('ORDER BY user_1.name, "user".id ASC')
|
cheungpat/sqlalchemy-utils
|
tests/functions/test_make_order_by_deterministic.py
|
Python
|
bsd-3-clause
| 3,482
| 0
|
from gooddataclient.dataset import Dataset
from gooddataclient.columns import ConnectionPoint, Label, Reference
class Employee(Dataset):
employee = ConnectionPoint(title='Employee', folder='Employee')
firstname = Label(title='First Name', reference='employee', folder='Employee')
lastname = Label(title='Last Name', reference='employee', folder='Employee')
department = Reference(title='Department', reference='department', schemaReference='Department', folder='Employee')
class Meta(Dataset.Meta):
column_order = ('employee', 'firstname', 'lastname', 'department')
def data(self):
return [{'employee': 'e1', 'lastname': 'Nowmer', 'department': 'd1', 'firstname': 'Sheri'},
{'employee': 'e2', 'lastname': 'Whelply', 'department': 'd1', 'firstname': 'Derrick'},
{'employee': 'e6', 'lastname': 'Damstra', 'department': 'd2', 'firstname': 'Roberta'},
{'employee': 'e7', 'lastname': 'Kanagaki', 'department': 'd3', 'firstname': 'Rebecca'},
{'employee': 'e8', 'lastname': 'Brunner', 'department': 'd11', 'firstname': 'Kim'},
{'employee': 'e9', 'lastname': 'Blumberg', 'department': 'd11', 'firstname': 'Brenda'},
{'employee': 'e10', 'lastname': 'Stanz', 'department': 'd5', 'firstname': 'Darren'},
{'employee': 'e11', 'lastname': 'Murraiin', 'department': 'd11', 'firstname': 'Jonathan'},
{'employee': 'e12', 'lastname': 'Creek', 'department': 'd11', 'firstname': 'Jewel'},
{'employee': 'e13', 'lastname': 'Medina', 'department': 'd11', 'firstname': 'Peggy'},
{'employee': 'e14', 'lastname': 'Rutledge', 'department': 'd11', 'firstname': 'Bryan'},
{'employee': 'e15', 'lastname': 'Cavestany', 'department': 'd11', 'firstname': 'Walter'},
{'employee': 'e16', 'lastname': 'Planck', 'department': 'd11', 'firstname': 'Peggy'},
{'employee': 'e17', 'lastname': 'Marshall', 'department': 'd11', 'firstname': 'Brenda'},
{'employee': 'e18', 'lastname': 'Wolter', 'department': 'd11', 'firstname': 'Daniel'},
{'employee': 'e19', 'lastname': 'Collins', 'department': 'd11', 'firstname': 'Dianne'}
]
maql = """
# THIS IS MAQL SCRIPT THAT GENERATES PROJECT LOGICAL MODEL.
# SEE THE MAQL DOCUMENTATION AT http://developer.gooddata.com/api/maql-ddl.html FOR MORE DETAILS
# CREATE DATASET. DATASET GROUPS ALL FOLLOWING LOGICAL MODEL ELEMENTS TOGETHER.
CREATE DATASET {dataset.employee} VISUAL(TITLE "Employee");
# CREATE THE FOLDERS THAT GROUP ATTRIBUTES AND FACTS
CREATE FOLDER {dim.employee} VISUAL(TITLE "Employee") TYPE ATTRIBUTE;
# CREATE ATTRIBUTES.
# ATTRIBUTES ARE CATEGORIES THAT ARE USED FOR SLICING AND DICING THE NUMBERS (FACTS)
CREATE ATTRIBUTE {attr.employee.employee} VISUAL(TITLE "Employee", FOLDER {dim.employee}) AS KEYS {f_employee.id} FULLSET;
ALTER DATASET {dataset.employee} ADD {attr.employee.employee};
# CREATE FACTS
# FACTS ARE NUMBERS THAT ARE AGGREGATED BY ATTRIBUTES.
# CREATE DATE FACTS
# DATES ARE REPRESENTED AS FACTS
# DATES ARE ALSO CONNECTED TO THE DATE DIMENSIONS
# CREATE REFERENCES
# REFERENCES CONNECT THE DATASET TO OTHER DATASETS
# CONNECT THE REFERENCE TO THE APPROPRIATE DIMENSION
ALTER ATTRIBUTE {attr.department.department} ADD KEYS {f_employee.department_id};
# ADD LABELS TO ATTRIBUTES
ALTER ATTRIBUTE {attr.employee.employee} ADD LABELS {label.employee.employee.firstname} VISUAL(TITLE "First Name") AS {f_employee.nm_firstname};
ALTER ATTRIBUTE {attr.employee.employee} DEFAULT LABEL {label.employee.employee.firstname};
# ADD LABELS TO ATTRIBUTES
ALTER ATTRIBUTE {attr.employee.employee} ADD LABELS {label.employee.employee.lastname} VISUAL(TITLE "Last Name") AS {f_employee.nm_lastname};
ALTER ATTRIBUTE {attr.employee.employee} ADD LABELS {label.employee.employee} VISUAL(TITLE "Employee") AS {f_employee.nm_employee};
# SYNCHRONIZE THE STORAGE AND DATA LOADING INTERFACES WITH THE NEW LOGICAL MODEL
SYNCHRONIZE {dataset.employee};
"""
schema_xml = '''
<schema>
<name>Employee</name>
<columns>
<column>
<name>employee</name>
<title>Employee</title>
<ldmType>CONNECTION_POINT</ldmType>
<folder>Employee</folder>
</column>
<column>
<name>firstname</name>
<title>First Name</title>
<ldmType>LABEL</ldmType>
<reference>employee</reference>
<folder>Employee</folder>
</column>
<column>
<name>lastname</name>
<title>Last Name</title>
<ldmType>LABEL</ldmType>
<reference>employee</reference>
<folder>Employee</folder>
</column>
<column>
<name>department</name>
<title>Department</title>
<ldmType>REFERENCE</ldmType>
<reference>department</reference>
<schemaReference>Department</schemaReference>
<folder>Employee</folder>
</column>
</columns>
</schema>
'''
data_csv = '''"employee","firstname","lastname","department"
"e1","Sheri","Nowmer","d1"
"e2","Derrick","Whelply","d1"
"e6","Roberta","Damstra","d2"
"e7","Rebecca","Kanagaki","d3"
"e8","Kim","Brunner","d11"
"e9","Brenda","Blumberg","d11"
"e10","Darren","Stanz","d5"
"e11","Jonathan","Murraiin","d11"
"e12","Jewel","Creek","d11"
"e13","Peggy","Medina","d11"
"e14","Bryan","Rutledge","d11"
"e15","Walter","Cavestany","d11"
"e16","Peggy","Planck","d11"
"e17","Brenda","Marshall","d11"
"e18","Daniel","Wolter","d11"
"e19","Dianne","Collins","d11"
'''
sli_manifest = {"dataSetSLIManifest": {
"parts": [
{
"columnName": "employee",
"mode": "FULL",
"populates": ["label.employee.employee"],
"referenceKey": 1
},
{
"columnName": "firstname",
"mode": "FULL",
"populates": ["label.employee.employee.firstname"]
},
{
"columnName": "lastname",
"mode": "FULL",
"populates": ["label.employee.employee.lastname"]
},
{
"columnName": "department",
"mode": "FULL",
"populates": ["label.department.department"],
"referenceKey": 1
}
],
"file": "data.csv",
"dataSet": "dataset.employee",
"csvParams": {
"quoteChar": "\"",
"escapeChar": "\"",
"separatorChar": ",",
"endOfLine": "\n"
}
}}
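# A hedged inspection sketch, not part of the original fixture (Dataset
# construction details are assumed away): the declarative pieces above can be
# checked directly against the expected CSV.
#
#   assert Employee.Meta.column_order == ('employee', 'firstname', 'lastname', 'department')
#   assert len(Employee.data(None)) == 16  # data() ignores `self`; one dict per CSV row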
|
comoga/gooddata-python
|
tests/examples/employee.py
|
Python
|
bsd-3-clause
| 6,397
| 0.00766
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.feature_column import base_feature_layer as kfc
from keras.feature_column import dense_features
from keras.utils import tf_contextlib
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.DenseFeatures', v1=[])
class DenseFeatures(dense_features.DenseFeatures):
"""A layer that produces a dense `Tensor` based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
  At the first layer of the model, this column-oriented data should be
  converted to a single `Tensor`.
This layer can be called multiple times with different features.
This is the V2 version of this layer that uses name_scopes to create
variables instead of variable_scopes. But this approach currently lacks
support for partitioned variables. In that case, use the V1 version instead.
Example:
```python
price = tf.feature_column.numeric_column('price')
keywords_embedded = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_hash_bucket("keywords", 10000),
      dimension=16)
columns = [price, keywords_embedded, ...]
feature_layer = tf.keras.layers.DenseFeatures(columns)
features = tf.io.parse_example(
..., features=tf.feature_column.make_parse_example_spec(columns))
dense_tensor = feature_layer(features)
for units in [128, 64, 32]:
dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)
prediction = tf.keras.layers.Dense(1)(dense_tensor)
```
"""
def __init__(self,
feature_columns,
trainable=True,
name=None,
**kwargs):
"""Creates a DenseFeatures object.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model. All items should be instances of classes derived
from `DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical
features, you can wrap them with an `embedding_column` or
`indicator_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` is not a `DenseColumn`.
"""
super(DenseFeatures, self).__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
**kwargs)
self._state_manager = _StateManagerImplV2(self, self.trainable)
def build(self, _):
for column in self._feature_columns:
with tf.name_scope(column.name):
column.create_state(self._state_manager)
# We would like to call Layer.build and not _DenseFeaturesHelper.build.
# pylint: disable=protected-access
super(kfc._BaseFeaturesLayer, self).build(None) # pylint: disable=bad-super-call
class _StateManagerImplV2(tf.__internal__.feature_column.StateManager): # pylint: disable=protected-access
"""Manages the state of DenseFeatures."""
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError('Variable already exists.')
    # We explicitly track these variables since `name` is not guaranteed to be
    # unique, and we disable the manual tracking that the `add_weight` call does.
with no_manual_dependency_tracking_scope(self._layer):
var = self._layer.add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource)
if isinstance(var, tf.__internal__.tracking.Trackable):
self._layer._track_trackable(var, feature_column.name + '/' + name) # pylint: disable=protected-access
self._cols_to_vars_map[feature_column][name] = var
return var
@tf_contextlib.contextmanager
def no_manual_dependency_tracking_scope(obj):
"""A context that disables manual dependency tracking for the given `obj`.
  Sometimes library methods might track objects on their own, and we might want
  to disable that and do the tracking ourselves. This context manager disables
  the tracking the library method does so that custom tracking can be used
  instead.
For example:
class TestLayer(tf.keras.Layer):
def build():
with no_manual_dependency_tracking_scope(self):
var = self.add_weight("name1") # Creates a var and doesn't track it
self._track_trackable("name2", var) # We track variable with name `name2`
Args:
obj: A trackable object.
Yields:
a scope in which the object doesn't track dependencies manually.
"""
# pylint: disable=protected-access
previous_value = getattr(obj, '_manual_tracking', True)
obj._manual_tracking = False
try:
yield
finally:
obj._manual_tracking = previous_value
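# A hedged usage sketch, not part of the original module (assumes TF 2.x with
# the feature_column API available): minimal end-to-end use of the layer
# defined above with a single numeric column.
#
#   import tensorflow as tf
#
#   price = tf.feature_column.numeric_column('price')
#   layer = tf.keras.layers.DenseFeatures([price])
#   dense = layer({'price': tf.constant([[1.0], [2.0]])})  # float32 Tensor of shape (2, 1)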
|
keras-team/keras
|
keras/feature_column/dense_features_v2.py
|
Python
|
apache-2.0
| 6,123
| 0.00343
|
from autoprotocol.unit import Unit
def dna_mass_to_mole(length, mass, ds=True):
"""
For the DNA Length and mass given, return the mole amount of DNA
Example Usage:
.. code-block:: python
from autoprotocol_utilities import dna_mass_to_mole
from autoprotocol.unit import Unit
dna_length = 100
dna_mass = Unit(33, 'ng')
dna_mass_to_mole(dna_length, dna_mass)
Returns:
.. code-block:: python
Unit(0.5, 'picomole')
Parameters
----------
length: int
Length of DNA in bp
mass: str, Unit
Weight of DNA in prefix-g
ds: bool, optional
True for dsDNA, False for ssDNA
Returns
-------
pmole_dna: Unit
Mole amount of DNA in pmol
Raises
------
ValueError
If inputs are not of specified types
"""
if isinstance(mass, str):
mass = Unit.fromstring(mass)
if not isinstance(mass, Unit) or str(mass.dimensionality) != "[mass]":
raise ValueError("Mass of DNA must be of type Unit in prefix-gram")
if not isinstance(length, int):
raise ValueError(
"Length of DNA is of type %s, must be of type "
"integer" % type(length))
if not isinstance(ds, bool):
raise ValueError(
"ds is of type %s, must be of type bool: True for dsDNA, "
"False for ssDNA" % type(ds))
dna_pg = mass.to("pg")
if ds:
dna_pmol = dna_pg / (Unit(660, "pg/pmol") * length)
else:
dna_pmol = dna_pg / (Unit(330, "pg/pmol") * length)
return dna_pmol
def dna_mole_to_mass(length, mole, ds=True):
"""
For the DNA Length and mole amount given, return the mass of DNA
Example Usage:
.. code-block:: python
from autoprotocol_utilities import dna_mole_to_mass
from autoprotocol.unit import Unit
dna_length = 5000
dna_mole = "10:pmol"
dna_mole_to_mass(dna_length, dna_mole)
Returns:
.. code-block:: python
Unit(33.0, 'microgram')
Parameters
----------
length: int
Length of DNA in bp
mole: str, Unit
Mole amount of DNA in prefix-mol
ds: bool, optional
True for dsDNA, False for ssDNA
Returns
-------
dna_ug: Unit
Weight of DNA in ug
Raises
------
ValueError
If inputs are not of specified types
"""
if isinstance(mole, str):
mole = Unit.fromstring(mole)
if not isinstance(mole, Unit) or str(mole.dimensionality) != "[substance]":
raise ValueError(
"Mole amount of DNA must be of type Unit in prefix-mol")
if not isinstance(length, int):
raise ValueError(
"Length of DNA is of type %s, must be of type "
"integer" % type(length))
if not isinstance(ds, bool):
raise ValueError(
"ds is of type %s, must be of type bool: True for dsDNA, "
"False for ssDNA" % type(ds))
dna_pmol = mole.to("pmol")
if ds:
dna_ug = (
Unit(660, "pg/pmol") * dna_pmol * Unit(10**(-6), "ug/pg") * length)
else:
dna_ug = (
Unit(330, "pg/pmol") * dna_pmol * Unit(10**(-6), "ug/pg") * length)
return dna_ug
def molar_to_mass_conc(length, molar, ds=True):
"""
For the DNA molarity given, return the mass concentration of DNA
Example Usage:
.. code-block:: python
from autoprotocol_utilities import molar_to_mass_conc
from autoprotocol_utilities import dna_mole_to_mass
from autoprotocol.unit import Unit
dna_length = 5000
dna_molarity = Unit(10, 'uM')
molar_to_mass_conc(dna_length, dna_molarity)
Returns:
.. code-block:: python
Unit(33000.0, 'nanogram / microliter')
Parameters
----------
length: int
Length of DNA in bp
molar: str, Unit
Molarity of DNA in prefix-M
ds: bool, optional
True for dsDNA, False for ssDNA
Returns
-------
mass_conc: Unit
Mass concentration of DNA in ng/uL
Raises
------
ValueError
If inputs are not of specified types
"""
if not isinstance(length, int):
raise ValueError(
"Length of DNA is of type %s, must be of type "
"integer" % type(length))
if isinstance(molar, str):
molar = Unit.fromstring(molar)
if not (isinstance(molar, Unit) and
str(molar.dimensionality) == '[substance] / [length] ** 3'):
raise ValueError(
"Molar concentration of DNA must be of type string or Unit")
if not isinstance(ds, bool):
raise ValueError(
"ds is of type %s, must be of type bool: True for dsDNA, "
"False for ssDNA" % type(ds))
dna_umole = Unit((molar / Unit(1, "M")).magnitude, "umol")
dna_ug = dna_mole_to_mass(length, dna_umole, ds)
mass_conc = Unit(dna_ug.magnitude * 1000, "ng/uL")
return mass_conc
def mass_conc_to_molar(length, mass_conc, ds=True):
"""
For the DNA mass concentration given, return the molarity of DNA
Example Usage:
.. code-block:: python
from autoprotocol_utilities import mass_conc_to_molar
from autoprotocol_utilities import dna_mass_to_mole
from autoprotocol.unit import Unit
dna_length = 5000
dna_mass_conc = Unit(33, 'ng/uL')
mass_conc_to_molar(dna_length, dna_mass_conc)
Returns:
.. code-block:: python
Unit(0.01, 'micromolar')
Parameters
----------
length: int
Length of DNA in bp
mass_conc: str, Unit
Mass concentration of DNA
ds: bool, optional
True for dsDNA, False for ssDNA
Returns
-------
molar: Unit
Molarity of DNA in uM
Raises
------
ValueError
If inputs are not of specified types
"""
if not isinstance(length, int):
raise ValueError(
"Length of DNA is of type %s, must be of type "
"integer" % type(length))
if isinstance(mass_conc, str):
mass_conc = Unit.fromstring(mass_conc)
if not isinstance(mass_conc, Unit) or \
str(mass_conc.dimensionality) != '[mass] / [length] ** 3':
raise ValueError("Mass concentration of DNA must be of type Unit")
if not isinstance(ds, bool):
raise ValueError(
"ds is of type %s, must be of type bool: True for dsDNA, "
"False for ssDNA" % type(ds))
dna_ng = Unit((mass_conc / Unit(1, "ng/uL")).magnitude, "ng")
dna_pmol = dna_mass_to_mole(length, dna_ng, ds)
dna_molar = Unit(round(dna_pmol.magnitude, 9), "uM")
return dna_molar
def ligation_insert_ng(plasmid_size, plasmid_mass,
insert_size, molar_ratio=1):
"""
For the plasmid size, plasmid amount, insert size, and molar ratio given,
return the mass of insert needed for ligation
Different from ligation_insert_volume: no insert concentration is
given -> returns mass of insert needed
Example Usage:
.. code-block:: python
from autoprotocol_utilities import ligation_insert_ng
from autoprotocol.unit import Unit
plasmid_size = 3000
plasmid_mass = Unit(100, 'ng')
insert_size = 48
ligation_insert_ng(plasmid_size, plasmid_mass, insert_size)
Returns:
.. code-block:: python
Unit(1.6, 'nanogram')
Parameters
----------
plasmid_size : int
Length of plasmid in bp.
insert_size: int
Length of insert in bp
plasmid_mass : str, Unit
Mass of plasmid in prefix-g
molar_ratio : int, float, string, optional
Ligation molar ratio of insert : vector. By default it is 1 : 1.
Generally ligations are tested at 1:3, 1:1, and 3:1
Returns
-------
    insert_ng: Unit
        Mass of insert needed in ng
Raises
------
ValueError
        If inputs are not of specified types
"""
# Check input types
if not isinstance(plasmid_size, int):
raise ValueError("Plasmid_size: must be an integer")
if not isinstance(insert_size, int):
raise ValueError("insert_size: must be an integer")
if type(molar_ratio) == str:
molar_ratio = float(
molar_ratio.split(":")[0]) / float(molar_ratio.split(":")[1])
if type(molar_ratio) not in (int, float):
raise ValueError(
"molar_ratio: must be an int, float, or string in the form "
"of int:int")
if isinstance(plasmid_mass, str):
plasmid_mass = Unit.fromstring(plasmid_mass)
if not (isinstance(plasmid_mass, Unit) and
str(plasmid_mass.dimensionality) == "[mass]"):
raise ValueError(
"Plasmid amount must be of type str or Unit in prefix-g")
length_ratio = float(insert_size) / float(plasmid_size)
plasmid_ng = plasmid_mass.to("ng")
insert_ng = plasmid_ng * length_ratio * molar_ratio
return insert_ng
def ligation_insert_volume(plasmid_size, plasmid_mass, insert_size,
insert_conc, ds=True, molar_ratio=1):
"""
For the plasmid size, plasmid amount, insert size, insert concentration,
and molar ratio given, return the volume of insert solution needed for
ligation
Different from ligation_insert_ng: insert concentration is given -> returns
volume of insert solution needed
Example Usage:
.. code-block:: python
from autoprotocol_utilities import ligation_insert_volume
from autoprotocol_utilities import molar_to_mass_conc
from autoprotocol.unit import Unit
plasmid_size = 3000
plasmid_mass = Unit(100, 'ng')
insert_size = 48
insert_conc = Unit(25, 'ng/uL')
ligation_insert_volume(plasmid_size, plasmid_mass, insert_size,
insert_conc)
Returns:
.. code-block:: python
Unit(0.064, 'microliter')
Parameters
----------
plasmid_size : int
Length of plasmid in bp.
plasmid_mass : str, Unit
Mass of plasmid in prefix-g
insert_size: int
Length of insert in bp
insert_conc: str, Unit
Molar or mass concentration of insert
ds: bool, optional
True for dsDNA, False for ssDNA
molar_ratio : int, float, string, optional
Ligation molar ratio of insert : vector.
Common ratios are 1:3, 1:1, and 3:1. 1:1 by default
Returns
-------
    insert_vol: Unit
        Volume of insert solution needed in uL
Raises
------
ValueError
        If inputs are not of specified types
"""
conc_dimension = ["[substance] / [length] ** 3", '[mass] / [length] ** 3']
# Check input types
if not isinstance(plasmid_size, int):
raise ValueError("Plasmid_size: must be an integer")
if isinstance(plasmid_mass, str):
plasmid_mass = Unit.fromstring(plasmid_mass)
    if not (isinstance(plasmid_mass, Unit) and
            str(plasmid_mass.dimensionality) == "[mass]"):
raise ValueError(
"Plasmid mass must be of type str or Unit in prefix-g")
if not isinstance(insert_size, int):
raise ValueError("insert_size: must be an integer")
if isinstance(insert_conc, str):
insert_conc = Unit.fromstring(insert_conc)
if not (isinstance(insert_conc, Unit) and
str(insert_conc.dimensionality) in conc_dimension):
raise ValueError(
"Plasmid concentration must be of type Unit in prefix-M or "
"prefix-g / prefix-L ")
if not isinstance(ds, bool):
raise ValueError(
"ds is of type %s, must be of type bool: True for dsDNA, "
"False for ssDNA" % type(ds))
if type(molar_ratio) == str:
molar_ratio = float(
molar_ratio.split(":")[0]) / float(molar_ratio.split(":")[1])
if type(molar_ratio) not in (int, float):
raise ValueError(
"molar_ratio: must be an int, float, or string in the "
"form of int:int")
len_ratio = float(insert_size) / float(plasmid_size)
plasmid_ng = plasmid_mass.to("ng")
insert_ng = plasmid_ng * len_ratio * molar_ratio
# Convert concentration to ng/uL
if str(insert_conc.dimensionality) == conc_dimension[0]:
insert_conc = molar_to_mass_conc(insert_size, insert_conc, ds)
else:
insert_conc = insert_conc.to("ng/uL")
insert_vol = insert_ng / insert_conc
return insert_vol
def ligation_insert_amount(plasmid_size, plasmid_conc, plasmid_volume,
insert_size, insert_conc, ds=True, molar_ratio=1):
"""
For the plasmid size, plasmid concentration, insert size, insert
concentration, and molar ratio given,
return the volume of insert solution needed for ligation
    Different from ligation_insert_volume: plasmid concentration and volume
are given instead of plasmid mass
Example Usage:
.. code-block:: python
from autoprotocol_utilities import ligation_insert_amount
from autoprotocol_utilities import molar_to_mass_conc
from autoprotocol.unit import Unit
plasmid_size = 2000
plasmid_conc = '1.5:uM'
plasmid_volume = Unit(10, 'uL')
insert_size = 25
insert_conc = Unit(10, 'ng/uL')
ligation_insert_amount(plasmid_size, plasmid_conc, plasmid_volume,
insert_size, insert_conc)
Returns:
.. code-block:: python
Unit(24.75, 'microliter')
Parameters
----------
plasmid_size : int
Length of plasmid in bp.
plasmid_conc : str, Unit
Molar or mass concentration of plasmid solution
plasmid_volume: str, Unit
Volume of plasmid solution in prefix-L
insert_size: int
Length of insert in bp
insert_conc : str, Unit
Molar or mass concentration of insert solution
ds: bool, optional
True for dsDNA, False for ssDNA
molar_ratio : int, float, string, optional
Ligation molar ratio of insert : vector.
Common ratios are 1:3, 1:1, and 3:1. 1:1 by default
Returns
-------
insert_amount: Unit
Volume of insert solution in uL
Raises
------
ValueError
        If inputs are not of specified types
"""
# Check input types
if not isinstance(plasmid_size, int):
raise ValueError("Plasmid_size: must be an integer")
if not isinstance(insert_size, int):
raise ValueError("insert_size: must be an integer")
if isinstance(plasmid_volume, str):
plasmid_volume = Unit.fromstring(plasmid_volume)
if not isinstance(plasmid_volume, Unit) or \
str(plasmid_volume.dimensionality) != "[length] ** 3":
raise ValueError(
"Volume of plasmid solution must be of type str or Unit")
conc_dimension = ["[substance] / [length] ** 3", '[mass] / [length] ** 3']
conc = [plasmid_conc, insert_conc]
size = [plasmid_size, insert_size]
for i in range(0, 2):
if isinstance(conc[i], str):
conc[i] = Unit.fromstring(conc[i])
if (isinstance(conc[i], Unit) and
str(conc[i].dimensionality) in conc_dimension):
# Convert all concentrations to ng/uL
if str(conc[i].dimensionality) == conc_dimension[0]:
conc[i] = molar_to_mass_conc(size[i], conc[i], ds)
else:
conc[i] = conc[i].to("ng/uL")
else:
raise ValueError(
"Concentration must be of type string or Unit ")
if not isinstance(ds, bool):
raise ValueError(
"ds is of type %s, must be of type bool: True for dsDNA, "
"False for ssDNA" % type(ds))
if type(molar_ratio) == str:
molar_ratio = float(
molar_ratio.split(":")[0]) / float(molar_ratio.split(":")[1])
if type(molar_ratio) not in (int, float):
raise ValueError(
"molar_ratio: must be an int, float, or string in the "
"form of int:int")
plasmid_conc = conc[0]
insert_conc = conc[1]
# Convert input volume to uL
plasmid_uL = Unit((plasmid_volume / Unit(1, "uL")).magnitude, "uL")
len_ratio = float(insert_size) / float(plasmid_size)
plasmid_ng = plasmid_conc * plasmid_uL
insert_ng = plasmid_ng * len_ratio * molar_ratio
insert_amount = insert_ng / insert_conc
return insert_amount
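# A hedged round-trip sketch, not part of the original module: the mass/mole
# helpers above should invert each other (values taken from the docstring
# examples).
#
#   from autoprotocol.unit import Unit
#
#   length = 100
#   pmol = dna_mass_to_mole(length, Unit(33, 'ng'))  # Unit(0.5, 'picomole')
#   back = dna_mole_to_mass(length, pmol)            # Unit(0.033, 'microgram') == 33 ng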
|
autoprotocol/autoprotocol-utilities
|
autoprotocol_utilities/bio_calculators.py
|
Python
|
bsd-3-clause
| 16,561
| 0
|
"""
Revision ID: 0356_add_webautn_auth_type
Revises: 0355_add_webauthn_table
Create Date: 2021-05-13 12:42:45.190269
"""
from alembic import op
revision = '0356_add_webautn_auth_type'
down_revision = '0355_add_webauthn_table'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("INSERT INTO auth_type VALUES ('webauthn_auth')")
op.drop_constraint('ck_users_mobile_or_email_auth', 'users', type_=None, schema=None)
op.execute("""
ALTER TABLE users ADD CONSTRAINT "ck_user_has_mobile_or_other_auth"
CHECK (auth_type in ('email_auth', 'webauthn_auth') or mobile_number is not null)
NOT VALID
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE users SET auth_type = 'sms_auth' WHERE auth_type = 'webauthn_auth'")
op.execute("UPDATE invited_users SET auth_type = 'sms_auth' WHERE auth_type = 'webauthn_auth'")
op.drop_constraint('ck_user_has_mobile_or_other_auth', 'users', type_=None, schema=None)
op.execute("""
ALTER TABLE users ADD CONSTRAINT "ck_users_mobile_or_email_auth"
CHECK (auth_type = 'email_auth' or mobile_number is not null)
NOT VALID
""")
op.execute("DELETE FROM auth_type WHERE name = 'webauthn_auth'")
# ### end Alembic commands ###
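# A hedged follow-up sketch, not part of this revision: the CHECK constraint is
# created NOT VALID, so existing rows are not re-checked. Once the data is known
# to be clean, a later migration could validate it:
#
#   op.execute('ALTER TABLE users VALIDATE CONSTRAINT "ck_user_has_mobile_or_other_auth"')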
|
alphagov/notifications-api
|
migrations/versions/0356_add_webautn_auth_type.py
|
Python
|
mit
| 1,380
| 0.003623
|
import fileinput
import argparse
from astexport import __version__, __prog_name__
from astexport.parse import parse
from astexport.export import export_json
def create_parser():
parser = argparse.ArgumentParser(
prog=__prog_name__,
description="Python source code in, JSON AST out. (v{})".format(
__version__
)
)
parser.add_argument(
"-i", "--input",
default="-",
help="file to read from or '-' to use standard input (default)"
)
parser.add_argument(
"-p", "--pretty",
action="store_true",
help="print indented JSON"
)
parser.add_argument(
"-v", "--version",
action="store_true",
help="print version and exit"
)
return parser
def main():
"""Read source from stdin, parse and export the AST as JSON"""
parser = create_parser()
args = parser.parse_args()
if args.version:
print("{} version {}".format(__prog_name__, __version__))
return
source = "".join(fileinput.input(args.input))
tree = parse(source)
json = export_json(tree, args.pretty)
print(json)
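# A hedged programmatic sketch, not part of the original module: the same
# parse/export pipeline can be driven without the CLI.
#
#   from astexport.parse import parse
#   from astexport.export import export_json
#
#   print(export_json(parse("x = 1 + 2"), True))  # pretty-printed JSON AST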
|
fpoli/python-astexport
|
astexport/cli.py
|
Python
|
mit
| 1,148
| 0
|
from copy import deepcopy
from distutils.spawn import find_executable
class Settings(object):
_upload_limit = 0
def __init__(self, settings=None):
if settings:
self._upload_limit = settings.up_kbytes_sec
@property
def upload_limit(self):
""" Returns the value as required by the trickle command (i.e. in KBytes) """
return self._upload_limit
def upload_limit_in_kbytes(self, upload_limit):
self._upload_limit = upload_limit if upload_limit is not None else 0
def to_argument_list(self):
"""
converts the setting in a list as required by the trickle command
"""
return ["-u", self._upload_limit] if self._upload_limit != 0 else []
class TrickleBwShaper(object):
    """
    Helper class to handle trickle (http://linux.die.net/man/1/trickle) usage
    """
    _trickle_cmd = "trickle"
def __init__(self, settings):
self._settings = deepcopy(settings)
self._trickle_cmd = find_executable("trickle")
if self._trickle_cmd is None:
raise RuntimeError("Couldn't find 'trickle' program")
def wrap_call(self, call_cmd):
"""
"wraps" the call_cmd so it can be executed by subprocess.call (and related flavors) as "args" argument
:param call_cmd: original args like argument (string or sequence)
:return: a sequence with the original command "executed" under trickle
"""
        if isinstance(call_cmd, str):
call_cmd = [call_cmd]
return [self._trickle_cmd, "-s"] + self._settings.to_argument_list() + list(call_cmd)
|
MatiasSM/fcb
|
fcb/utils/trickle.py
|
Python
|
lgpl-3.0
| 1,657
| 0.001811
|
#!/usr/bin/env python
"""
Test that runs each example notebook in pyfolio/examples and reports exceptions.
Each cell is submitted to the kernel, and checked for errors.
"""
import os
import glob
from runipy.notebook_runner import NotebookRunner
from pyfolio.utils import pyfolio_root
from pyfolio.ipycompat import read as read_notebook
def test_nbs():
path = os.path.join(pyfolio_root(), 'examples', '*.ipynb')
for ipynb in glob.glob(path):
with open(ipynb) as f:
nb = read_notebook(f, 'json')
nb_runner = NotebookRunner(nb)
nb_runner.run_notebook(skip_exceptions=False)
|
quantopian/pyfolio
|
pyfolio/tests/test_nbs.py
|
Python
|
apache-2.0
| 666
| 0
|
import sys
import traceback
import unittest
import unittest.mock
import rail
class TestIdentity(unittest.TestCase):
def test_returns_input_value(self):
value = unittest.mock.Mock()
self.assertEqual(value, rail.identity(value))
class TestNot(unittest.TestCase):
def test_returns_inverse_for_bool(self):
        self.assertEqual(True, rail.not_(False))
        self.assertEqual(False, rail.not_(True))
    def test_returns_inverse_for_truthy(self):
        self.assertEqual(True, rail.not_([]))
        self.assertEqual(False, rail.not_([0]))
class TestRaise(unittest.TestCase):
def test_raises_exception(self):
with self.assertRaises(ValueError) as context:
rail.raise_(ValueError('exception'))
self.assertEqual('exception', str(context.exception))
def test_preserves_traceback_when_reraising_without_exception(self):
def func(exception):
raise exception
try:
try:
func(ValueError('exception'))
except ValueError:
expected_exc_info = sys.exc_info()
rail.raise_()
except ValueError:
actual_exc_info = sys.exc_info()
self.assertEqual(expected_exc_info[0], actual_exc_info[0])
self.assertEqual(expected_exc_info[1], actual_exc_info[1])
expected_tb = traceback.format_tb(expected_exc_info[2])
actual_tb = traceback.format_tb(actual_exc_info[2])
self.assertEqual(expected_tb, actual_tb[-len(expected_tb):])
def test_preserves_traceback_when_reraising_with_exception(self):
def func(exception):
raise exception
try:
try:
func(ValueError('exception'))
except ValueError as exception:
expected_exc_info = sys.exc_info()
rail.raise_(exception)
except ValueError:
actual_exc_info = sys.exc_info()
self.assertEqual(expected_exc_info[0], actual_exc_info[0])
self.assertEqual(expected_exc_info[1], actual_exc_info[1])
expected_tb = traceback.format_tb(expected_exc_info[2])
actual_tb = traceback.format_tb(actual_exc_info[2])
self.assertEqual(expected_tb, actual_tb[-len(expected_tb):])
class TestTry(unittest.TestCase):
def test_no_exception_raised(self):
input = unittest.mock.Mock()
expected_value = unittest.mock.Mock()
func = unittest.mock.Mock(return_value=expected_value)
handle = unittest.mock.Mock()
self.assertEqual(expected_value, rail.try_(func, handle)(input))
func.assert_called_once_with(input)
handle.assert_not_called()
def test_exception_raised(self):
input = unittest.mock.Mock()
exception = ValueError('value')
func = unittest.mock.Mock(side_effect=lambda _: rail.raise_(exception))
output = unittest.mock.Mock()
handle = unittest.mock.Mock(return_value=output)
self.assertEqual(output, rail.try_(func, handle)(input))
func.assert_called_once_with(input)
handle.assert_called_once_with(exception)
class TestMatch(unittest.TestCase):
def test_no_match_statements_provided(self):
value = unittest.mock.Mock()
with self.assertRaises(rail.UnmatchedValueError) as context:
rail.match()(value)
self.assertEqual(value, context.exception.value)
def test_value_unmatched_by_all_match_statements(self):
value = unittest.mock.Mock()
with self.assertRaises(rail.UnmatchedValueError) as context:
match = rail.match(
(lambda _: False, lambda _: unittest.mock.Mock()),
(lambda _: False, lambda _: unittest.mock.Mock()),
(lambda _: False, lambda _: unittest.mock.Mock())
)
match(value)
self.assertEqual(value, context.exception.value)
def test_value_matches_single_match_statement(self):
expected_value = unittest.mock.Mock()
match = rail.match(
(lambda _: False, lambda _: unittest.mock.Mock()),
(lambda _: True, lambda _: expected_value),
(lambda _: False, lambda _: unittest.mock.Mock())
)
self.assertEqual(expected_value, match(unittest.mock.Mock()))
def test_value_matches_multiple_match_statements(self):
expected_value = unittest.mock.Mock()
match = rail.match(
(lambda _: False, lambda _: unittest.mock.Mock()),
(lambda _: True, lambda _: expected_value),
(lambda _: True, lambda _: unittest.mock.Mock())
)
self.assertEqual(expected_value, match(unittest.mock.Mock()))
class TestMatchType(unittest.TestCase):
def test_no_match_statements_provided(self):
value = unittest.mock.Mock()
with self.assertRaises(rail.UnmatchedValueError) as context:
rail.match_type()(value)
self.assertEqual(value, context.exception.value)
def test_value_unmatched_by_all_match_statements(self):
value = unittest.mock.Mock()
with self.assertRaises(rail.UnmatchedValueError) as context:
match = rail.match_type(
(str, lambda _: unittest.mock.Mock()),
(float, lambda _: unittest.mock.Mock()),
(Exception, lambda _: unittest.mock.Mock())
)
match(value)
self.assertEqual(value, context.exception.value)
def test_value_matches_single_match_statement(self):
expected_value = unittest.mock.Mock()
match = rail.match_type(
(int, lambda _: unittest.mock.Mock()),
(unittest.mock.Mock, lambda _: expected_value),
(dict, lambda _: unittest.mock.Mock())
)
self.assertEqual(expected_value, match(unittest.mock.Mock()))
def test_value_matches_multiple_match_statements(self):
expected_value = unittest.mock.Mock()
match = rail.match_type(
(bool, lambda _: unittest.mock.Mock()),
(unittest.mock.Mock, lambda _: expected_value),
(unittest.mock.Mock, lambda _: unittest.mock.Mock())
)
self.assertEqual(expected_value, match(unittest.mock.Mock()))
def test_value_subclass_of_match_type(self):
expected_value = unittest.mock.Mock()
match = rail.match_type(
(bool, lambda _: unittest.mock.Mock()),
(object, lambda _: expected_value),
(unittest.mock.Mock, lambda _: unittest.mock.Mock())
)
self.assertEqual(expected_value, match(unittest.mock.Mock()))
class TestMatchLength(unittest.TestCase):
def test_no_match_statements_provided(self):
value = unittest.mock.Mock()
with self.assertRaises(rail.UnmatchedValueError) as context:
rail.match_length()(value)
self.assertEqual(value, context.exception.value)
def test_value_unmatched_by_all_match_statements(self):
value = unittest.mock.Mock()
value.__len__ = unittest.mock.Mock(return_value=2)
with self.assertRaises(rail.UnmatchedValueError) as context:
match = rail.match_length(
(rail.eq(8), lambda _: unittest.mock.Mock()),
(rail.gt(3), lambda _: unittest.mock.Mock())
)
match(value)
self.assertEqual(value, context.exception.value)
def test_value_matches_single_match_statement(self):
expected_value = unittest.mock.Mock()
match = rail.match_length(
(rail.lt(0), lambda _: unittest.mock.Mock()),
(rail.eq(0), lambda _: expected_value),
(rail.gt(0), lambda _: unittest.mock.Mock())
)
value = unittest.mock.Mock()
value.__len__ = unittest.mock.Mock(return_value=0)
self.assertEqual(expected_value, match(value))
def test_value_matches_multiple_match_statements(self):
expected_value = unittest.mock.Mock()
match = rail.match_length(
(rail.lt(0), lambda _: unittest.mock.Mock()),
(rail.ge(0), lambda _: expected_value),
(rail.eq(0), lambda _: unittest.mock.Mock())
)
value = unittest.mock.Mock()
value.__len__ = unittest.mock.Mock(return_value=0)
self.assertEqual(expected_value, match(value))
class TestPartial(unittest.TestCase):
def test_func_with_no_args(self):
@rail.partial
def func():
return 'value'
self.assertEqual('value', func())
def test_func_with_single_arg(self):
@rail.partial
def func(arg):
return arg
value = unittest.mock.Mock()
self.assertEqual(value, func(value))
def test_func_with_multiple_args(self):
@rail.partial
def func(arg1, arg2, arg3):
return arg1, arg2, arg3
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
val3 = unittest.mock.Mock()
self.assertEqual((val1, val2, val3), func(val1, val2, val3))
self.assertEqual((val1, val2, val3), func(val1)(val2, val3))
self.assertEqual((val1, val2, val3), func(val1, val2)(val3))
self.assertEqual((val1, val2, val3), func(val1)(val2)(val3))
def test_func_with_arguments_applied_out_of_order(self):
@rail.partial
def func(arg1, arg2, arg3):
return arg1, arg2, arg3
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
val3 = unittest.mock.Mock()
self.assertEqual((val1, val2, val3), func(arg2=val2)(val1, val3))
self.assertEqual((val1, val2, val3), func(arg3=val3)(val1, val2))
self.assertEqual(
(val1, val2, val3), func(arg2=val2, arg3=val3)(val1)
)
self.assertEqual(
(val1, val2, val3), func(arg3=val3)(arg2=val2)(val1)
)
self.assertEqual((val1, val2, val3), func(val1, arg3=val3)(val2))
def test_func_with_default_arguments(self):
@rail.partial
def func(arg1, arg2, arg3='val3', arg4='val4'):
return arg1, arg2, arg3, arg4
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
val3 = unittest.mock.Mock()
val4 = unittest.mock.Mock()
self.assertEqual((val1, val2, 'val3', 'val4'), func(val1, val2))
self.assertEqual((val1, val2, 'val3', 'val4'), func(val1)(val2))
self.assertEqual(
(val1, val2, val3, val4), func(val1, val2, val3, val4)
)
self.assertEqual(
(val1, val2, val3, val4), func(val1)(val2, val3, val4)
)
self.assertEqual(
(val1, val2, val3, val4), func(val1, arg3=val3)(val2, val4)
)
def test_func_with_default_arguments_only(self):
@rail.partial
def func(arg1='val1', arg2='val2'):
return arg1, arg2
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
self.assertEqual(('val1', 'val2'), func())
self.assertEqual((val1, 'val2'), func(val1))
self.assertEqual(('val1', val2), func(arg2=val2))
self.assertEqual((val1, val2), func(val1, val2))
def test_func_with_argument_list(self):
@rail.partial
def func(arg1, arg2, *args):
return (arg1, arg2) + args
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
val3 = unittest.mock.Mock()
val4 = unittest.mock.Mock()
self.assertEqual((val1, val2), func(val1, val2))
self.assertEqual((val1, val2), func(val1)(val2))
self.assertEqual(
(val1, val2, val3, val4), func(val1, val2, val3, val4)
)
self.assertEqual(
(val1, val2, val3, val4), func(val1)(val2, val3, val4)
)
def test_func_with_argument_list_only(self):
@rail.partial
def func(*args):
return args
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
self.assertEqual((), func())
self.assertEqual((val1,), func(val1))
self.assertEqual((val1, val2), func(val1, val2))
def test_func_with_keyword_arguments(self):
@rail.partial
def func(arg1, arg2, **kwargs):
return (arg1, arg2) + ((kwargs,) if kwargs else ())
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
val3 = unittest.mock.Mock()
val4 = unittest.mock.Mock()
self.assertEqual((val1, val2), func(val1, val2))
self.assertEqual((val1, val2), func(val1)(val2))
self.assertEqual(
(val1, val2, {'val3': val3, 'val4': val4}),
func(val1, val2, val3=val3, val4=val4)
)
self.assertEqual(
(val1, val2, {'val3': val3, 'val4': val4}),
func(val1, val3=val3)(val2, val4=val4)
)
def test_func_with_keyword_arguments_only(self):
@rail.partial
def func(**kwargs):
return kwargs
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
self.assertEqual({}, func())
self.assertEqual({'arg1': val1}, func(arg1=val1))
self.assertEqual(
{'arg1': val1, 'arg2': val2}, func(arg1=val1, arg2=val2)
)
def test_docstring_preserved(self):
@rail.partial
def func1(arg1, arg2):
"""Docstring for func"""
return arg1, arg2
self.assertEqual('Docstring for func', func1.__doc__)
func2 = func1(unittest.mock.Mock())
self.assertEqual('Docstring for func', func2.__doc__)
class TestCompose(unittest.TestCase):
def test_compose_with_no_funcs(self):
func = rail.compose()
value = unittest.mock.Mock()
self.assertEqual(value, func(value))
def test_compose_with_no_exception(self):
expected_value = unittest.mock.Mock()
func = rail.compose(
lambda value: expected_value
)
self.assertEqual(expected_value, func(unittest.mock.Mock()))
def test_compose_with_exception(self):
with self.assertRaises(ValueError) as context:
func = rail.compose(
lambda value: rail.raise_(ValueError('exception'))
)
func(unittest.mock.Mock())
self.assertEqual('exception', str(context.exception))
def test_compose_with_multiple_funcs(self):
return_value1 = unittest.mock.Mock()
return_value2 = unittest.mock.Mock()
return_value3 = unittest.mock.Mock()
func1 = unittest.mock.Mock(return_value=return_value1)
func2 = unittest.mock.Mock(return_value=return_value2)
func3 = unittest.mock.Mock(return_value=return_value3)
func = rail.compose(
func1,
func2,
func3
)
value = unittest.mock.Mock()
self.assertEqual(return_value3, func(value))
func1.assert_called_once_with(value)
func2.assert_called_once_with(return_value1)
func3.assert_called_once_with(return_value2)
class TestPipe(unittest.TestCase):
def test_pipe(self):
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
val3 = unittest.mock.Mock()
self.assertEqual(
(val1, val2, val3),
rail.pipe(
(val1,),
lambda val: val + (val2,),
lambda val: val + (val3,)
)
)
def test_use_pipe_to_create_scope(self):
val1 = unittest.mock.Mock()
val2 = unittest.mock.Mock()
val3 = unittest.mock.Mock()
self.assertEqual(
(val1, val2, val3),
rail.pipe(
(val1,),
lambda arg1: rail.pipe(
(val2,),
lambda arg2: arg1 + arg2,
lambda arg: arg + (val3,)
)
)
)
class TestTee(unittest.TestCase):
def test_with_multiple_funcs(self):
input = unittest.mock.Mock()
func1 = unittest.mock.Mock(return_value=unittest.mock.Mock())
func2 = unittest.mock.Mock(return_value=unittest.mock.Mock())
func3 = unittest.mock.Mock()
self.assertEqual(
input,
rail.pipe(input, rail.tee(func1, func2, func3))
)
func1.assert_called_once_with(input)
func2.assert_called_once_with(func1.return_value)
func3.assert_called_once_with(func2.return_value)
class TestCallWith(unittest.TestCase):
def test_calls_function_with_value(self):
value = unittest.mock.Mock()
func = unittest.mock.Mock()
rail.call_with(value, func)
func.assert_called_once_with(value)
def test_partial_application(self):
value = unittest.mock.Mock()
func = unittest.mock.Mock()
rail.pipe(func, rail.call_with(value))
def test_returns_func_return_value(self):
return_value = unittest.mock.Mock()
func = unittest.mock.Mock(return_value=return_value)
self.assertEqual(
return_value, rail.call_with(unittest.mock.Mock(), func)
)
class TestLt(unittest.TestCase):
def test_pipe_returns_true(self):
self.assertTrue(rail.pipe(5, rail.lt(7)))
def test_pipe_returns_false(self):
self.assertFalse(rail.pipe(8, rail.lt(1)))
class TestLe(unittest.TestCase):
def test_pipe_returns_true_for_different_values(self):
self.assertTrue(rail.pipe(5, rail.le(7)))
def test_pipe_returns_true_for_equal_values(self):
self.assertTrue(rail.pipe(5, rail.le(5)))
def test_pipe_returns_false(self):
self.assertFalse(rail.pipe(8, rail.le(1)))
class TestEq(unittest.TestCase):
def test_pipe_returns_true(self):
value = unittest.mock.Mock()
self.assertTrue(rail.pipe(value, rail.eq(value)))
def test_pipe_returns_false(self):
value1 = unittest.mock.Mock()
value2 = unittest.mock.Mock()
self.assertFalse(rail.pipe(value1, rail.eq(value2)))
class TestNe(unittest.TestCase):
def test_pipe_returns_true(self):
value1 = unittest.mock.Mock()
value2 = unittest.mock.Mock()
self.assertTrue(rail.pipe(value1, rail.ne(value2)))
def test_pipe_returns_false(self):
value = unittest.mock.Mock()
self.assertFalse(rail.pipe(value, rail.ne(value)))
class TestGt(unittest.TestCase):
def test_pipe_returns_true(self):
self.assertTrue(rail.pipe(4, rail.gt(0)))
def test_pipe_returns_false(self):
self.assertFalse(rail.pipe(13, rail.gt(15)))
class TestGe(unittest.TestCase):
def test_pipe_returns_true_for_different_values(self):
self.assertTrue(rail.pipe(6, rail.ge(2)))
def test_pipe_returns_true_for_equal_values(self):
self.assertTrue(rail.pipe(4, rail.ge(4)))
def test_pipe_returns_false(self):
self.assertFalse(rail.pipe(6, rail.ge(9)))
class TestTrack(unittest.TestCase):
def test_compose_with_existing_func(self):
return_value1 = unittest.mock.Mock()
return_value2 = unittest.mock.Mock()
return_value3 = unittest.mock.Mock()
func1 = unittest.mock.Mock(return_value=return_value1)
func2 = unittest.mock.Mock(return_value=return_value2)
func3 = unittest.mock.Mock(return_value=return_value3)
func = rail.Track().compose(
func1
).compose(
func2,
func3
)
value = unittest.mock.Mock()
self.assertEqual(return_value3, func(value))
func1.assert_called_once_with(value)
func2.assert_called_once_with(return_value1)
func3.assert_called_once_with(return_value2)
def test_tee_called_consecutively(self):
func1 = unittest.mock.Mock()
func2 = unittest.mock.Mock()
func = rail.Track().tee(
func1
).tee(
func2
)
value = unittest.mock.Mock()
self.assertEqual(value, func(value))
func1.assert_called_once_with(value)
func2.assert_called_once_with(value)
def test_fold_with_no_exception(self):
expected_value = unittest.mock.Mock()
func = rail.Track().compose(
lambda value: unittest.mock.Mock()
).fold(
lambda value: expected_value,
lambda exception: self.fail()
)
self.assertEqual(expected_value, func(unittest.mock.Mock()))
def test_fold_with_exception(self):
expected_exception = KeyError('key')
actual_exception = rail.pipe(
unittest.mock.Mock(),
rail.Track().compose(
lambda _: rail.raise_(expected_exception)
).fold(
lambda _: self.fail(),
rail.identity
)
)
self.assertEqual(expected_exception, actual_exception)
def test_fold_traceback_with_exception(self):
exception = KeyError('key')
func = rail.Track().compose(
lambda _: rail.raise_(exception)
)
try:
func(unittest.mock.Mock())
except KeyError:
expected_exc_info = sys.exc_info()
try:
rail.pipe(
unittest.mock.Mock(),
func.fold(
lambda _: self.fail(),
rail.raise_
)
)
except KeyError:
actual_exc_info = sys.exc_info()
self.assertEqual(expected_exc_info[0], actual_exc_info[0])
self.assertEqual(expected_exc_info[1], actual_exc_info[1])
expected_tb = traceback.format_tb(expected_exc_info[2])
actual_tb = traceback.format_tb(actual_exc_info[2])
self.assertEqual(expected_tb, actual_tb[-len(expected_tb):])
def test_handle_with_multiple_funcs(self):
expected_exception = ValueError('value')
func = rail.Track().compose(
lambda value: rail.raise_(ValueError('value'))
).handle(
lambda exception: unittest.mock.Mock(),
lambda exception: expected_exception
)
self.assertEqual(expected_exception, func(unittest.mock.Mock()))
def test_handle_with_no_exception(self):
expected_value = unittest.mock.Mock()
func = rail.Track().compose(
lambda value: expected_value
).handle(
lambda exception: self.fail()
)
self.assertEqual(expected_value, func(unittest.mock.Mock()))
def test_handle_with_exception(self):
expected_exception = KeyError('key')
actual_exception = rail.pipe(
unittest.mock.Mock(),
rail.Track().compose(
lambda _: rail.raise_(expected_exception)
).handle(
rail.identity
)
)
self.assertEqual(expected_exception, actual_exception)
def test_handle_traceback_with_exception(self):
exception = KeyError('key')
func = rail.Track().compose(
lambda _: rail.raise_(exception)
)
try:
func(unittest.mock.Mock())
except KeyError:
expected_exc_info = sys.exc_info()
try:
rail.pipe(
unittest.mock.Mock(),
func.handle(
rail.raise_
)
)
except KeyError:
actual_exc_info = sys.exc_info()
self.assertEqual(expected_exc_info[0], actual_exc_info[0])
self.assertEqual(expected_exc_info[1], actual_exc_info[1])
expected_tb = traceback.format_tb(expected_exc_info[2])
actual_tb = traceback.format_tb(actual_exc_info[2])
self.assertEqual(expected_tb, actual_tb[-len(expected_tb):])
if __name__ == '__main__':
unittest.main()
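# A hedged usage sketch, not part of the original test module: the combinators
# exercised above compose into a single pipeline (API exactly as used in these
# tests).
#
#   handle_len = rail.match_length(
#       (rail.eq(0), lambda _: 'empty'),
#       (rail.gt(0), lambda _: 'non-empty'),
#   )
#   assert rail.pipe([1, 2, 3], handle_len) == 'non-empty'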
|
rob-earwaker/rail
|
test_rail.py
|
Python
|
mit
| 23,712
| 0
|
ad = 1.
# this statement will be ignored during codegen
x = ad is None
|
ratnania/pyccel
|
tests/warnings/codegen/is.py
|
Python
|
mit
| 72
| 0
|
import json
from django.utils import unittest
from django.test.client import RequestFactory
from formalizr.tests.views import SimpleFormView, SimpleCreateView, SimpleUpdateView
from formalizr.tests.models import SimpleModel
class AjaxFormViewTest(unittest.TestCase):
view_class = SimpleFormView
VALUE = 1
def setUp(self):
self.factory = RequestFactory()
SimpleModel.objects.all().delete()
def testRequest(self):
"""
        Posts a valid form in the normal (non-AJAX) way
"""
data = {"value": AjaxFormViewTest.VALUE}
request = self.factory.post('/', data)
response = self.view_class.as_view()(request)
self.assertEqual(302, response.status_code)
self.assertEqual(self.view_class.success_url, response["location"])
def testNotValid(self):
"""
        Posts an invalid form in the normal (non-AJAX) way
"""
data = {}
request = self.factory.post('/', data)
response = self.view_class.as_view()(request)
self.assertEqual(200, response.status_code)
self.assertIn("value", response.context_data["form"].errors)
def testAjaxRequest(self):
"""
        Posts a valid form through an AJAX request.
Response with redirect must be in JSON.
"""
data = {"value": AjaxFormViewTest.VALUE}
request = self.factory.post('/', data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = self.view_class.as_view()(request)
self.assertEqual(200, response.status_code)
self.assertEqual('application/json', response['content-type'].split(';')[0])
resp = json.loads(response.content)
self.assertEqual("redirect", resp["status"])
self.assertEqual(self.view_class.success_url, resp["location"])
return resp
def testAjaxNotValid(self):
"""
        Posts an invalid form through an AJAX request.
Response with errors must be in JSON.
"""
data = {}
request = self.factory.post('/', data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = self.view_class.as_view()(request)
self.assertEqual(400, response.status_code)
self.assertEqual('application/json', response['content-type'].split(';')[0])
resp = json.loads(response.content)
self.assertEqual("error", resp["status"])
self.assertIn("value", resp["errors"])
return resp
def testAjaxResultRequest(self):
"""
        Posts a valid form through an AJAX request.
Response with result must be in JSON.
"""
data = {"value": AjaxFormViewTest.VALUE, "_return": "result"}
request = self.factory.post('/', data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = self.view_class.as_view()(request)
self.assertEqual(200, response.status_code)
self.assertEqual('application/json', response['content-type'].split(';')[0])
resp = json.loads(response.content)
self.assertEqual("success", resp["status"])
return resp
class AjaxCreateViewTest(AjaxFormViewTest):
view_class = SimpleCreateView
def testRequest(self):
self.assertEqual(SimpleModel.objects.filter(value=AjaxFormViewTest.VALUE).count(), 0)
super(AjaxCreateViewTest, self).testRequest()
self.assertEqual(SimpleModel.objects.filter(value=AjaxFormViewTest.VALUE).count(), 1)
def testAjaxRequest(self):
self.assertEqual(SimpleModel.objects.filter(value=AjaxFormViewTest.VALUE).count(), 0)
super(AjaxCreateViewTest, self).testAjaxRequest()
self.assertEqual(SimpleModel.objects.filter(value=AjaxFormViewTest.VALUE).count(), 1)
def testAjaxResultRequest(self):
self.assertEqual(SimpleModel.objects.filter(value=AjaxFormViewTest.VALUE).count(), 0)
resp = super(AjaxCreateViewTest, self).testAjaxResultRequest()
self.assertEqual(SimpleModel.objects.filter(value=AjaxFormViewTest.VALUE).count(), 1)
self.assertIn("pk", resp["object"])
obj = SimpleModel.objects.get(pk=resp["object"]["pk"])
self.assertEqual(AjaxFormViewTest.VALUE, obj.value)
class AjaxUpdateViewTest(AjaxCreateViewTest):
view_class = SimpleUpdateView
def setUp(self):
super(AjaxUpdateViewTest, self).setUp()
SimpleModel.objects.filter(value=SimpleUpdateView.VALUE).delete()
SimpleModel(value=SimpleUpdateView.VALUE).save()
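# A hedged summary sketch, not part of the original tests: the JSON contract
# asserted above, consolidated (response bodies are illustrative).
#
#   valid AJAX post              -> 200 {"status": "redirect", "location": <success_url>}
#   valid AJAX post + "_return"  -> 200 {"status": "success", "object": {...}}
#   invalid AJAX post            -> 400 {"status": "error", "errors": {<field>: [...]}}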
|
krasnoperov/django-formalizr
|
formalizr/tests/tests.py
|
Python
|
bsd-3-clause
| 4,412
| 0.002947
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2016. 10. 11.
@author: "comfact"
'''
import yaml
from ansible.module_utils.basic import *
from acidipy import deployACI
DOCUMENTATION = '''
---
module: acibuilder
version_added: historical
short_description: acidipy ansible module.
description:
    - The AciBuilder Ansible module from acidipy; deploys an ACI description (Controller/Option/Tenant) via acidipy's deployACI.
options: {}
author: hyjang@cisco.com
'''
EXAMPLES = '''
# Deploy an ACI tenant description (parameter values are illustrative)
- acibuilder:
    Controller: "{{ controller_yaml }}"
    Option: "{{ option_yaml }}"
    Tenant: "{{ tenant_yaml }}"
'''
def main():
module = AnsibleModule(
argument_spec = dict(
Controller=dict(required=True),
Option=dict(required=True),
Tenant=dict(required=True)
),
supports_check_mode = True
)
    ctrl = yaml.safe_load(module.params['Controller'])
    opts = yaml.safe_load(module.params['Option'])
    tnts = yaml.safe_load(module.params['Tenant'])
desc = {'Controller' : ctrl, 'Option' : opts, 'Tenant' : tnts}
result = deployACI(desc)
module.exit_json(**result)
if __name__ == '__main__':
    main()
|
HyechurnJang/acidipy
|
tools/ansible/acibuilder.py
|
Python
|
apache-2.0
| 1,055
| 0.010427
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GenericCommandDialog.ui'
#
# Created: Wed Mar 25 17:15:12 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_GenericCommandDialog(object):
def setupUi(self, GenericCommandDialog):
GenericCommandDialog.setObjectName(_fromUtf8("GenericCommandDialog"))
GenericCommandDialog.resize(549, 504)
self.verticalLayout_6 = QtGui.QVBoxLayout(GenericCommandDialog)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.subSystemCommandPageLabel = QtGui.QLabel(GenericCommandDialog)
self.subSystemCommandPageLabel.setAlignment(QtCore.Qt.AlignCenter)
self.subSystemCommandPageLabel.setObjectName(_fromUtf8("subSystemCommandPageLabel"))
self.verticalLayout.addWidget(self.subSystemCommandPageLabel)
self.subSystemTextBrowser = QtGui.QTextBrowser(GenericCommandDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.subSystemTextBrowser.sizePolicy().hasHeightForWidth())
self.subSystemTextBrowser.setSizePolicy(sizePolicy)
self.subSystemTextBrowser.setMinimumSize(QtCore.QSize(159, 31))
self.subSystemTextBrowser.setMaximumSize(QtCore.QSize(300, 31))
self.subSystemTextBrowser.setObjectName(_fromUtf8("subSystemTextBrowser"))
self.verticalLayout.addWidget(self.subSystemTextBrowser)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.packetIdLabel = QtGui.QLabel(GenericCommandDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.packetIdLabel.sizePolicy().hasHeightForWidth())
self.packetIdLabel.setSizePolicy(sizePolicy)
self.packetIdLabel.setMinimumSize(QtCore.QSize(0, 13))
self.packetIdLabel.setMaximumSize(QtCore.QSize(16777193, 13))
self.packetIdLabel.setObjectName(_fromUtf8("packetIdLabel"))
self.verticalLayout_2.addWidget(self.packetIdLabel)
self.packetId = QtGui.QLCDNumber(GenericCommandDialog)
self.packetId.setObjectName(_fromUtf8("packetId"))
self.verticalLayout_2.addWidget(self.packetId)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_5 = QtGui.QLabel(GenericCommandDialog)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.verticalLayout_3.addWidget(self.label_5)
self.commandAddressLineEdit = QtGui.QLineEdit(GenericCommandDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.commandAddressLineEdit.sizePolicy().hasHeightForWidth())
self.commandAddressLineEdit.setSizePolicy(sizePolicy)
self.commandAddressLineEdit.setMinimumSize(QtCore.QSize(135, 31))
self.commandAddressLineEdit.setMaximumSize(QtCore.QSize(135, 31))
self.commandAddressLineEdit.setObjectName(_fromUtf8("commandAddressLineEdit"))
self.verticalLayout_3.addWidget(self.commandAddressLineEdit)
self.horizontalLayout.addLayout(self.verticalLayout_3)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.buttonBox = QtGui.QDialogButtonBox(GenericCommandDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.horizontalLayout.addWidget(self.buttonBox)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.label = QtGui.QLabel(GenericCommandDialog)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_4.addWidget(self.label)
self.verticalLayout_6.addLayout(self.verticalLayout_4)
self.scrollArea = QtGui.QScrollArea(GenericCommandDialog)
self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 508, 1000))
self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 1000))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.itemLabelTextBrowser_1 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_1.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_1.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_1.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_1.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_1.setObjectName(_fromUtf8("itemLabelTextBrowser_1"))
self.horizontalLayout_2.addWidget(self.itemLabelTextBrowser_1)
self.SendButton_1 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_1.sizePolicy().hasHeightForWidth())
self.SendButton_1.setSizePolicy(sizePolicy)
self.SendButton_1.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_1.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_1.setObjectName(_fromUtf8("SendButton_1"))
self.horizontalLayout_2.addWidget(self.SendButton_1)
self.verticalLayout_5.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.itemLabelTextBrowser_2 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_2.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_2.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_2.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_2.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_2.setObjectName(_fromUtf8("itemLabelTextBrowser_2"))
self.horizontalLayout_3.addWidget(self.itemLabelTextBrowser_2)
self.SendButton_2 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_2.sizePolicy().hasHeightForWidth())
self.SendButton_2.setSizePolicy(sizePolicy)
self.SendButton_2.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_2.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_2.setObjectName(_fromUtf8("SendButton_2"))
self.horizontalLayout_3.addWidget(self.SendButton_2)
self.verticalLayout_5.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.itemLabelTextBrowser_3 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_3.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_3.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_3.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_3.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_3.setObjectName(_fromUtf8("itemLabelTextBrowser_3"))
self.horizontalLayout_4.addWidget(self.itemLabelTextBrowser_3)
self.SendButton_3 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_3.sizePolicy().hasHeightForWidth())
self.SendButton_3.setSizePolicy(sizePolicy)
self.SendButton_3.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_3.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_3.setObjectName(_fromUtf8("SendButton_3"))
self.horizontalLayout_4.addWidget(self.SendButton_3)
self.verticalLayout_5.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.itemLabelTextBrowser_4 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_4.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_4.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_4.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_4.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_4.setObjectName(_fromUtf8("itemLabelTextBrowser_4"))
self.horizontalLayout_5.addWidget(self.itemLabelTextBrowser_4)
self.SendButton_4 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_4.sizePolicy().hasHeightForWidth())
self.SendButton_4.setSizePolicy(sizePolicy)
self.SendButton_4.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_4.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_4.setObjectName(_fromUtf8("SendButton_4"))
self.horizontalLayout_5.addWidget(self.SendButton_4)
self.verticalLayout_5.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.itemLabelTextBrowser_5 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_5.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_5.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_5.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_5.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_5.setObjectName(_fromUtf8("itemLabelTextBrowser_5"))
self.horizontalLayout_6.addWidget(self.itemLabelTextBrowser_5)
self.SendButton_5 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_5.sizePolicy().hasHeightForWidth())
self.SendButton_5.setSizePolicy(sizePolicy)
self.SendButton_5.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_5.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_5.setObjectName(_fromUtf8("SendButton_5"))
self.horizontalLayout_6.addWidget(self.SendButton_5)
self.verticalLayout_5.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.itemLabelTextBrowser_6 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_6.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_6.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_6.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_6.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_6.setObjectName(_fromUtf8("itemLabelTextBrowser_6"))
self.horizontalLayout_7.addWidget(self.itemLabelTextBrowser_6)
self.SendButton_6 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_6.sizePolicy().hasHeightForWidth())
self.SendButton_6.setSizePolicy(sizePolicy)
self.SendButton_6.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_6.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_6.setObjectName(_fromUtf8("SendButton_6"))
self.horizontalLayout_7.addWidget(self.SendButton_6)
self.verticalLayout_5.addLayout(self.horizontalLayout_7)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.itemLabelTextBrowser_7 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_7.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_7.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_7.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_7.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_7.setObjectName(_fromUtf8("itemLabelTextBrowser_7"))
self.horizontalLayout_8.addWidget(self.itemLabelTextBrowser_7)
self.SendButton_7 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_7.sizePolicy().hasHeightForWidth())
self.SendButton_7.setSizePolicy(sizePolicy)
self.SendButton_7.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_7.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_7.setObjectName(_fromUtf8("SendButton_7"))
self.horizontalLayout_8.addWidget(self.SendButton_7)
self.verticalLayout_5.addLayout(self.horizontalLayout_8)
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.itemLabelTextBrowser_8 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_8.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_8.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_8.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_8.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_8.setObjectName(_fromUtf8("itemLabelTextBrowser_8"))
self.horizontalLayout_9.addWidget(self.itemLabelTextBrowser_8)
self.SendButton_8 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_8.sizePolicy().hasHeightForWidth())
self.SendButton_8.setSizePolicy(sizePolicy)
self.SendButton_8.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_8.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_8.setObjectName(_fromUtf8("SendButton_8"))
self.horizontalLayout_9.addWidget(self.SendButton_8)
self.verticalLayout_5.addLayout(self.horizontalLayout_9)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.itemLabelTextBrowser_9 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_9.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_9.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_9.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_9.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_9.setObjectName(_fromUtf8("itemLabelTextBrowser_9"))
self.horizontalLayout_10.addWidget(self.itemLabelTextBrowser_9)
self.SendButton_9 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_9.sizePolicy().hasHeightForWidth())
self.SendButton_9.setSizePolicy(sizePolicy)
self.SendButton_9.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_9.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_9.setObjectName(_fromUtf8("SendButton_9"))
self.horizontalLayout_10.addWidget(self.SendButton_9)
self.verticalLayout_5.addLayout(self.horizontalLayout_10)
self.horizontalLayout_11 = QtGui.QHBoxLayout()
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.itemLabelTextBrowser_10 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_10.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_10.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_10.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_10.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_10.setObjectName(_fromUtf8("itemLabelTextBrowser_10"))
self.horizontalLayout_11.addWidget(self.itemLabelTextBrowser_10)
self.SendButton_10 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_10.sizePolicy().hasHeightForWidth())
self.SendButton_10.setSizePolicy(sizePolicy)
self.SendButton_10.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_10.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_10.setObjectName(_fromUtf8("SendButton_10"))
self.horizontalLayout_11.addWidget(self.SendButton_10)
self.verticalLayout_5.addLayout(self.horizontalLayout_11)
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.itemLabelTextBrowser_11 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_11.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_11.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_11.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_11.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_11.setObjectName(_fromUtf8("itemLabelTextBrowser_11"))
self.horizontalLayout_12.addWidget(self.itemLabelTextBrowser_11)
self.SendButton_11 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_11.sizePolicy().hasHeightForWidth())
self.SendButton_11.setSizePolicy(sizePolicy)
self.SendButton_11.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_11.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_11.setObjectName(_fromUtf8("SendButton_11"))
self.horizontalLayout_12.addWidget(self.SendButton_11)
self.verticalLayout_5.addLayout(self.horizontalLayout_12)
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.itemLabelTextBrowser_12 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_12.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_12.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_12.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_12.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_12.setObjectName(_fromUtf8("itemLabelTextBrowser_12"))
self.horizontalLayout_13.addWidget(self.itemLabelTextBrowser_12)
self.SendButton_12 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_12.sizePolicy().hasHeightForWidth())
self.SendButton_12.setSizePolicy(sizePolicy)
self.SendButton_12.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_12.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_12.setObjectName(_fromUtf8("SendButton_12"))
self.horizontalLayout_13.addWidget(self.SendButton_12)
self.verticalLayout_5.addLayout(self.horizontalLayout_13)
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.itemLabelTextBrowser_13 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_13.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_13.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_13.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_13.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_13.setObjectName(_fromUtf8("itemLabelTextBrowser_13"))
self.horizontalLayout_14.addWidget(self.itemLabelTextBrowser_13)
self.SendButton_13 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_13.sizePolicy().hasHeightForWidth())
self.SendButton_13.setSizePolicy(sizePolicy)
self.SendButton_13.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_13.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_13.setObjectName(_fromUtf8("SendButton_13"))
self.horizontalLayout_14.addWidget(self.SendButton_13)
self.verticalLayout_5.addLayout(self.horizontalLayout_14)
self.horizontalLayout_15 = QtGui.QHBoxLayout()
self.horizontalLayout_15.setObjectName(_fromUtf8("horizontalLayout_15"))
self.itemLabelTextBrowser_14 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_14.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_14.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_14.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_14.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_14.setObjectName(_fromUtf8("itemLabelTextBrowser_14"))
self.horizontalLayout_15.addWidget(self.itemLabelTextBrowser_14)
self.SendButton_14 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_14.sizePolicy().hasHeightForWidth())
self.SendButton_14.setSizePolicy(sizePolicy)
self.SendButton_14.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_14.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_14.setObjectName(_fromUtf8("SendButton_14"))
self.horizontalLayout_15.addWidget(self.SendButton_14)
self.verticalLayout_5.addLayout(self.horizontalLayout_15)
self.horizontalLayout_16 = QtGui.QHBoxLayout()
self.horizontalLayout_16.setObjectName(_fromUtf8("horizontalLayout_16"))
self.itemLabelTextBrowser_15 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_15.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_15.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_15.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_15.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_15.setObjectName(_fromUtf8("itemLabelTextBrowser_15"))
self.horizontalLayout_16.addWidget(self.itemLabelTextBrowser_15)
self.SendButton_15 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_15.sizePolicy().hasHeightForWidth())
self.SendButton_15.setSizePolicy(sizePolicy)
self.SendButton_15.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_15.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_15.setObjectName(_fromUtf8("SendButton_15"))
self.horizontalLayout_16.addWidget(self.SendButton_15)
self.verticalLayout_5.addLayout(self.horizontalLayout_16)
self.horizontalLayout_17 = QtGui.QHBoxLayout()
self.horizontalLayout_17.setObjectName(_fromUtf8("horizontalLayout_17"))
self.itemLabelTextBrowser_16 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_16.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_16.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_16.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_16.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_16.setObjectName(_fromUtf8("itemLabelTextBrowser_16"))
self.horizontalLayout_17.addWidget(self.itemLabelTextBrowser_16)
self.SendButton_16 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_16.sizePolicy().hasHeightForWidth())
self.SendButton_16.setSizePolicy(sizePolicy)
self.SendButton_16.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_16.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_16.setObjectName(_fromUtf8("SendButton_16"))
self.horizontalLayout_17.addWidget(self.SendButton_16)
self.verticalLayout_5.addLayout(self.horizontalLayout_17)
self.horizontalLayout_18 = QtGui.QHBoxLayout()
self.horizontalLayout_18.setObjectName(_fromUtf8("horizontalLayout_18"))
self.itemLabelTextBrowser_17 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_17.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_17.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_17.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_17.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_17.setObjectName(_fromUtf8("itemLabelTextBrowser_17"))
self.horizontalLayout_18.addWidget(self.itemLabelTextBrowser_17)
self.SendButton_17 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_17.sizePolicy().hasHeightForWidth())
self.SendButton_17.setSizePolicy(sizePolicy)
self.SendButton_17.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_17.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_17.setObjectName(_fromUtf8("SendButton_17"))
self.horizontalLayout_18.addWidget(self.SendButton_17)
self.verticalLayout_5.addLayout(self.horizontalLayout_18)
self.horizontalLayout_19 = QtGui.QHBoxLayout()
self.horizontalLayout_19.setObjectName(_fromUtf8("horizontalLayout_19"))
self.itemLabelTextBrowser_18 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_18.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_18.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_18.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_18.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_18.setObjectName(_fromUtf8("itemLabelTextBrowser_18"))
self.horizontalLayout_19.addWidget(self.itemLabelTextBrowser_18)
self.SendButton_18 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_18.sizePolicy().hasHeightForWidth())
self.SendButton_18.setSizePolicy(sizePolicy)
self.SendButton_18.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_18.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_18.setObjectName(_fromUtf8("SendButton_18"))
self.horizontalLayout_19.addWidget(self.SendButton_18)
self.verticalLayout_5.addLayout(self.horizontalLayout_19)
self.horizontalLayout_20 = QtGui.QHBoxLayout()
self.horizontalLayout_20.setObjectName(_fromUtf8("horizontalLayout_20"))
self.itemLabelTextBrowser_19 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_19.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_19.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_19.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_19.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_19.setObjectName(_fromUtf8("itemLabelTextBrowser_19"))
self.horizontalLayout_20.addWidget(self.itemLabelTextBrowser_19)
self.SendButton_19 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_19.sizePolicy().hasHeightForWidth())
self.SendButton_19.setSizePolicy(sizePolicy)
self.SendButton_19.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_19.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_19.setObjectName(_fromUtf8("SendButton_19"))
self.horizontalLayout_20.addWidget(self.SendButton_19)
self.verticalLayout_5.addLayout(self.horizontalLayout_20)
self.horizontalLayout_21 = QtGui.QHBoxLayout()
self.horizontalLayout_21.setObjectName(_fromUtf8("horizontalLayout_21"))
self.itemLabelTextBrowser_20 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_20.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_20.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_20.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_20.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_20.setObjectName(_fromUtf8("itemLabelTextBrowser_20"))
self.horizontalLayout_21.addWidget(self.itemLabelTextBrowser_20)
self.SendButton_20 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_20.sizePolicy().hasHeightForWidth())
self.SendButton_20.setSizePolicy(sizePolicy)
self.SendButton_20.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_20.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_20.setObjectName(_fromUtf8("SendButton_20"))
self.horizontalLayout_21.addWidget(self.SendButton_20)
self.verticalLayout_5.addLayout(self.horizontalLayout_21)
self.horizontalLayout_22 = QtGui.QHBoxLayout()
self.horizontalLayout_22.setObjectName(_fromUtf8("horizontalLayout_22"))
self.itemLabelTextBrowser_21 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_21.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_21.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_21.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_21.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_21.setObjectName(_fromUtf8("itemLabelTextBrowser_21"))
self.horizontalLayout_22.addWidget(self.itemLabelTextBrowser_21)
self.SendButton_21 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_21.sizePolicy().hasHeightForWidth())
self.SendButton_21.setSizePolicy(sizePolicy)
self.SendButton_21.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_21.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_21.setObjectName(_fromUtf8("SendButton_21"))
self.horizontalLayout_22.addWidget(self.SendButton_21)
self.verticalLayout_5.addLayout(self.horizontalLayout_22)
self.horizontalLayout_23 = QtGui.QHBoxLayout()
self.horizontalLayout_23.setObjectName(_fromUtf8("horizontalLayout_23"))
self.itemLabelTextBrowser_22 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_22.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_22.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_22.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_22.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_22.setObjectName(_fromUtf8("itemLabelTextBrowser_22"))
self.horizontalLayout_23.addWidget(self.itemLabelTextBrowser_22)
self.SendButton_22 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_22.sizePolicy().hasHeightForWidth())
self.SendButton_22.setSizePolicy(sizePolicy)
self.SendButton_22.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_22.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_22.setObjectName(_fromUtf8("SendButton_22"))
self.horizontalLayout_23.addWidget(self.SendButton_22)
self.verticalLayout_5.addLayout(self.horizontalLayout_23)
self.horizontalLayout_24 = QtGui.QHBoxLayout()
self.horizontalLayout_24.setObjectName(_fromUtf8("horizontalLayout_24"))
self.itemLabelTextBrowser_23 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_23.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_23.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_23.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_23.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_23.setObjectName(_fromUtf8("itemLabelTextBrowser_23"))
self.horizontalLayout_24.addWidget(self.itemLabelTextBrowser_23)
self.SendButton_23 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_23.sizePolicy().hasHeightForWidth())
self.SendButton_23.setSizePolicy(sizePolicy)
self.SendButton_23.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_23.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_23.setObjectName(_fromUtf8("SendButton_23"))
self.horizontalLayout_24.addWidget(self.SendButton_23)
self.verticalLayout_5.addLayout(self.horizontalLayout_24)
self.horizontalLayout_25 = QtGui.QHBoxLayout()
self.horizontalLayout_25.setObjectName(_fromUtf8("horizontalLayout_25"))
self.itemLabelTextBrowser_24 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_24.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_24.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_24.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_24.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_24.setObjectName(_fromUtf8("itemLabelTextBrowser_24"))
self.horizontalLayout_25.addWidget(self.itemLabelTextBrowser_24)
self.SendButton_24 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_24.sizePolicy().hasHeightForWidth())
self.SendButton_24.setSizePolicy(sizePolicy)
self.SendButton_24.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_24.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_24.setObjectName(_fromUtf8("SendButton_24"))
self.horizontalLayout_25.addWidget(self.SendButton_24)
self.verticalLayout_5.addLayout(self.horizontalLayout_25)
self.horizontalLayout_26 = QtGui.QHBoxLayout()
self.horizontalLayout_26.setObjectName(_fromUtf8("horizontalLayout_26"))
self.itemLabelTextBrowser_25 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_25.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_25.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_25.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_25.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_25.setObjectName(_fromUtf8("itemLabelTextBrowser_25"))
self.horizontalLayout_26.addWidget(self.itemLabelTextBrowser_25)
self.SendButton_25 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_25.sizePolicy().hasHeightForWidth())
self.SendButton_25.setSizePolicy(sizePolicy)
self.SendButton_25.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_25.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_25.setObjectName(_fromUtf8("SendButton_25"))
self.horizontalLayout_26.addWidget(self.SendButton_25)
self.verticalLayout_5.addLayout(self.horizontalLayout_26)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_6.addWidget(self.scrollArea)
self.retranslateUi(GenericCommandDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("clicked(QAbstractButton*)")), GenericCommandDialog.close)
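        # Old-style signal/slot connection as emitted by pyuic4; the
        # new-style equivalent (illustrative, not part of the generated file) would be:
        #   self.buttonBox.clicked.connect(GenericCommandDialog.close)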
QtCore.QMetaObject.connectSlotsByName(GenericCommandDialog)
def retranslateUi(self, GenericCommandDialog):
GenericCommandDialog.setWindowTitle(_translate("GenericCommandDialog", "Command Page", None))
self.subSystemCommandPageLabel.setText(_translate("GenericCommandDialog", "Subsystem", None))
self.packetIdLabel.setText(_translate("GenericCommandDialog", "Packet ID", None))
self.label_5.setText(_translate("GenericCommandDialog", "Send To:", None))
self.label.setText(_translate("GenericCommandDialog", "Command", None))
self.SendButton_1.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_2.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_3.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_4.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_5.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_6.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_7.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_8.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_9.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_10.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_11.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_12.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_13.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_14.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_15.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_16.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_17.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_18.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_19.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_20.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_21.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_22.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_23.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_24.setText(_translate("GenericCommandDialog", "Send", None))
self.SendButton_25.setText(_translate("GenericCommandDialog", "Send", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
GenericCommandDialog = QtGui.QWidget()
ui = Ui_GenericCommandDialog()
ui.setupUi(GenericCommandDialog)
GenericCommandDialog.show()
sys.exit(app.exec_())
|
CACTUS-Mission/TRAPSat
|
TRAPSat_cFS/cfs/cfe/tools/cFS-GroundSystem/Subsystems/cmdGui/GenericCommandDialog.py
|
Python
|
mit
| 49,087
| 0.004339
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from contextlib import contextmanager
from textwrap import dedent
from pants.util.dirutil import safe_file_dump, safe_rmtree
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine
SUBPROJ_SPEC = 'testprojects/src/python/subproject_test/'
SUBPROJ_ROOT = 'testprojects/src/python/subproject_test/subproject'
BUILD_FILES = {
'testprojects/src/python/subproject_test/BUILD':
"""
python_library(
dependencies = ['//testprojects/src/python/subproject_test/subproject/src/python:helpers'],
)
""",
'testprojects/src/python/subproject_test/subproject/BUILD':
"""
target(
name = 'local',
dependencies = [
':relative',
'//:absolute',
],
)
target(
name = 'relative',
)
target(
name = 'absolute',
)
""",
'testprojects/src/python/subproject_test/subproject/src/python/BUILD':
"""
python_library(
name = 'helpers',
dependencies = ['//src/python/helpershelpers'],
)
""",
'testprojects/src/python/subproject_test/subproject/src/python/helpershelpers/BUILD':
"""
python_library(
name = 'helpershelpers',
)
"""
}
"""
Test layout
-----------
testprojects/
src/
python/
subproject_test/
BUILD
subproject/
src/
python/
BUILD/
helpershelpers/
BUILD/
"""
@contextmanager
def harness():
try:
for name, content in BUILD_FILES.items():
safe_file_dump(name, dedent(content))
yield
finally:
safe_rmtree(SUBPROJ_SPEC)
class SubprojectIntegrationTest(PantsRunIntegrationTest):
@ensure_engine
def test_subproject_without_flag(self):
"""
Assert that when getting the dependencies of a project which relies
on a subproject which relies on its own internal library, a failure
occurs without the --subproject-roots option
"""
with harness():
pants_args = ['dependencies', SUBPROJ_SPEC]
self.assert_failure(self.run_pants(pants_args))
@ensure_engine
def test_subproject_with_flag(self):
"""
Assert that when getting the dependencies of a project which relies on
a subproject which relies on its own internal library, all things
go well when that subproject is declared as a subproject
"""
with harness():
# Has dependencies below the subproject.
pants_args = ['--subproject-roots={}'.format(SUBPROJ_ROOT),
'dependencies', SUBPROJ_SPEC]
self.assert_success(self.run_pants(pants_args))
# A relative path at the root of the subproject.
pants_args = ['--subproject-roots={}'.format(SUBPROJ_ROOT),
'dependencies', '{}:local'.format(SUBPROJ_ROOT)]
self.assert_success(self.run_pants(pants_args))
|
pombredanne/pants
|
tests/python/pants_test/build_graph/test_subproject_integration.py
|
Python
|
apache-2.0
| 3,193
| 0.006577
|
#!/usr/bin/env python
##################################################
# Parallel MLMC: Config class #
# #
# Jun Nie #
# Last modification: 19-09-2017 #
##################################################
import sys, os
import numpy as np
class Config:
"""
    Config class used by the FVM solver, MLMC driver and parallelization layer.
    TODO: add reading of config parameters from a file.
"""
def __init__(self, config_file):
# === fvm solver parameters
self.DIM = 2
self.ORDER = 1
self.case = 'vayu_burgers' # 'vayu_ls89', 'su2_ls89'
self.mesh_ncoarsest = 8+1
self.mesh_nfinest = 128+1
self.mesh_filename = '/home/jun/vayu/TestMatrix/Burgers.Test/mesh/' + \
'cartesian_tube_0009x0009x2.BlockMesh'
# === mlmc parameters
self.eps = 0.
self.alpha = 0.
self.beta = 0.
self.gamma = 0.
self.L = 2 # highest level
self.ML = 8 # number of samples on finest level
self.M = 2 # refinement factor
self.SAMPLES_FACTOR = 1
self.mlmc_convergence_test = True
self.READ_NUMBER_OF_SAMPLES_FROM_FILE = False
self.USE_OPTIMAL_NUMBER_OF_SAMPLES = False
self.USE_EQUIDISTRIBUTED_NUMBER_OF_SAMPLES = True
self.COMPUTE_IN_DIFFERENCE = True
# === qoi
self.STATS = 'MEAN_VAR'
# === parallelization parameters
self.multi = 'mpi' # 'mpi' for parallel, 'single' for serial
self.MULTIN = 1 # number of processes for fvm solver, 1 or multiples of 2
self.MULTIM = 4 # number of samplers (processor group)
self.MULTI_CORES = 0
# === update
self.update(config_file)
def update(self, config_file):
''' read config file and update parameters'''
pass
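    # Hedged sketch (not in the original, which leaves update() as a TODO):
    # one possible implementation that reads "key = value" lines and coerces
    # values with ast.literal_eval. The plain-text file format is an
    # assumption; adapt it to the real config format before use.
    def _update_from_file(self, config_file):
        import ast
        if not config_file or not os.path.isfile(config_file):
            return
        with open(config_file) as fh:
            for line in fh:
                line = line.split('#', 1)[0].strip()
                if not line or '=' not in line:
                    continue
                key, value = [part.strip() for part in line.split('=', 1)]
                if hasattr(self, key):
                    try:
                        setattr(self, key, ast.literal_eval(value))
                    except (ValueError, SyntaxError):
                        setattr(self, key, value)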
if __name__ == '__main__':
pass
|
henrynj/PMLMC
|
v0.0/config.py
|
Python
|
gpl-3.0
| 2,156
| 0.008813
|
########################################################################
# amara/xslt/expressions/avt.py
"""
Implementation of XSLT attribute value templates
"""
from amara.xpath import datatypes
from amara.xpath.expressions import expression
from amara.xslt import XsltError
from amara.xslt.expressions import _avt
_parse_avt = _avt.parser().parse
class avt_expression(expression):
__slots__ = ('_format', '_args')
def __init__(self, value):
try:
# parts is a list of unicode and/or parsed XPath
parts = _parse_avt(value)
except _avt.error, error:
raise XsltError(XsltError.AVT_SYNTAX)
self._args = args = []
for pos, part in enumerate(parts):
if isinstance(part, unicode):
if '%' in part:
parts[pos] = part.replace('%', '%%')
else:
parts[pos] = u'%s'
args.append(part)
self._format = u''.join(parts)
if not self._args:
# use empty format args to force '%%' replacement
self._format = datatypes.string(self._format % ())
return
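    # Example (illustrative, not from the original source): for the AVT
    # u'item-{position()}.html' the parser yields [u'item-', <expr>, u'.html'],
    # so _format becomes u'item-%s.html' and _args holds the parsed expression.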
def evaluate_as_string(self, context):
if not self._args:
return self._format
result = self._format % tuple(arg.evaluate_as_string(context)
for arg in self._args)
return datatypes.string(result)
evaluate = evaluate_as_string
def __str__(self):
return '{' + self._format % tuple(self._args) + '}'
|
zepheira/amara
|
lib/xslt/expressions/avt.py
|
Python
|
apache-2.0
| 1,540
| 0.001299
|
#!/usr/bin/env python
from distutils.core import setup
from pip.req import parse_requirements
install_reqs = parse_requirements("requirements.txt", session=False)
reqs = [str(ir.req) for ir in install_reqs]
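# Note (added remark): pip.req.parse_requirements is a pip-internal API and
# was removed in pip 10; with modern pip the usual replacement is to read
# requirements.txt directly, e.g. reqs = open('requirements.txt').read().split()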
setup(name='pgoapi',
version='1.0',
url='https://github.com/tejado/pgoapi',
packages=['pgoapi'],
install_requires=reqs)
|
earthchie/PokemonGo-Bot
|
setup.py
|
Python
|
mit
| 355
| 0
|
from oracleplsqlsource import OraclePLSQLSource
class OracleJavaSource(OraclePLSQLSource):
def __init__(self, name, source):
self.name = name
#debug_message("debug: generating java source ")
OraclePLSQLSource.__init__(self,source)
|
sawdog/OraclePyDoc
|
oraclepydoc/oracleobjects/oraclejavasource.py
|
Python
|
gpl-2.0
| 261
| 0.007663
|
from cupy import elementwise
_id = 'out0 = in0'
# TODO(okuta): Implement convolve
_clip = elementwise.create_ufunc(
'cupy_clip',
('???->?', 'bbb->b', 'BBB->B', 'hhh->h', 'HHH->H', 'iii->i', 'III->I',
'lll->l', 'LLL->L', 'qqq->q', 'QQQ->Q', 'eee->e', 'fff->f', 'ddd->d'),
'out0 = min(in2, max(in1, in0))')
def clip(a, a_min, a_max, out=None):
'''Clips the values of an array to a given interval.
    This is equivalent to ``maximum(minimum(a, a_max), a_min)``, but this
    function is more efficient.
Args:
a (cupy.ndarray): The source array.
a_min (scalar or cupy.ndarray): The left side of the interval.
a_max (scalar or cupy.ndarray): The right side of the interval.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: Clipped array.
.. seealso:: :func:`numpy.clip`
'''
return _clip(a, a_min, a_max, out=out)
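# Usage sketch (illustrative only; assumes a working CuPy installation):
#   x = cupy.array([-2.0, 0.5, 3.0])
#   clip(x, 0.0, 1.0)  # -> array([ 0. ,  0.5,  1. ])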
sqrt = elementwise.create_ufunc(
'cupy_sqrt',
# I think this order is a bug of NumPy, though we select this "buggy"
# behavior for compatibility with NumPy.
('f->f', 'd->d', 'e->e'),
'out0 = sqrt(in0)',
doc='''Elementwise positive square-root function.
.. note::
    By default this ufunc outputs float32 arrays for float16 inputs, the
    same as NumPy 1.9. If you want to override this behavior, specify the
    dtype argument explicitly, or use ``cupy.math.misc.sqrt_fixed`` instead.
.. seealso:: :data:`numpy.sqrt`
''')
# Fixed version of sqrt
sqrt_fixed = elementwise.create_ufunc(
'cupy_sqrt',
('e->e', 'f->f', 'd->d'),
'out0 = sqrt(in0)')
square = elementwise.create_ufunc(
'cupy_square',
('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',
'Q->Q', 'e->e', 'f->f', 'd->d'),
'out0 = in0 * in0',
doc='''Elementwise square function.
.. seealso:: :data:`numpy.square`
''')
absolute = elementwise.create_ufunc(
'cupy_absolute',
(('?->?', _id), 'b->b', ('B->B', _id), 'h->h', ('H->H', _id), 'i->i',
('I->I', _id), 'l->l', ('L->L', _id), 'q->q', ('Q->Q', _id),
('e->e', 'out0 = fabsf(in0)'),
('f->f', 'out0 = fabsf(in0)'),
('d->d', 'out0 = fabs(in0)')),
'out0 = in0 > 0 ? in0 : -in0',
doc='''Elementwise absolute value function.
.. seealso:: :data:`numpy.absolute`
''')
# TODO(beam2d): Implement it
# fabs
_unsigned_sign = 'out0 = in0 > 0'
sign = elementwise.create_ufunc(
'cupy_sign',
('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),
'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),
'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d'),
'out0 = (in0 > 0) - (in0 < 0)',
doc='''Elementwise sign function.
It returns -1, 0, or 1 depending on the sign of the input.
.. seealso:: :data:`numpy.sign`
''')
_float_maximum = \
'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : max(in0, in1)'
maximum = elementwise.create_ufunc(
'cupy_maximum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_maximum),
('ff->f', _float_maximum),
('dd->d', _float_maximum)),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.maximum`
''')
_float_minimum = \
'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : min(in0, in1)'
minimum = elementwise.create_ufunc(
'cupy_minimum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_minimum),
('ff->f', _float_minimum),
('dd->d', _float_minimum)),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.minimum`
''')
fmax = elementwise.create_ufunc(
'cupy_fmax',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmax`
''')
fmin = elementwise.create_ufunc(
'cupy_fmin',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
    If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmin`
''')
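# Illustrative NaN behaviour of the four functions above (values shown as
# plain Python for clarity; real inputs would be cupy.ndarray objects):
#   maximum([1, nan], [2, 0]) -> [2, nan]   # NaN propagates
#   fmax([1, nan], [2, 0])    -> [2, 0]     # NaN is ignored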
# TODO(okuta): Implement nan_to_num
# TODO(okuta): Implement real_if_close
# TODO(okuta): Implement interp
|
tscohen/chainer
|
cupy/math/misc.py
|
Python
|
mit
| 4,772
| 0
|
""" Configuration values.
"""
# Command paths (you can change these to e.g. absolute paths in calling code)
CMD_FLAC = "flac"
CMD_FFMPEG = "ffmpeg"
CMD_IM_MONTAGE = "montage"
CMD_IM_MOGRIFY = "mogrify"
CMD_IM_CONVERT = "convert"
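# Example override from calling code (illustrative):
#   from auvyon import config
#   config.CMD_FFMPEG = "/usr/local/bin/ffmpeg"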
|
pyroscope/auvyon
|
src/auvyon/config.py
|
Python
|
gpl-2.0
| 230
| 0
|
# python3
class HeapBuilder:
def __init__(self):
self._swaps = []
self._data = []
def ReadData(self):
n = int(input())
self._data = [int(s) for s in input().split()]
assert n == len(self._data)
def WriteResponse(self):
print(len(self._swaps))
for swap in self._swaps:
print(swap[0], swap[1])
def GenerateSwaps(self):
# The following naive implementation just sorts
# the given sequence using selection sort algorithm
# and saves the resulting sequence of swaps.
# This turns the given array into a heap,
# but in the worst case gives a quadratic number of swaps.
#
# TODO: replace by a more efficient implementation
for i in range(len(self._data)):
for j in range(i + 1, len(self._data)):
if self._data[i] > self._data[j]:
self._swaps.append((i, j))
self._data[i], self._data[j] = self._data[j], self._data[i]
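  # Hedged sketch (not part of the original starter code): a bottom-up
  # sift-down heapification records at most O(n log n) swaps instead of the
  # quadratic number above. Method names below are illustrative additions;
  # to use it, call GenerateSwapsFast instead of GenerateSwaps in Solve.
  def GenerateSwapsFast(self):
    n = len(self._data)
    for i in range(n // 2 - 1, -1, -1):
      self._SiftDown(i, n)
  def _SiftDown(self, i, n):
    while True:
      smallest = i
      left, right = 2 * i + 1, 2 * i + 2
      if left < n and self._data[left] < self._data[smallest]:
        smallest = left
      if right < n and self._data[right] < self._data[smallest]:
        smallest = right
      if smallest == i:
        return
      self._swaps.append((i, smallest))
      self._data[i], self._data[smallest] = self._data[smallest], self._data[i]
      i = smallest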
def Solve(self):
self.ReadData()
self.GenerateSwaps()
self.WriteResponse()
if __name__ == '__main__':
heap_builder = HeapBuilder()
heap_builder.Solve()
|
xunilrj/sandbox
|
courses/coursera-sandiego-algorithms/data-structures/assignment002/make_heap/build_heap.py
|
Python
|
apache-2.0
| 1,100
| 0.010909
|
# -*- coding: utf-8 -*-
import StringIO
import csv
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.dom import minidom
import configdb
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
## SYNTAX
# script.py cities.csv 2015-01-01 2015-04-01 csv|xml
# cities.csv obtained from "Gestió agrupada impost 1.5%"
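# Example cities.csv contents (illustrative; only the first ';'-separated
# column is read, and it must hold the internal res_municipi id):
#   1234;Girona
#   5678;Barcelona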
class MunicipalTaxesInvoicingReport:
def __init__(self, cursor, start_date, end_date, tax, aggregated):
self.cursor = cursor
self.start_date = start_date
self.end_date = end_date
self.tax = tax
self.aggregated = aggregated
def by_city(self, ids, file_type):
sql = '''
SELECT
municipi.name AS name,
municipi.ine AS ine,
EXTRACT(YEAR FROM invoice.date_invoice) AS invoice_year,
EXTRACT(QUARTER FROM invoice.date_invoice) AS invoice_quarter,
COALESCE(SUM(invoice_line.price_subtotal::float*(
CASE
WHEN factura_line.tipus IN ('subtotal_xml') AND invoice.type='in_invoice' THEN 1
WHEN factura_line.tipus IN ('subtotal_xml') AND invoice.type='in_refund' THEN -1
ELSE 0
END
)),0.0) AS provider_amount,
COALESCE(SUM(invoice_line.price_subtotal::float*(
CASE
WHEN factura_line.tipus IN ('energia','reactiva','potencia') AND invoice.type='out_invoice' THEN 1
WHEN factura_line.tipus IN ('energia','reactiva','potencia') AND invoice.type='out_refund' THEN -1
ELSE 0
END
)),0.0) AS client_amount
FROM giscedata_facturacio_factura_linia AS factura_line
LEFT JOIN account_invoice_line AS invoice_line ON invoice_line.id = factura_line.invoice_line_id
LEFT JOIN giscedata_facturacio_factura AS factura ON factura.id = factura_line.factura_id
LEFT JOIN account_invoice AS invoice ON invoice.id = factura.invoice_id
LEFT JOIN giscedata_polissa AS polissa ON polissa.id = factura.polissa_id
LEFT JOIN giscedata_cups_ps AS cups ON cups.id = polissa.cups
LEFT JOIN res_municipi as municipi on municipi.id = cups.id_municipi
WHERE municipi.ID IN ({0})
AND ((invoice.date_invoice >= '{1}') AND (invoice.date_invoice < '{2}'))
AND (((invoice.type LIKE 'out_%%')
AND ((invoice.state = 'open') OR (invoice.state = 'paid')))
OR (invoice.type LIKE 'in_%%'))
GROUP BY 1,2,3,4
ORDER BY 1,2,3,4
'''.format(','.join(map(str, ids)), self.start_date, self.end_date)
self.cursor.execute(sql, {'start_date': self.start_date,
'end_date': self.end_date,
'ids': ids})
return self.build_report(self.cursor.fetchall(), file_type)
def build_report(self, records, file_type):
invoicing_by_name = {}
invoicing_by_date = {}
ines = {}
for record in records:
name = record[0]
ine = record[1]
year = record[2]
quarter = record[3]
invoicing_by_name.setdefault(name, {'total_provider_amount': 0, 'total_client_amount': 0, 'quarters': []})
invoicing_by_name[name]['total_provider_amount'] += record[4]
invoicing_by_name[name]['total_client_amount'] += record[5]
invoicing_by_name[name]['quarters'].append({
'year': record[2],
'quarter': record[3],
'provider_amount': record[4],
'client_amount': record[5]
})
invoicing_by_date.setdefault(year, {})
invoicing_by_date[year].setdefault(quarter, {'total_provider_amount': 0, 'total_client_amount': 0})
invoicing_by_date[year][quarter]['total_provider_amount'] += record[4]
invoicing_by_date[year][quarter]['total_client_amount'] += record[5]
ines.setdefault(name, ine)
if file_type=='csv':
## CSV
csv_doc=StringIO.StringIO()
writer_report = csv.writer(csv_doc)
for name,v in sorted(invoicing_by_name.items()):
writer_report.writerow([name])
writer_report.writerow(['Año', 'Trimestre', 'Pagos a distribuidora', 'Facturas a clientes'])
for quarter in v['quarters']:
writer_report.writerow([
quarter['year'],
quarter['quarter'],
round(quarter['provider_amount'], 2),
round(quarter['client_amount'], 2)
])
writer_report.writerow([])
writer_report.writerow(['', '', '', '', 'Ingresos brutos', 'Tasa', 'Total'])
diff = v['total_client_amount'] - v['total_provider_amount']
writer_report.writerow(['Total',
'',
round(v['total_provider_amount'], 2),
round(v['total_client_amount'], 2),
round(diff, 2),
self.tax,
round(diff*(self.tax/100.0), 2)
])
writer_report.writerow([])
writer_report.writerow([])
            writer_report.writerow(['Año', 'Trimestre', 'Pagos a distribuidora', 'Facturas a clientes', 'Ingresos',
                                    'Tasa', 'Total'])
for year, v in sorted(invoicing_by_date.items()):
for quarter, v in sorted(invoicing_by_date[year].items()):
diff = v['total_client_amount'] - v['total_provider_amount']
writer_report.writerow([
year,
quarter,
round(v['total_provider_amount'], 2),
round(v['total_client_amount'], 2),
round(diff, 2),
self.tax,
round(diff*(self.tax/100.0), 2)
])
doc = csv_doc.getvalue()
if file_type == 'xml':
## XML
_empresa = Element("EMPRESA")
_datos = SubElement(_empresa, 'DATOS')
_nombre = SubElement(_datos, 'NOMBRE')
_nombre.text = "Som Energia SCCL"
_nif = SubElement(_datos, 'NIF')
_nif.text = "F55091367"
_municipios = SubElement(_empresa, 'MUNICIPIOS')
for name,v in sorted(invoicing_by_name.items()):
for quarter in v['quarters']:
_municipio = SubElement(_municipios, 'MUNICIPIO')
_ine = SubElement(_municipio, 'INEMUNICIPIO')
_ine.text = ines[name]
_ejercicio = SubElement(_municipio, 'EJERCICIO')
_ejercicio.text = str(int(quarter['year']))
_periodo = SubElement(_municipio, 'PERIODO')
_periodo.text = str(int(quarter['quarter']))
_fechaalta = SubElement(_municipio, 'FECHAALTA')
_fechabaja = SubElement(_municipio, 'FECHABAJA')
_tiposumin = SubElement(_municipio, 'TIPOSUMIN')
_tiposumin.text = '2'
_descsum = SubElement(_municipio, 'DESCSUM')
_descsum.text = 'Electricidad'
_descperi = SubElement(_municipio, 'DESCPERI')
_facturacion = SubElement(_municipio, 'FACTURACION')
_facturacion.text = '%0.2f' % quarter['client_amount']
_derechosacceso = SubElement(_municipio, 'DERECHOSACCESO')
_derechosacceso.text = '%0.2f' % quarter['provider_amount']
_compensacion = SubElement(_municipio, 'COMPENSACION')
_compensacion.text = '0.00'
_baseimponible = SubElement(_municipio, 'BASEIMPONIBLE')
diff = (quarter['client_amount'] - quarter['provider_amount'])
_baseimponible.text = '%0.2f' % diff
_cuotabasica = SubElement(_municipio, 'CUOTABASICA')
_cuotabasica.text = '%0.2f' % (self.tax/100)
_totalingresar = SubElement(_municipio, 'TOTALINGRESAR')
_totalingresar.text = '%0.2f' % (diff*(self.tax/100.0))
doc = prettify(_empresa)
return doc
import psycopg2
import psycopg2.extras
import sys
municipis_file = sys.argv[1]
start_date = sys.argv[2]
end_date = sys.argv[3]
type_file = sys.argv[4]
municipis_id = []
with open(municipis_file, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
municipis_id.append(int(row[0]))
try:
dbconn=psycopg2.connect(**configdb.psycopg)
except Exception, ex:
print "Unable to connect to database " + configdb['DB_NAME']
raise ex
m = MunicipalTaxesInvoicingReport(dbconn.cursor(), start_date,end_date,1.5,False)
print m.by_city(municipis_id, type_file)
|
Som-Energia/invoice-janitor
|
Taxes/municipaltax.py
|
Python
|
agpl-3.0
| 9,574
| 0.004493
|
import android
import android.activity
from os import unlink
from jnius import autoclass, cast
from plyer_lach.facades import Camera
from plyer_lach.platforms.android import activity
Intent = autoclass('android.content.Intent')
PythonActivity = autoclass('org.renpy.android.PythonActivity')
MediaStore = autoclass('android.provider.MediaStore')
Uri = autoclass('android.net.Uri')
class AndroidCamera(Camera):
def _take_picture(self, on_complete, filename=None):
assert(on_complete is not None)
self.on_complete = on_complete
self.filename = filename
android.activity.unbind(on_activity_result=self._on_activity_result)
android.activity.bind(on_activity_result=self._on_activity_result)
intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
uri = Uri.parse('file://' + filename)
parcelable = cast('android.os.Parcelable', uri)
intent.putExtra(MediaStore.EXTRA_OUTPUT, parcelable)
activity.startActivityForResult(intent, 0x123)
def _take_video(self, on_complete, filename=None):
assert(on_complete is not None)
self.on_complete = on_complete
self.filename = filename
android.activity.unbind(on_activity_result=self._on_activity_result)
android.activity.bind(on_activity_result=self._on_activity_result)
intent = Intent(MediaStore.ACTION_VIDEO_CAPTURE)
uri = Uri.parse('file://' + filename)
parcelable = cast('android.os.Parcelable', uri)
intent.putExtra(MediaStore.EXTRA_OUTPUT, parcelable)
# 0 = low quality, suitable for MMS messages,
# 1 = high quality
intent.putExtra(MediaStore.EXTRA_VIDEO_QUALITY, 1)
activity.startActivityForResult(intent, 0x123)
def _on_activity_result(self, requestCode, resultCode, intent):
if requestCode != 0x123:
return
android.activity.unbind(on_activity_result=self._on_activity_result)
if self.on_complete(self.filename):
self._unlink(self.filename)
def _unlink(self, fn):
try:
unlink(fn)
except:
pass
def instance():
return AndroidCamera()
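# Usage sketch (illustrative; assumes the plyer facade wiring and a running
# python-for-android activity):
#   camera = instance()
#   camera.take_picture('/sdcard/DCIM/shot.jpg',
#                       on_complete=lambda filename: False)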
|
locksmith47/turing-sim-kivy
|
src/plyer_lach/platforms/android/camera.py
|
Python
|
mit
| 2,169
| 0.000461
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'fbcode_builder steps to build Facebook Thrift'
import specs.fbthrift as fbthrift
def fbcode_builder_spec(builder):
return {
'depends_on': [fbthrift],
}
config = {
'github_project': 'facebook/fbthrift',
'fbcode_builder_spec': fbcode_builder_spec,
}
|
getyourguide/fbthrift
|
build/fbcode_builder_config.py
|
Python
|
apache-2.0
| 449
| 0
|
from setuptools import setup
import pybvc
setup(
name='pybvc',
version=pybvc.__version__,
description='A python library for programming your network via the Brocade Vyatta Controller (BVC)',
long_description=open('README.rst').read(),
author='Elbrys Networks',
author_email='jeb@elbrys.com',
url='https://github.com/brcdcomm/pybvc',
packages=['pybvc',
'pybvc.common',
'pybvc.controller',
'pybvc.netconfdev',
'pybvc.netconfdev.vrouter',
'pybvc.netconfdev.vdx',
'pybvc.openflowdev'
],
install_requires=['requests>=1.0.0',
'PyYAML',
'xmltodict'],
zip_safe=False,
include_package_data=True,
platforms='any',
license='BSD',
keywords='sdn nfv bvc brocade vyatta controller network vrouter',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'Topic :: System :: Networking',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
)
|
tnadeau/pybvc
|
setup.py
|
Python
|
bsd-3-clause
| 1,243
| 0.000805
|