text stringlengths 8 6.05M |
|---|
# Allow running this script directly (not as a package module) by adding the
# project root (three directory levels up) to sys.path so that absolute
# imports such as `utct.MXNet.converter` resolve.
if __name__ == '__main__' and __package__ is None:
    import sys
    from os import path
    sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
import os
from sys import platform
import argparse
import mxnet as mx
from mnist_model import MnistModel
from mnist_data_source import MnistDataSource
from utct.MXNet.converter import Converter
def parse_args():
    """Parse command-line arguments for importing MXNet model parameters.

    Returns:
        argparse.Namespace with attributes: data_cache_dir, checkpoint_dir,
        prefix, epoch, src_filepath, gpus.
    """
    # BUG FIX: the original test `"win" in platform.lower()` also matched
    # "darwin" (macOS), silently disabling the GPU default there. Use a
    # prefix check so only Windows ("win32"/"win64") is treated as Windows.
    is_win_os = platform.lower().startswith("win")
    parser = argparse.ArgumentParser(
        description='Import MXNet model parameters from h5 file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--cache-dir',
        dest='data_cache_dir',
        help='Path to data cache directory',
        required=True,
        type=str)
    parser.add_argument(
        '--checkpoint-dir',
        dest='checkpoint_dir',
        help='Path to checkpoint files',
        required=True,
        type=str)
    parser.add_argument(
        '--prefix',
        dest='prefix',
        help='Prefix for MXNet checkpoint files',
        default='mnist',
        type=str)
    parser.add_argument(
        '--epoch',
        dest='epoch',
        help='Epoch for MXNet checkpoint files',
        required=True,
        type=int)
    parser.add_argument(
        '--input',
        dest='src_filepath',
        help='Input file with MXNet model parameters',
        required=True,
        type=str)
    parser.add_argument(
        '--gpus',
        dest='gpus',
        help='List of GPU device numbers to train with, empty is CPU',
        # Default to CPU on Windows, GPU 0 elsewhere.
        default=([] if is_win_os else [0]),
        nargs='*',
        type=int)
    args = parser.parse_args()
    return args
def main():
    """Entry point: import h5 model parameters and write an MXNet checkpoint."""
    args = parse_args()
    data_source = MnistDataSource(use_augmentation=False)
    data_source.update_project_dirname(args.data_cache_dir)
    model = MnistModel()
    # Run on the requested GPUs, or fall back to CPU when the list is empty.
    if args.gpus:
        ctx = [mx.gpu(gpu_id) for gpu_id in args.gpus]
    else:
        ctx = mx.cpu()
    Converter.import_from_h5(
        model=model,
        data_source=data_source,
        src_filepath=args.src_filepath,
        checkpoint_path=os.path.join(args.checkpoint_dir, args.prefix),
        checkpoint_epoch=args.epoch,
        ctx=ctx)
# Run the import when executed as a script.
if __name__ == '__main__':
    main()
|
from flask import g, Flask
import psycopg2
import psycopg2.extras
import hashlib
import sys
import bcrypt
# Database connection settings for the dockerized PostgreSQL instance.
# NOTE(review): credentials are hard-coded; in production these should come
# from environment variables or a secrets store.
DB_HOST = "db"
DB_NAME = "postgres"
DB_USER = "postgres"
DB_PASS = "1234"
def get_db():
    """Return the request-scoped PostgreSQL connection, opening it lazily.

    The connection is cached on Flask's `g` object so each request reuses
    a single connection; `close_db` releases it at teardown.
    """
    if 'db' not in g:
        g.db = psycopg2.connect(
            dbname=DB_NAME,
            user=DB_USER,
            password=DB_PASS,
            host=DB_HOST,
        )
    return g.db
def close_db(e=None):
    """Close and discard the request's database connection, if one was opened.

    Signature matches Flask's teardown callback contract (`e` is the
    teardown exception, unused here).
    """
    connection = g.pop('db', None)
    if connection is None:
        return
    connection.close()
def getUserFromDbById(user_id):
    """Return the user row with the given id as a DictRow, or None if absent."""
    with g.db:
        with g.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute(
                "SELECT * FROM todolist.users WHERE user_id = %s;",
                (user_id,),
            )
            rows = cur.fetchall()
    # Empty result set means no such user.
    if not rows:
        return None
    return rows[0]
def getUserFromDbByName(username):
    """Return the user row with the given user_name as a DictRow, or None."""
    with g.db:
        with g.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute(
                "SELECT * FROM todolist.users WHERE user_name = %s;",
                (username,),
            )
            rows = cur.fetchall()
    if not rows:
        return None
    return rows[0]
def userExists(username):
    """Return True if a user with exactly this user_name exists.

    BUG FIX: the original query used LIKE with the raw username, so '%' and
    '_' in the supplied name acted as SQL wildcards (e.g. '%' matched every
    user). An exact equality comparison is what the callers need.
    """
    with g.db:
        with g.db.cursor() as cur:
            cur.execute(
                "SELECT user_name FROM todolist.users WHERE user_name = %s;",
                (username,),
            )
            result = cur.fetchall()
    return len(result) != 0
def addUser(username, password):
    """Insert a new user, storing a bcrypt hash of the password.

    bcrypt embeds its salt in the hash, so only the hash string is stored.
    """
    # Hash outside the transaction; it does not need the connection.
    hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
    with g.db:
        with g.db.cursor() as cur:
            cur.execute(
                "INSERT INTO todolist.users (user_name,user_password) VALUES (%s, %s)",
                (username, hashed.decode('utf-8')),
            )
def addTaskDb(user_id, task):
    """Insert a new task for the given user."""
    with g.db:
        with g.db.cursor() as cur:
            cur.execute(
                "INSERT INTO todolist.user_task (user_id, task) VALUES (%s, %s);",
                (user_id, task),
            )
def getTasksDb(user_id):
    """Return all task rows for the given user (possibly an empty list)."""
    with g.db:
        with g.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute(
                "SELECT * FROM todolist.user_task WHERE user_id = %s;",
                (user_id,),
            )
            rows = cur.fetchall()
    return rows
def getTaskDb(user_task_id):
    """Return the task rows matching the given task id (a list of DictRow)."""
    with g.db:
        with g.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute(
                "SELECT * FROM todolist.user_task WHERE user_task_id = %s;",
                (user_task_id,),
            )
            rows = cur.fetchall()
    return rows
def updateTaskDb(id, task):
    """Replace the text of the task identified by `id`."""
    with g.db:
        with g.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute(
                "UPDATE todolist.user_task SET task=%s WHERE user_task_id=%s;",
                (task, id),
            )
def deleteTaskDb(id):
    """Delete the task identified by `id`."""
    with g.db:
        with g.db.cursor() as cur:
            cur.execute(
                "DELETE FROM todolist.user_task WHERE user_task_id=%s;",
                (id,),
            )
|
# import
from tkinter import *

# Create the main window.
win = Tk()
# Window size.
win.geometry("1000x1000")
# Window title.
win.title("Smart Management")
# Default font for every widget.
win.option_add("*Font", "맑은고딕 25")
# Start the Tk event loop.
# BUG FIX: the original `win = mainloop()` called the module-level
# `mainloop()` and rebound `win` to its None return value; the event loop
# belongs to the window object.
win.mainloop()
|
from __future__ import unicode_literals
from django.db import models
import re
# Basic email-shape check: ASCII local part, '@', domain, '.', alphabetic TLD.
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
import os, binascii, md5
def get_passwd_salt():
    """Return a fresh random password salt.

    15 random bytes hex-encoded -> a 30-character hex value.
    """
    random_bytes = os.urandom(15)
    return binascii.b2a_hex(random_bytes)
def get_passwd_hash( passwd, salt ):
    """Return the hex MD5 digest of passwd concatenated with salt.

    BUG FIX: the original used the Python-2-only `md5` module
    (`md5.new(...)`), which does not exist on Python 3. `hashlib.md5`
    produces identical digests, so previously stored hashes keep
    validating.
    """
    import hashlib  # local import: keeps this fix self-contained
    data = passwd + salt
    if isinstance(data, str):
        # Python 3: hashlib needs bytes; Python 2 str passes through above.
        data = data.encode('utf-8')
    return hashlib.md5(data).hexdigest()
class UsersManager( models.Manager ):
    """Custom manager adding registration/login validation for Users."""

    def register( self, postData ):
        """Validate registration input and create the user on success.

        Expects postData keys: email, f_name, l_name, passwd_1, passwd_2.
        Returns {'status': True, 'user': <Users>} on success, otherwise
        {'status': False, 'errors': [messages]}.
        """
        errors = []
        # validate email (raw content) — the elif chain stops at the first
        # failing check, so at most one email error is reported.
        if len( postData['email'] ) < 1:
            errors.append( "The email field is empty." )
        elif not email_regex.match( postData['email'] ):
            errors.append( "Incorrectly formatted email." )
        # validate email (not in DB)
        elif len( self.filter( email = postData['email'] ) ) > 0:
            errors.append( "The email ({}) is already used.".format( postData['email'] ) )
        # validate first_name (raw content)
        if len( postData['f_name'] ) < 1:
            errors.append( "The first name field is empty." )
        elif not postData['f_name'].isalpha():
            errors.append( "The first name field can only contain letters." )
        # validate last_name (raw content)
        if len( postData['l_name'] ) < 1:
            errors.append( "The last name field is empty." )
        elif not postData['l_name'].isalpha():
            errors.append( "The last name field can only contain letters." )
        # validate passwd_1 (raw content): non-empty, >= 8 chars, at least
        # one capital letter and one digit.
        if len( postData['passwd_1'] ) < 1:
            errors.append( "The password field is empty." )
        elif len( postData['passwd_1'] ) < 8:
            errors.append( "The password field MUST be AT LEAST 8 characters!" )
        elif not re.match( r'^.*[A-Z]+.*$', postData['passwd_1'] ):
            errors.append( "The password field MUST contain AT LEAST 1 capital letter!" )
        elif not re.match( r'^.*\d+.*$', postData['passwd_1'] ):
            errors.append( "The password field MUST contain AT LEAST 1 number!" )
        # validate passwd_1 against passwd_2
        if postData['passwd_1'] != postData['passwd_2']:
            errors.append( "The password and confirm password fields MUST match!" )
        # return
        if len( errors ):
            return {
                'status': False,
                'errors': errors
            }
        else:
            # Hash with a fresh per-user salt; both are stored on the row.
            passwd_salt = get_passwd_salt()
            passwd_hash = get_passwd_hash( postData['passwd_1'], passwd_salt )
            return {
                'status': True,
                'user': self.create(
                    email = postData['email'],
                    first_name = postData['f_name'],
                    last_name = postData['l_name'],
                    password = passwd_hash,
                    salt = passwd_salt,
                )
            }

    def login( self, postData ):
        """Validate login credentials.

        Expects postData keys: email, passwd.
        Returns {'status': True, 'user': <Users>} on success, otherwise
        {'status': False, 'errors': [messages]}.
        """
        errors = []
        # validate email (raw content)
        if len( postData['email'] ) < 1:
            errors.append( "The email field is empty." )
        elif not email_regex.match( postData['email'] ):
            errors.append( "Incorrectly formatted email." )
        # validate email (in DB)
        elif len( self.filter( email = postData['email'] ) ) < 1:
            errors.append( "Unknown email." )
        # validate password (raw content)
        elif len( postData['passwd'] ) < 1:
            errors.append( "The password field is empty." )
        # validate password (matches DB) — only reached when the email
        # exists, so self.get cannot raise DoesNotExist here.
        else:
            user = self.get( email = postData['email'] )
            if get_passwd_hash( postData['passwd'], user.salt ) != user.password:
                errors.append( "Incorrect email or password." )
        # return
        if len( errors ):
            return {
                'status': False,
                'errors': errors
            }
        else:
            return {
                'status': True,
                'user': self.get( email = postData['email'] )
            }

    def add_predefined_data( self ):
        """Seed the table with three fixed test users (plaintext passwords
        noted in the trailing comments)."""
        self.create(
            email = "AbCde@f.x",
            first_name = "Ab",
            last_name = "Cde",
            password = "d554cd79bb09a064e714146fcdf9593e", # 1password
            salt = "28d0e694c86f0c47ecd910a0348130",
        )
        self.create(
            email = "gh_ijk@l.y",
            first_name = "Gh",
            last_name = "Ijk",
            password = "ea9b64123c0bed77a641fbef723a9fb6", # 2password
            salt = "cb52189918385519677cdff03a0012",
        )
        self.create(
            email = "Mn_Opq@r.y",
            first_name = "Mn",
            last_name = "Opq",
            password = "6e8a2346251bb05cf6bf7773501a5997", # password1
            salt = "45e9ae8382bdd321174a89d3091dc3",
        )
class Users( models.Model ):
    """Application user with a salted MD5 password hash."""
    first_name = models.CharField( max_length = 255 )
    last_name = models.CharField( max_length = 255 )
    # NOTE(review): no uniqueness constraint here — uniqueness is only
    # enforced by UsersManager.register's duplicate check.
    email = models.CharField( max_length = 255 )
    # Hex MD5 digest (32 chars) produced by get_passwd_hash.
    password = models.CharField( max_length = 40 )
    # Hex salt (30 chars) produced by get_passwd_salt.
    salt = models.CharField( max_length = 40 )
    created_at = models.DateTimeField( auto_now_add = True )
    updated_at = models.DateTimeField( auto_now = True )
    objects = UsersManager()
|
# ******************************************
# * File: RelOperators.py
# * A test program for relational operators
# ******************************************
# BUG FIX: the original used `random` and `time` without importing them,
# and `time.clock()` was removed in Python 3.8. Seed from time.time().
import random
import time

random.seed(time.time())
x = random.randint(0, 100)
y = random.randint(0, 100)
print("x = ", x)
print("y = ", y)
# Greater than
print("x > y: ", x > y)
# Less than
print("x < y: ", x < y)
# Greater or Equal
print("x >= y: ", x >= y)
# Less or Equal
print("x <= y: ", x <= y)
# Not Equal
print("x != y: ", x != y)
# Equal
print("x == y: ", x == y)
|
from flask import Blueprint, render_template

# Blueprint grouping the site's main pages; registered by the app factory.
main = Blueprint('main', __name__)


@main.route('/index/', methods=['POST', 'GET'])
def index():
    """Render the landing page."""
    return render_template('index.html')


@main.route('/submit', methods=['POST', 'GET'])
def submit():
    """Render the results page."""
    return render_template('result.html')
|
import json
import os
import re
from datetime import datetime
import dateutil
import postgres_copy
import pytz
from sqlalchemy.orm.exc import NoResultFound
from bitcoin_acks.constants import PullRequestState, ReviewDecision
from bitcoin_acks.data_schemas import pull_request_schema
from bitcoin_acks.database import session_scope
from bitcoin_acks.github_data.comments_data import CommentsData
from bitcoin_acks.github_data.graphql_queries import (
pull_request_graphql_query,
pull_requests_graphql_query
)
from bitcoin_acks.github_data.repositories_data import RepositoriesData
from bitcoin_acks.github_data.users_data import UsersData
from bitcoin_acks.logging import log
from bitcoin_acks.models import PullRequests
from bitcoin_acks.models.etl.etl_data import ETLData
class PullRequestsData(RepositoriesData):
    """Fetches pull requests from the GitHub GraphQL API and bulk-loads them
    (plus their review decisions and labels) into Postgres via an etl_data
    staging table.
    """
    # Page size for the GraphQL search query.
    MAX_PRS = 40

    def __init__(self, repository_path: str, repository_name: str, json_data_directory: str):
        super(PullRequestsData, self).__init__(repository_path=repository_path,
                                               repository_name=repository_name)
        self.comments_data = CommentsData(repository_name=self.repo.name,
                                          repository_path=self.repo.path)
        self.users_data = UsersData()
        # Directory where intermediate JSON-lines files are written before COPY.
        self.json_data_directory = json_data_directory
        # In-memory queues drained by flush_queue_to_database().
        self.pull_request_data = []
        self.review_decisions_data = []
        self.labels_data = []

    def update(self):
        """Incremental update: fetch PRs updated since the newest stored PR
        (or since 2009 when the table is empty)."""
        with session_scope() as session:
            try:
                record = (
                    session
                    .query(PullRequests.updated_at)
                    .order_by(PullRequests.updated_at.desc())
                    .limit(1)
                    .one()
                )
                from_date = record.updated_at
            except NoResultFound:
                # Empty table: start before the first bitcoin commit.
                from_date = datetime(2009, 1, 1)
        log.debug('Updating PRs starting from', from_date=from_date)
        self.update_all(newer_than=from_date)

    def get_one(self, number: int):
        """Fetch a single PR by number and enqueue it for the next flush."""
        json_object = {
            'query': pull_request_graphql_query,
            'variables': {'prNumber': number}
        }
        data = self.graphql_post(json_object=json_object).json()
        pull_request = data['data']['repository']['pullRequest']
        validated_pull_request_data = pull_request_schema.load(pull_request)
        self.parse_into_queue(validated_pull_request_data)

    def update_all(self,
                   newer_than: datetime,
                   state: PullRequestState = None,
                   limit: int = None):
        """Fetch and load all PRs updated after `newer_than`."""
        log.debug('update_all', state=state, limit=limit, newer_than=newer_than)
        self.get_all(state=state, limit=limit, newer_than=newer_than)

    def get_all(self,
                newer_than: datetime,
                state: PullRequestState = None,
                limit: int = None):
        """Page through the GraphQL search, oldest-updated first, enqueueing
        each PR and flushing to the database after every page.

        Pagination advances by moving the `updated:>=` cursor to the last
        result's updatedAt; the loop stops when a page yields no progress.
        """
        variables = {}
        received = 0
        ends_at = newer_than
        while limit is None or received < limit:
            if limit is None:
                variables['prFirst'] = self.MAX_PRS
            else:
                variables['prFirst'] = min(limit - received, self.MAX_PRS)
            if state is not None:
                variables['prState'] = state.value
            # GitHub search wants a second-resolution UTC timestamp.
            formatted_ends_at = ends_at.replace(microsecond=0).astimezone(pytz.utc).isoformat()
            variables['searchQuery'] = f'type:pr updated:>={formatted_ends_at} repo:bitcoin/bitcoin sort:updated-asc'
            log.debug('Variables for graphql pull requests query', variables=variables)
            json_object = {
                'query': pull_requests_graphql_query,
                'variables': variables
            }
            data = self.graphql_post(json_object=json_object).json()
            search_data = data['data']['search']
            pull_requests_graphql_data = search_data['edges']
            results_count = len(search_data['edges'])
            log.debug(
                'response from github graphql',
                results_count=results_count
            )
            if not results_count:
                break
            starts_at = pull_requests_graphql_data[0]['node']['updatedAt']
            previous_ends_at = ends_at
            ends_at = dateutil.parser.parse(pull_requests_graphql_data[-1]['node']['updatedAt'])
            # No forward progress on the cursor -> we've seen everything.
            if previous_ends_at == ends_at:
                break
            log.debug(
                'Pull requests fetched',
                starts_at=starts_at,
                ends_at=ends_at
            )
            # Drop empty nodes (e.g. PRs the token cannot see).
            pull_requests_graphql_data = [r['node'] for r in pull_requests_graphql_data if r['node']]
            for pull_request_graphql in pull_requests_graphql_data:
                validated_pull_request_data = pull_request_schema.load(pull_request_graphql)
                self.parse_into_queue(validated_pull_request_data)
                if limit is not None and received == limit:
                    break
                received += 1
            self.flush_queue_to_database()

    def parse_into_queue(self, pull_request: dict):
        """Normalize one validated PR dict and append it (plus derived review
        decisions and labels) onto the in-memory queues."""
        pull_request['repository_id'] = self.repo.id
        comments = pull_request.pop('comments')
        reviews = pull_request.pop('reviews')
        comments_and_reviews = []
        # GraphQL pages cap at 100 nodes; beyond that, refetch via REST.
        if comments['totalCount'] > 100 or reviews['totalCount'] > 100:
            comments_and_reviews += [
                c for c in self.comments_data.get_all(pull_request['number'])
            ]
        else:
            comments_and_reviews += comments['nodes'] + reviews['nodes']
        for comment_or_review in comments_and_reviews:
            # Detect ACK/NACK-style review decisions in the body text.
            comment_or_review['review_decision'] = self.comments_data.identify_review_decision(
                comment_or_review['bodyText']
            )
            if comment_or_review['review_decision'] != ReviewDecision.NONE:
                comment_or_review['pull_request_id'] = pull_request['id']
                # Some review records have no id; the URL is unique enough.
                if not comment_or_review['id']:
                    comment_or_review['id'] = comment_or_review['url']
                self.review_decisions_data.append(comment_or_review)
        # A card in the "Blockers" project column marks a high-priority PR.
        project_cards = pull_request.pop('projectCards')
        blocker_card = [c for c in project_cards['nodes'] if
                        c['column'] and c['column']['name'] == 'Blockers']
        if blocker_card and not pull_request['closedAt']:
            pull_request['is_high_priority'] = blocker_card[0]['createdAt']
        else:
            pull_request['is_high_priority'] = None
        timeline_items = pull_request.pop('timelineItems')
        blocker_events = [e for e in timeline_items['nodes'] if
                          e['projectColumnName'] == 'Blockers']
        for blocker_event in blocker_events:
            if blocker_event['typename'] == 'AddedToProjectEvent':
                pull_request['added_to_high_priority'] = blocker_event['createdAt']
            elif blocker_event['typename'] == 'RemovedFromProjectEvent':
                pull_request['removed_from_high_priority'] = blocker_event['createdAt']
            else:
                # NOTE(review): when there are no blocker events at all these
                # keys are never set — downstream SQL reads them as NULL.
                pull_request['added_to_high_priority'] = None
                pull_request['removed_from_high_priority'] = None
        # Last commit is used to determine CI status
        last_commit_status = None
        commits = pull_request.pop('commits')
        pull_request['commit_count'] = commits['totalCount']
        head_commit_hash = pull_request['headRefOid']
        if commits['nodes']:
            # Pick the commit matching the head ref; assumes it is present.
            last_commit = [c for c in commits['nodes'] if c['commit']['oid'] == head_commit_hash][0]['commit']
            last_commit_status = last_commit.get('status')
        if last_commit_status is not None:
            pull_request['last_commit_state'] = last_commit_status['state'].capitalize()
            descriptions = [s['description'] for s in last_commit_status['contexts']]
            pull_request['last_commit_state_description'] = ', '.join(descriptions)
        else:
            pull_request['last_commit_state'] = None
            pull_request['last_commit_state_description'] = None
        if len(commits['nodes']):
            pull_request['last_commit_short_hash'] = commits['nodes'][-1]['commit']['oid'][0:7]
            pull_request['last_commit_pushed_date'] = commits['nodes'][-1]['commit']['pushedDate']
        else:
            pull_request['last_commit_short_hash'] = None
            pull_request['last_commit_pushed_date'] = None
        labels = pull_request.pop('labels')
        for label in labels['nodes']:
            label['pull_request_id'] = pull_request['id']
            self.labels_data.append(label)
        self.pull_request_data.append(pull_request)

    def flush_queue_to_database(self):
        """Write each queue to a JSON-lines file, COPY it into the etl_data
        staging table, then run the matching insert/upsert and clear the
        queues."""
        for file_name, data_list, data_insert_function in [
            ('pull_request_data.json', self.pull_request_data, self.insert_pull_requests),
            ('review_decisions_data.json', self.review_decisions_data, self.insert_comments_and_reviews),
            ('labels_data.json', self.labels_data, self.insert_labels)
        ]:
            if not len(data_list):
                continue
            json_path = os.path.join(self.json_data_directory, file_name)
            with open(json_path, 'w') as json_file:
                for item in data_list:
                    item = flatten_json(item)
                    for key in item.keys():
                        # Strip non-word characters and quotes from free-text
                        # values so the JSON survives COPY; identifier-ish
                        # fields are left untouched.
                        if isinstance(item[key], str) and key not in ('author_login', 'id', 'pull_request_id', 'name'):
                            # NOTE(review): unreachable — 'id' is excluded by
                            # the enclosing condition.
                            if key == 'id':
                                print(item[key])
                            input_string = item[key]
                            item[key] = ' '.join([re.sub(r'\W+', '', s) for s in input_string.split()]).replace('"', '')
                    string = json.dumps(item, ensure_ascii=True, separators=(',', ':'), default=str) + '\n'
                    json_file.write(string)
            with session_scope() as db_session:
                # Staging table holds one flush at a time.
                db_session.execute('TRUNCATE etl_data;')
                with open(json_path, 'rb') as fp:
                    postgres_copy.copy_from(fp,
                                            ETLData,
                                            db_session.connection(),
                                            ['data'])
                data_insert_function()
        self.pull_request_data = []
        self.review_decisions_data = []
        self.labels_data = []

    def insert_comments_and_reviews(self):
        """Upsert staged review-decision comments into `comments`, creating
        any missing author users first, then refresh per-PR decision counts."""
        with session_scope() as db_session:
            missing_authors = db_session.execute(
                """
                SELECT DISTINCT etl_data.data ->> 'author_login'
                FROM etl_data
                LEFT OUTER JOIN users ON etl_data.data ->> 'author_login' = users.login
                WHERE users.id IS NULL;
                """
            ).fetchall()
        if missing_authors:
            log.debug('missing_authors', missing_authors=missing_authors, count=len(missing_authors))
            for author in missing_authors:
                login = author[0]
                # NULL login (e.g. deleted account) cannot be fetched.
                if login is None:
                    continue
                user_data = self.users_data.get(login)
                self.users_data.upsert(user_data)
        with session_scope() as db_session:
            db_session.execute(
                """
                WITH etl_data AS (
                    SELECT DISTINCT etl_data.data ->> 'id' AS id,
                                    etl_data.data ->> 'bodyText' AS body,
                                    (etl_data.data ->> 'publishedAt')::timestamp with time zone AS published_at,
                                    etl_data.data ->> 'url' AS url,
                                    etl_data.data ->> 'pull_request_id' AS pull_request_id,
                                    users.id AS author_id,
                                    split_part(etl_data.data ->> 'review_decision', '.', 2)::reviewdecision AS auto_detected_review_decision
                    FROM etl_data
                    LEFT OUTER JOIN users
                        ON etl_data.data ->> 'author_login' = users.login
                )
                INSERT
                INTO comments (id,
                               body,
                               published_at,
                               url,
                               pull_request_id,
                               author_id,
                               auto_detected_review_decision)
                SELECT *
                FROM etl_data
                ON CONFLICT (id) DO UPDATE SET id = excluded.id,
                                               body = excluded.body,
                                               published_at = excluded.published_at,
                                               url = excluded.url,
                                               pull_request_id = excluded.pull_request_id,
                                               author_id = excluded.author_id,
                                               auto_detected_review_decision = excluded.auto_detected_review_decision
                ;
                """
            )
        with session_scope() as db_session:
            # Recompute review_decisions_count for the PRs touched by this flush.
            db_session.execute(
                """
                WITH etl_data AS (
                    SELECT DISTINCT etl_data.data ->> 'pull_request_id' AS pull_request_id FROM etl_data
                )
                UPDATE pull_requests
                SET review_decisions_count = s.review_decisions_count
                from (SELECT count(comments.id) as review_decisions_count,
                             etl_data.pull_request_id
                      FROM etl_data
                      LEFT JOIN comments on etl_data.pull_request_id = comments.pull_request_id AND
                                            comments.auto_detected_review_decision is not null and
                                            comments.auto_detected_review_decision != 'NONE'::reviewdecision
                      GROUP BY etl_data.pull_request_id) s
                WHERE s.pull_request_id = pull_requests.id;
                """
            )

    @staticmethod
    def insert_labels():
        """Upsert staged labels and add any missing PR-label associations."""
        with session_scope() as db_session:
            db_session.execute(
                """
                WITH etl_data AS (
                    SELECT DISTINCT etl_data.data ->> 'id' AS id,
                                    etl_data.data ->> 'name' AS "name",
                                    etl_data.data ->> 'color' AS color
                    FROM etl_data
                )
                INSERT
                INTO labels (id,
                             "name",
                             color)
                SELECT id, name, color FROM etl_data
                ON CONFLICT (id) DO UPDATE SET name = excluded.name,
                                               color = excluded.color;
                WITH etl_data AS (
                    SELECT DISTINCT etl_data.data ->> 'id' AS label_id,
                                    etl_data.data ->> 'pull_request_id' AS pull_request_id
                    FROM etl_data
                    LEFT OUTER JOIN pull_requests_labels
                        ON etl_data.data ->> 'id' = pull_requests_labels.label_id
                        AND etl_data.data ->> 'pull_request_id' = pull_requests_labels.pull_request_id
                    WHERE pull_requests_labels.id IS NULL
                )
                INSERT
                INTO pull_requests_labels (label_id,
                                           pull_request_id)
                SELECT label_id, pull_request_id FROM etl_data;
                """
            )

    def insert_pull_requests(self):
        """Upsert staged pull requests into `pull_requests`, creating any
        missing author users first."""
        with session_scope() as db_session:
            missing_authors = db_session.execute(
                """
                SELECT DISTINCT epr.data ->> 'author_login'
                FROM etl_data epr
                LEFT OUTER JOIN users authors ON epr.data ->> 'author_login' = authors.login
                WHERE authors.id IS NULL;
                """
            ).fetchall()
        if missing_authors:
            log.debug('missing_authors', missing_authors=missing_authors, count=len(missing_authors))
            for author in missing_authors:
                login = author[0]
                if login is None:
                    continue
                user_data = self.users_data.get(login)
                self.users_data.upsert(user_data)
        with session_scope() as db_session:
            db_session.execute("""
                WITH etl_data AS (
                    SELECT DISTINCT epr.data ->> 'id' AS id,
                                    (epr.data ->> 'repository_id')::int AS repository_id,
                                    author.id AS author_id,
                                    (epr.data ->> 'number')::int AS "number",
                                    epr.data ->> 'state' AS "state",
                                    epr.data ->> 'title' AS title,
                                    (epr.data ->> 'createdAt')::timestamp with time zone AS created_at,
                                    (epr.data ->> 'updatedAt')::timestamp with time zone AS updated_at,
                                    (epr.data ->> 'is_high_priority')::timestamp with time zone AS is_high_priority,
                                    (epr.data ->> 'added_to_high_priority')::timestamp with time zone AS added_to_high_priority,
                                    (epr.data ->> 'removed_from_high_priority')::timestamp with time zone AS removed_from_high_priority,
                                    (epr.data ->> 'additions')::int AS additions,
                                    (epr.data ->> 'deletions')::int AS deletions,
                                    epr.data ->> 'mergeable' AS mergeable,
                                    epr.data ->> 'last_commit_state' AS last_commit_state,
                                    epr.data ->> 'last_commit_state_description' AS last_commit_state_description,
                                    epr.data ->> 'last_commit_short_hash' AS last_commit_short_hash,
                                    (epr.data ->> 'last_commit_pushed_date')::timestamp with time zone AS last_commit_pushed_date,
                                    epr.data ->> 'bodyText' AS body,
                                    (epr.data ->> 'mergedAt')::timestamp with time zone AS merged_at,
                                    (epr.data ->> 'closedAt')::timestamp with time zone AS closed_at,
                                    (epr.data ->> 'commit_count')::int AS commit_count
                    FROM etl_data epr
                    LEFT OUTER JOIN users author
                        ON epr.data ->> 'author_login' = author.login
                )
                INSERT
                INTO pull_requests (id,
                                    repository_id,
                                    author_id,
                                    "number",
                                    "state",
                                    title,
                                    created_at,
                                    updated_at,
                                    is_high_priority,
                                    added_to_high_priority,
                                    removed_from_high_priority,
                                    additions,
                                    deletions,
                                    mergeable,
                                    last_commit_state,
                                    last_commit_state_description,
                                    last_commit_short_hash,
                                    last_commit_pushed_date,
                                    body,
                                    merged_at,
                                    closed_at,
                                    commit_count)
                SELECT *
                FROM etl_data
                ON CONFLICT ON CONSTRAINT pull_requests_unique_constraint DO UPDATE SET repository_id = excluded.repository_id,
                                                                                        author_id = excluded.author_id,
                                                                                        "number" = excluded.number,
                                                                                        "state" = excluded.state,
                                                                                        title = excluded.title,
                                                                                        created_at = excluded.created_at,
                                                                                        updated_at = excluded.updated_at,
                                                                                        is_high_priority = excluded.is_high_priority,
                                                                                        added_to_high_priority = excluded.added_to_high_priority,
                                                                                        removed_from_high_priority = excluded.removed_from_high_priority,
                                                                                        additions = excluded.additions,
                                                                                        deletions = excluded.deletions,
                                                                                        mergeable = excluded.mergeable,
                                                                                        last_commit_state = excluded.last_commit_state,
                                                                                        last_commit_state_description = excluded.last_commit_state_description,
                                                                                        last_commit_short_hash = excluded.last_commit_short_hash,
                                                                                        last_commit_pushed_date = excluded.last_commit_pushed_date,
                                                                                        body = excluded.body,
                                                                                        merged_at = excluded.merged_at,
                                                                                        closed_at = excluded.closed_at,
                                                                                        commit_count = excluded.commit_count
                ;""")
def flatten_json(y):
    """Flatten nested dicts/lists into a single-level dict.

    Nested keys are joined with '_'; list elements use their index as the
    key segment, e.g. {'a': [{'b': 1}]} -> {'a_0_b': 1}.
    """
    flat = {}

    def _walk(node, prefix=''):
        # Exact type checks (not isinstance) to mirror the original contract:
        # dict/list subclasses are treated as leaves.
        if type(node) is dict:
            for key in node:
                _walk(node[key], prefix + key + '_')
        elif type(node) is list:
            for index, element in enumerate(node):
                _walk(element, prefix + str(index) + '_')
        else:
            # Drop the trailing '_' from the accumulated prefix.
            flat[prefix[:-1]] = node

    _walk(y)
    return flat
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import unittest
from django.test import TestCase
from data_aggregator.cache import DataAggregatorGCSCache
class TestDataAggregatorGCSCache(TestCase):
    def test_get_cache_expiration_time(self):
        """Table-driven check of cache expiration policy: analytics and
        subaccount-report canvas URLs with status 200 expire immediately (0);
        everything else (bad status, unknown service, bad url, sub_accounts
        listings) is uncached (None)."""
        cache = DataAggregatorGCSCache()
        cases = [
            # valid analytics urls
            ("canvas",
             "/api/v1/courses/1392640/analytics/student_summaries.json",
             200, 0),
            ("canvas",
             "/api/v1/courses/1452786/analytics/student_summaries.json"
             "?per_page=100",
             200, 0),
            ("canvas",
             "/api/v1/courses/1399587/analytics/users/3562797/"
             "assignments.json",
             200, 0),
            # bad status
            ("canvas",
             "/api/v1/courses/1399587/analytics/users/3562797/"
             "assignments.json",
             500, None),
            # valid subaccount report urls
            ("canvas",
             "/api/v1/accounts/sis_account_id:account_103831/analytics/"
             "terms/sis_term_id:2021-spring/activity.json",
             200, 0),
            ("canvas",
             "/api/v1/accounts/sis_account_id:uwcourse:seattle:"
             "information-school:inform:ita:future/analytics/"
             "terms/sis_term_id:2021-spring/statistics.json",
             200, 0),
            # bad status
            ("canvas",
             "/api/v1/accounts/sis_account_id:uwcourse:seattle:"
             "information-school:inform:ita:future/analytics/"
             "terms/sis_term_id:2021-spring/statistics.json",
             500, None),
            # unknown service
            ("foobar",
             "/api/v1/courses/1392640/analytics/",
             200, None),
            # bad url
            ("canvas",
             "/api/v2/courses/1392640/analytics/",
             200, None),
            # subaccount urls to ignore
            ("canvas",
             "/api/v1/accounts/sis_account_id:uwcourse/sub_accounts"
             "?recursive=true&page=2&per_page=100",
             200, None),
        ]
        for service, url, status, expected in cases:
            self.assertEqual(
                cache.get_cache_expiration_time(service, url, status=status),
                expected)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
import cv2
def findContour(image):
    """Return the number of external contours in a BGR image.

    The image is converted to grayscale and binary-thresholded at 15
    before contour extraction.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 15, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(binary.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    return len(contours)
../../ZenRoute/DisplayNetwork.py |
from .base import FunctionalTest
import time
from selenium.webdriver.common.keys import Keys
class CRITest(FunctionalTest):
    """Selenium functional tests for the CRI (constant rate infusion)
    calculator pages.

    BUG FIXES relative to the original:
    * assertAlmostEqual on two strings only passed via its equality
      short-circuit; assertEqual states the intent.
    * assertIn was called with a float needle and a WebElement haystack,
      which raises TypeError at runtime; membership must be checked with a
      string against the element's `.text`.
    """

    def test_simple_cri_page(self):
        # A client has brought in their dogs that got into a fight over a piece of chocolate and then
        # both of them got hit by a car. It is an emergency and Marfalo needs to prepare IV fluids for them.
        # By serendipitous coincidence the dev of VetCalc has just implemented this feature.
        self.browser.get(self.server_url + '/calc/cri/simple/')
        # The page has an input box for weight. Marfalo calculates the CRI dosage for an 11 kg dog.
        inputbox = self.get_item_input_box()
        inputbox.send_keys('25\n')
        # The page updates and displays a table with the calculated dosages.
        time.sleep(5)
        calc_cri = self.browser.find_element_by_xpath("//tbody/tr/td[2]").text
        self.assertEqual('0.083', calc_cri)

    def test_advanced_cri_page(self):
        # The operation to save the dogs has gone pear shaped and one of the dogs is undergoing heart failure.
        # Dr. T is shitting his pants and screams at Marfalo to prepare dobutamine.
        # Marfalo doesn't know what that is or what it does but luckily VetCalc has a page for that.
        # This time Marfalo uses the advanced calculator.
        self.browser.get(self.server_url + '/calc/cri/advanced/')
        # Marfalo sees an input box for weight, desired fluid rate, volume of remaining fluids, and desired
        # unit/kg/time infusion.
        # Marfalo inputs the relevant information.
        self.browser.find_element_by_id('id_weight').send_keys(7)
        self.browser.find_element_by_id('id_rate').send_keys(3)
        self.browser.find_element_by_id('id_volume').send_keys(250)
        self.browser.find_element_by_id('id_infusion').send_keys(5)
        self.browser.find_element_by_class_name('btn').send_keys(Keys.RETURN)
        # The page returns a paragraph explaining how much dobutamine to add to the IV bag and at what rate the fluid
        # and infusion will be administered
        time.sleep(5)
        calc_cri_adv = self.browser.find_element_by_id('id_dosages')
        self.assertIn('14.0', calc_cri_adv.text)
        self.assertIn('5', calc_cri_adv.text)

    def test_post_cpr_calc(self):
        # Marfalo has resuscitated the dogs with mouth to mouth CPR.
        # Marfalo follows up with more drugs in the IV fluids.
        self.browser.get(self.server_url + '/calc/cri/cpr/')
        # This page is similar to the advanced CRI calculator, but has additional fields for dobutamine, dopamine,
        # and lidocaine.
        self.browser.find_element_by_id('id_weight').send_keys(0.5)
        self.browser.find_element_by_id('id_rate').send_keys(1)
        self.browser.find_element_by_id('id_volume').send_keys(10)
        self.browser.find_element_by_id('id_dobutamine').send_keys(4)
        self.browser.find_element_by_id('id_dopamine').send_keys(3)
        self.browser.find_element_by_id('id_lidocaine').send_keys(60)
        self.browser.find_element_by_class_name('btn').send_keys(Keys.RETURN)
        # The page spews out the calculated dosages
        time.sleep(5)
        cri_cpr_calc = self.browser.find_element_by_id('id_cri_cpr_dosages')
        values = ['0.096', '0.022', '0.9', '0.3']
        for value in values:
            self.assertIn(value, cri_cpr_calc.text)

    def test_metoclopramide_calc(self):
        # All the drugs that were administered have made the dogs puke. Dr. T prescribes more drugs to fix the problem.
        # Marfalo needs to prepare metoclopramide.
        self.browser.get(self.server_url + '/calc/cri/metoclopramide/')
        # This page is similar to the advanced calculator, but has extra inputs for increasing the dosage.
        self.browser.find_element_by_id('id_weight').send_keys(4.0)
        self.browser.find_element_by_id('id_rate').send_keys(10)
        self.browser.find_element_by_id('id_volume').send_keys(100)
        self.browser.find_element_by_id('id_infusion').send_keys(4)
        self.browser.find_element_by_class_name('btn').send_keys(Keys.RETURN)
        # The numbers check out. (Fixed: compare strings against the
        # element's text, not floats against the WebElement.)
        cri_metoclopramide_calc = self.browser.find_element_by_css_selector('p')
        self.assertIn('0.067', cri_metoclopramide_calc.text)
        self.assertIn('1.33', cri_metoclopramide_calc.text)
        # The dogs are still puking, so Marfalo needs to increase the dosage.
        self.browser.find_element_by_id('id_inc_volume').send_keys(100)
        self.browser.find_element_by_id('id_inc_infusion').send_keys(1)
        self.browser.find_element_by_class_name('btn').send_keys(Keys.RETURN)
        # The dosages are updated.
        cri_metoclopramide_inc_calc = self.browser.find_element_by_css_selector('p')
        self.assertIn('0.2', cri_metoclopramide_inc_calc.text)
        self.assertIn('4.5', cri_metoclopramide_inc_calc.text)
|
"""Implementation of state API.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import json
import os
import re
import zlib
import sqlite3
import tempfile
import fnmatch
import collections
import time
import six
from treadmill import admin
from treadmill import context
from treadmill import schema
from treadmill import utils
from treadmill import yamlwrapper as yaml
from treadmill import zknamespace as z
from treadmill import zkutils
_LOGGER = logging.getLogger(__name__)
def watch_running(zkclient, cell_state):
    """Watch running instances.

    Registers a ZooKeeper children watch on the /running node; the callback
    fires immediately and again on every membership change, replacing
    cell_state.running and promoting matching placement entries to the
    'running' state.
    """
    @zkclient.ChildrenWatch(z.path.running())
    @utils.exit_on_unhandled
    def _watch_running(running):
        """Watch /running nodes."""
        cell_state.running = set(running)
        # Sync placement states with the new running set.
        # NOTE(review): entries that stopped running are not demoted here;
        # presumably the placement watch refreshes them — confirm.
        for name, item in six.viewitems(cell_state.placement):
            if name in cell_state.running:
                item['state'] = 'running'
        # Returning True keeps the watch registered.
        return True

    _LOGGER.info('Loaded running.')
def watch_finished(zkclient, cell_state):
    """Watch finished instances.

    Keeps cell_state.finished (dict of instance -> finished data) in sync
    with the children of the /finished node.
    """
    @zkclient.ChildrenWatch(z.path.finished())
    @utils.exit_on_unhandled
    def _watch_finished(finished):
        """Watch /finished nodes."""
        current = set(cell_state.finished)
        target = set(finished)
        # Load data for newly finished instances.
        for instance in target - current:
            finished_data = zkutils.get_default(
                zkclient,
                z.path.finished(instance),
                {}
            )
            cell_state.finished[instance] = finished_data
        # Drop instances that disappeared from /finished.
        for instance in current - target:
            del cell_state.finished[instance]

    _LOGGER.info('Loaded finished.')
def watch_placement(zkclient, cell_state):
    """Register a ZK data watch that mirrors /placement into cell_state.

    :param zkclient: connected Zookeeper client.
    :param cell_state: shared ``CellState`` instance updated in place.
    """
    @zkclient.DataWatch(z.path.placement())
    @utils.exit_on_unhandled
    def _watch_placement(placement_data, _stat, event):
        """Watch /placement data."""
        # Node deleted (or not yet created): forget all placements.
        if placement_data is None or event == 'DELETED':
            cell_state.placement.clear()
            return True
        try:
            # Current format: zlib-compressed JSON payload.
            placement = json.loads(
                zlib.decompress(placement_data).decode()
            )
        except zlib.error:
            # For backward compatibility, remove once all cells use new format.
            placement = yaml.load(placement_data)
        updated_placement = {}
        for row in placement:
            # Row layout: (instance, before, exp_before, after, expires).
            instance, _before, _exp_before, after, expires = tuple(row)
            if after is None:
                state = 'pending'
            else:
                state = 'scheduled'
                # Upgrade to 'running' if the running watch already saw it.
                if instance in cell_state.running:
                    state = 'running'
            updated_placement[instance] = {
                'state': state,
                'host': after,
                'expires': expires,
            }
        # Swap in the rebuilt mapping in one assignment so readers never
        # observe a partially updated dict.
        cell_state.placement = updated_placement
        return True
    _LOGGER.info('Loaded placement.')
def watch_finished_history(zkclient, cell_state):
    """Register a ZK children watch over finished-history snapshots.

    Each snapshot node holds a zlib-compressed sqlite database of finished
    instances; loaded entries are merged into
    ``cell_state.finished_history``.
    """
    # Maps snapshot db node -> list of instance names loaded from it, so a
    # removed snapshot's entries can be evicted.
    loaded_snapshots = {}
    @zkclient.ChildrenWatch(z.FINISHED_HISTORY)
    @utils.exit_on_unhandled
    def _watch_finished_snapshots(snapshots):
        """Watch /finished.history nodes."""
        start_time = time.time()
        # Work on a copy and swap at the end so readers never see a
        # half-updated mapping.
        finished_history = cell_state.finished_history.copy()
        for db_node in sorted(set(loaded_snapshots) - set(snapshots)):
            _LOGGER.info('Unloading snapshot: %s', db_node)
            for instance in loaded_snapshots.pop(db_node):
                finished_history.pop(instance, None)
        for db_node in sorted(set(snapshots) - set(loaded_snapshots)):
            _LOGGER.info('Loading snapshot: %s', db_node)
            loading_start_time = time.time()
            loaded_snapshots[db_node] = []
            # Payload is a zlib-compressed sqlite db; write to a temp file
            # so sqlite3 can open it by path.
            data, _stat = zkclient.get(z.path.finished_history(db_node))
            with tempfile.NamedTemporaryFile(delete=False, mode='wb') as f:
                f.write(zlib.decompress(data))
            try:
                conn = sqlite3.connect(f.name)
                cur = conn.cursor()
                sql = 'SELECT name, data FROM finished ORDER BY timestamp'
                for row in cur.execute(sql):
                    instance, data = row
                    if data:
                        data = yaml.load(data)
                    finished_history[instance] = data
                    loaded_snapshots[db_node].append(instance)
                conn.close()
            finally:
                # Always remove the temp file, even if sqlite fails.
                os.unlink(f.name)
            _LOGGER.debug('Loading time: %s', time.time() - loading_start_time)
        cell_state.finished_history = finished_history
        _LOGGER.debug(
            'Loaded snapshots: %d, finished: %d, finished history: %d, '
            'time: %s', len(loaded_snapshots), len(cell_state.finished),
            len(cell_state.finished_history), time.time() - start_time
        )
        return True
    _LOGGER.info('Loaded finished snapshots.')
class CellState(object):
    """In-memory snapshot of cell scheduling state, fed by the ZK watches."""

    __slots__ = (
        'running',
        'placement',
        'finished',
        'finished_history',
        'watches',
    )

    def __init__(self):
        # NOTE(review): `running` starts as a list but the running watch
        # replaces it with a set; consumers only test membership.
        self.running = []
        self.placement = {}
        self.finished = {}
        self.finished_history = collections.OrderedDict()
        self.watches = set()

    def get_finished(self, rsrc_id):
        """Return the finished-state dict for `rsrc_id`, or None if unknown.

        Looks in recently finished instances first, then in the loaded
        historical snapshots.
        """
        data = self.finished.get(rsrc_id) or self.finished_history.get(rsrc_id)
        if not data:
            return None
        state = {
            'name': rsrc_id,
            'host': data['host'],
            'state': data['state'],
            'when': data['when']
        }
        status = data['state']
        payload = data['data']
        if status == 'finished' and payload:
            # Payload format is '<rc>.<signal>'; rc > 255 means the
            # container died on a signal rather than a normal exit.
            try:
                exit_rc, exit_sig = map(int, payload.split('.'))
            except ValueError:
                _LOGGER.warning('Unexpected finished state data for %s: %s',
                                rsrc_id, payload)
            else:
                if exit_rc > 255:
                    state['signal'] = exit_sig
                else:
                    state['exitcode'] = exit_rc
        elif status == 'aborted' and payload:
            state['aborted_reason'] = payload
        elif status == 'terminated' and payload:
            state['terminated_reason'] = payload
        state['oom'] = status == 'killed' and payload == 'oom'
        return state
class API(object):
    """Treadmill State REST api."""
    # Cap on the number of finished/history entries returned by list().
    _FINISHED_LIMIT = 1000
    @staticmethod
    def _get_server_info():
        """Get server information"""
        # LDAP lookup of all servers registered in this cell.
        return admin.Server(context.GLOBAL.ldap.conn).list({
            'cell': context.GLOBAL.cell
        })
    def __init__(self):
        # When attached to a cell, start the ZK watches that keep
        # `cell_state` continuously up to date; the closures below serve
        # reads from that shared in-memory state.
        if context.GLOBAL.cell is not None:
            zkclient = context.GLOBAL.zk.conn
            cell_state = CellState()
            _LOGGER.info('Initializing api.')
            watch_running(zkclient, cell_state)
            watch_placement(zkclient, cell_state)
            watch_finished(zkclient, cell_state)
            watch_finished_history(zkclient, cell_state)
        # NOTE(review): if cell is None, `cell_state` is never bound and the
        # closures below would raise NameError when invoked — confirm the
        # API is never constructed without a cell.
        def _list(match=None, finished=False, partition=None):
            """List instances state."""
            _LOGGER.info('list: %s %s %s', match, finished, partition)
            start_time = time.time()
            # Default to matching everything; instance names carry a
            # '#<id>' suffix, so append a wildcard one when absent.
            if match is None:
                match = '*'
            if '#' not in match:
                match += '#*'
            match_re = re.compile(fnmatch.translate(os.path.normcase(match)))
            def _match(name):
                """True if `name` matches the requested glob pattern."""
                return match_re.match(os.path.normcase(name)) is not None
            hosts = None
            if partition:
                # Restrict results to servers in the requested partition.
                hosts = [server['_id'] for server in API._get_server_info()
                         if server['partition'] == partition]
            # Iterate over a copy: the watches mutate placement concurrently.
            filtered = [
                {'name': name, 'state': item['state'], 'host': item['host']}
                for name, item in six.viewitems(cell_state.placement.copy())
                if _match(name) and (hosts is None or item['host'] in hosts)
            ]
            if finished:
                filtered_finished = {}
                def _filter_finished(iterable, limit=None):
                    """Collect matching finished entries, up to `limit`."""
                    added = 0
                    for name in iterable:
                        if not _match(name):
                            continue
                        if limit and added >= limit:
                            break
                        item = cell_state.get_finished(name)
                        if item and (hosts is None or item['host'] in hosts):
                            filtered_finished[name] = item
                            added += 1
                _filter_finished(six.viewkeys(cell_state.finished.copy()))
                # History is ordered oldest-first; scan newest-first and
                # stop once the limit is reached.
                _filter_finished(reversed(cell_state.finished_history),
                                 self._FINISHED_LIMIT)
                filtered.extend(sorted(six.viewvalues(filtered_finished),
                                       key=lambda item: float(item['when']),
                                       reverse=True)[:self._FINISHED_LIMIT])
            res = sorted(filtered, key=lambda item: item['name'])
            _LOGGER.debug('list time: %s', time.time() - start_time)
            return res
        @schema.schema({'$ref': 'instance.json#/resource_id'})
        def get(rsrc_id):
            """Get instance state."""
            # Active placement wins over finished state.
            if rsrc_id in cell_state.placement:
                state = cell_state.placement[rsrc_id]
            else:
                state = cell_state.get_finished(rsrc_id)
            if not state:
                return None
            res = {'name': rsrc_id}
            res.update(state)
            return res
        self.list = _list
        self.get = get
|
from sklearn import linear_model, metrics
from sklearn.model_selection import KFold, cross_val_predict, cross_val_score
from sklearn import datasets
import sklearn
import scipy.stats as stats
import numpy as np
import pandas as pd
# Part 1 - Accuracy and Recall
def logistic_regression():
    """Evaluate logistic regression on three feature subsets of the
    breast-cancer data using 10-fold cross validation.

    Prints the accuracy and classification report for each fold and
    returns the collected metrics.

    Returns:
        list: one list per feature subset, containing alternating
        accuracy scores (float) and classification-report strings,
        one pair per fold.
    """
    breast_cancer_dataframe = pd.read_csv('./data/breast_cancer.csv', sep=',')
    # Showing main classification metrics
    logistic_regr = linear_model.LogisticRegression()
    feature_sets = [
        ['Concavity1', 'Texture1', 'Symmetry1'],
        ['Perimeter1', 'Area1', 'Compactness1'],
        ['Perimeter1', 'Area1', 'Compactness1',
         'Concavity1', 'Texture1', 'Symmetry1'],
    ]
    calculated_list = []
    # `feature_names` rather than `vars`: avoid shadowing the builtin.
    for feature_names in feature_sets:
        x = breast_cancer_dataframe[feature_names].values.reshape(-1, len(feature_names))
        y = breast_cancer_dataframe['Diagnosis']
        kf = KFold(n_splits=10)
        calculated_accuracy_list = []
        for train_index, test_index in kf.split(x, y):
            x_train, x_test = x[train_index], x[test_index]
            y_train, y_test = y[train_index], y[test_index]
            logistic_regr.fit(x_train, y_train)
            # NOTE(review): cross_val_predict refits its own clones on the
            # *test* fold only, so the fit above is not used by it —
            # confirm this nested CV is intentional.
            predict_regr = cross_val_predict(logistic_regr, x_test, y_test, cv=10)
            calculated_accuracy = metrics.accuracy_score(y_test, predict_regr, normalize=True)
            calculated_classification = metrics.classification_report(
                y_test, predict_regr,
                target_names=['Malignant Cancer', 'Benign Cancer'])
            calculated_accuracy_list.append(calculated_accuracy)
            calculated_accuracy_list.append(calculated_classification)
            print("Classification Report")
            print("Accuracy: ", calculated_accuracy)
            print("Classification: ", calculated_classification)
        calculated_list.append(calculated_accuracy_list)
    # Previously this list was built and silently discarded.
    return calculated_list
# Part 2 - Population and T-test
def population_test():
brain_size_dataframe = pd.read_csv('./data/brain_size.csv', delimiter=';')
height_from_dataframe = brain_size_dataframe['Height'].values.astype(float)
height_from_dataframe_female = brain_size_dataframe[brain_size_dataframe['Gender'] == 'Female']['Height']
height_from_dataframe_male = brain_size_dataframe[brain_size_dataframe['Gender'] == 'Male']['Height']
groupby_gender = brain_size_dataframe.groupby('Gender')
for gender, value in groupby_gender['Height']:
print("Dataframe Average Height:", gender, value.mean())
# Average female height in Denmark - 168.7 cm (66.41 inches)
# Average female height in USA - 162 cm (63.77 inches)
# Source - http://www.averageheight.co/average-female-height-by-country
# Average male height in Denmark - 182.6 cm (71.88 inches)
# Average male height in USA - 175.2 (69.2 inches)
# Source - http://www.averageheight.co/average-male-height-by-country
print()
print("Dataframe average female height 65.76 inches compared to Denmark average female height 66.44 inches")
t_test, population_mean = stats.ttest_1samp(height_from_dataframe_female, 66)
print("T-test:", str(t_test), " Popmean: ", str(population_mean))
print()
print("Dataframe average female height 65.76 inches compared to USA average female height 65.77 inches")
t_test, population_mean = stats.ttest_1samp(height_from_dataframe_female, 65)
print("T-test:", str(t_test), " Popmean: ", str(population_mean))
print()
print("Dataframe average male height 71 inches compared to Denmark average male height 71.88 inches")
t_test, population_mean = stats.ttest_1samp(height_from_dataframe_male, 72)
print("T-test:", str(t_test), " Popmean: ", str(population_mean))
print()
print("Dataframe average male height 71 inches compared to USA average male height 69.2 inches")
t_test, population_mean = stats.ttest_1samp(height_from_dataframe_male, 69)
print("T-test:", str(t_test), " Popmean: ", str(population_mean))
print()
print("Dataframe male/female height compared to Denmark")
t_test, population_mean = stats.ttest_1samp(height_from_dataframe, 71)
print("T-test:", str(t_test), " Popmean: ", str(population_mean))
print()
print("Dataframe male/female height compared to USA")
t_test, population_mean = stats.ttest_1samp(height_from_dataframe, 68)
print("T-test:", str(t_test), " Popmean: ", str(population_mean))
def run():
    """Run both analysis parts in order."""
    # Part 1 - Accuracy and Recall (comment previously said Part 2)
    logistic_regression()
    # Part 2 - Population and T-test
    population_test()
run()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 16:05:33 2018
Code to look at psf data of individual quadrants
@author: ppxee
"""
### Import required libraries ###
import matplotlib.pyplot as plt #for plotting
from astropy.io import fits #for handling fits
#from astropy.table import Table #for handling tables
import numpy as np #for handling arrays
#import math
#from astropy.stats import median_absolute_deviation
import vari_funcs #my module to help run code neatly
from photutils import CircularAperture, aperture_photometry
from astropy.coordinates import match_coordinates_sky
from astropy.coordinates import SkyCoord
from astropy import units as u
plt.close('all') #close any open plots
def radial_profile(data, center):
    """Return the azimuthally averaged radial profile of a 2-D image.

    Parameters
    ----------
    data : 2-D array of pixel values.
    center : (x, y) pixel coordinates to measure radii from.

    Returns
    -------
    1-D array where element i is the mean pixel value at integer radius i.
    """
    y, x = np.indices((data.shape)) #create coordinate grid
    r = np.sqrt((x - center[0])**2 + (y - center[1])**2) #get radius values for grid
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int gives the same integer truncation.
    r = r.astype(int)
    tbin = np.bincount(r.ravel(), data.ravel()) # counts number of times value
    # of radius occurs in the psf
    # weighted by the data
    nr = np.bincount(r.ravel()) # counts number of radii values in psf
    radialprofile = tbin / nr # as weighted is r*data then get profile by
    # dividing by unweighted counts of r values.
    return radialprofile
def get_psf(sem):
    """Load the stacked K-band PSF image for semester `sem` from disk."""
    psf_path = 'PSFs/' + sem + '_K_PSF.fits'
    return fits.getdata(psf_path)
def quadrants(initdata, sem):
    """Split a star table into the four detector quadrants.

    Parameters
    ----------
    initdata : structured array/table with 'X_IMAGE_<sem>' and
        'Y_IMAGE_<sem>' pixel-coordinate columns.
    sem : semester string selecting which coordinate columns to use.

    Returns
    -------
    (quad1, quad2, quad3, quad4) : sub-tables for upper-left, upper-right,
    lower-left and lower-right quadrants respectively.
    """
    ira = initdata['X_IMAGE_'+sem]
    idec = initdata['Y_IMAGE_'+sem]
    ### define bounds of quadrants ###
    # NOTE(review): pixel boundaries of the quadrant split — confirm they
    # match the detector geometry for all semesters.
    midra = 12450
    middec = 13310
    ### create masks for quadrants; use logical '&' rather than the
    ### multiplication trick for boolean combination ###
    quad1data = initdata[(ira < midra) & (idec >= middec)]
    quad2data = initdata[(ira >= midra) & (idec >= middec)]
    quad3data = initdata[(ira < midra) & (idec < middec)]
    quad4data = initdata[(ira >= midra) & (idec < middec)]
    return quad1data, quad2data, quad3data, quad4data
def get_avg_flux(tbdata):
    """Median normalised flux curve across all stars in `tbdata`."""
    normed = vari_funcs.normalise_flux(vari_funcs.hflux4_stacks(tbdata))
    return np.nanmedian(normed, axis=0)
def psf_and_profile(quad, sem):
    """Load a quadrant PSF and return (psf, radial profile, sqrt profile).

    `quad` is the quadrant number (1-4), `sem` the semester string.
    """
    centre = [29, 29]
    filename = ('PSFs/H/Quad_PSFs/cleaned_Kstars_' + sem + '_' + str(quad)
                + '_H_PSF.fits')
    psf = fits.getdata(filename)
    profile = vari_funcs.radial_profile(psf, centre)
    return psf, profile, np.sqrt(profile)
# Semesters (epochs) of the survey being compared.
semesters = ['06B', '07B', '08B', '09B', '10B', '11B', '12B']
hdr08B = fits.getheader('Images/UDS-DR11-K.mef.fits') # random year (same in all)
const = -hdr08B['CD1_1'] # constant that defines unit conversion for FWHM
r = np.arange(0,42,1) * const * 3600 #define radius values
centre = [29,29]
psf_data = fits.open('UDS_catalogues/DR11_stars_for_PSFs.fits')[1].data
sdata = fits.open('mag_flux_tables/H/stars_mag_flux_table_H_cleaned.fits')[1].data
#set up time variable for plot
t = np.linspace(2, 8, num=7)
# Find stars present in every semester: match the PSF-star catalogue to
# each epoch and intersect the matched 06B IDs across epochs.
for n, sem in enumerate(semesters):### Define coordinates ###
    refcoord = SkyCoord(psf_data['ALPHA_J2000_1']*u.degree, psf_data['DELTA_J2000_1']*u.degree)
    semcoord = SkyCoord(sdata['ALPHA_J2000_'+sem]*u.degree, sdata['DELTA_J2000_'+sem]*u.degree)
    ### Match catalogues and create new table ###
    idx, d2d , _ = match_coordinates_sky(refcoord, semcoord) #match these 'good' stars to create table
    if sem == '06B':
        ids = sdata['NUMBER_06B'][idx]
    else:
        ids = np.intersect1d(ids, sdata['NUMBER_06B'][idx])
    mask = np.isin(sdata['NUMBER_06B'], ids)
    tempsdata = sdata[mask]
    print(len(tempsdata['MAG_APER_'+sem][:,4]))
# Split the common-star table into the four camera quadrants (06B coords).
squad1data, squad2data, squad3data, squad4data = quadrants(tempsdata,'06B')
### get average FWHM ###
avgFWHM1 = np.zeros(len(semesters))
avgFWHM2 = np.zeros(len(semesters))
avgFWHM3 = np.zeros(len(semesters))
avgFWHM4 = np.zeros(len(semesters))
for n, sem in enumerate(semesters):
    # FWHM_WORLD is in degrees; * 3600 converts to arcsec.
    avgFWHM1[n] = np.nanmedian(squad1data['FWHM_WORLD_'+sem]) * 3600
    avgFWHM2[n] = np.nanmedian(squad2data['FWHM_WORLD_'+sem]) * 3600
    avgFWHM3[n] = np.nanmedian(squad3data['FWHM_WORLD_'+sem]) * 3600
    avgFWHM4[n] = np.nanmedian(squad4data['FWHM_WORLD_'+sem]) * 3600
### get average flux ###
avgflux1 = get_avg_flux(squad1data)
avgflux2 = get_avg_flux(squad2data)
avgflux3 = get_avg_flux(squad3data)
avgflux4 = get_avg_flux(squad4data)
## get psfs, aper flux, and profiles ###
# NOTE(review): the aperture comment below says 3 arcsec but pixelr is a
# 1.5 arcsec radius — confirm whether radius or diameter was intended.
pixelr = (1.5/3600) / const
aperture = CircularAperture(centre, pixelr)
smallaperture = CircularAperture(centre, pixelr)
psf = {}
rp = {}
sqrtrp = {}
aperflux = {1:np.empty(len(semesters)),
            2:np.empty(len(semesters)),
            3:np.empty(len(semesters)),
            4:np.empty(len(semesters))}
for m, sem in enumerate(semesters):
    psf[sem] = {}
    rp[sem] = {}
    sqrtrp[sem] ={}
    for n in [1,2,3,4]:
        psf[sem][n], rp[sem][n], sqrtrp[sem][n] = psf_and_profile(n,sem)
        ### Determine flux within 3 arcsec apertures ###
        phot = aperture_photometry(psf[sem][n], aperture)
        aperflux[n][m] = phot['aperture_sum'][0]
        ### Plot the psfs ###
        plt.figure(m+5)
        plt.subplot(2,2,n)
        plt.imshow(np.log(psf[sem][n]), vmax=-4.0, vmin=-20)
        vari_funcs.no_ticks()
        plt.title(sem+str(n))
#    print(np.nanmin(np.log(psf[sem][n])))
### Plot FWHM curves ###
# The four quadrant panels are identical apart from the data series, so
# drive them from a loop instead of four copy-pasted blocks.
plt.figure(1, figsize=[9,6])
for quad_num, quad_fwhm in enumerate([avgFWHM1, avgFWHM2, avgFWHM3, avgFWHM4],
                                     start=1):
    plt.subplot(2, 2, quad_num)
    plt.plot(t, quad_fwhm, 'o')
    plt.ylim(ymax=0.95, ymin=0.780)
    plt.xticks(t, semesters)
    plt.ylabel('FWHM')
    plt.xlabel('Semester')
plt.tight_layout()
### Plot median flux curves ###
# Same DRY refactor as the FWHM figure: one loop over the four quadrants.
plt.figure(2, figsize=[9,6])
for quad_num, quad_flux in enumerate([avgflux1, avgflux2, avgflux3, avgflux4],
                                     start=1):
    plt.subplot(2, 2, quad_num)
    plt.plot(t, quad_flux, 'o')
    plt.ylabel('Median Flux of stars')
    plt.ylim(ymax=1.03, ymin=0.97)
    plt.xticks(t, semesters)
    plt.xlabel('Semester')
plt.tight_layout()
### Plot radial profiles ###
# One panel per quadrant, one curve per semester.
plt.figure(3, figsize=[12,9])
for sem in sqrtrp:
    for n in sqrtrp[sem]:
        plt.subplot(2,2,n)
        plt.plot(r, sqrtrp[sem][n], label=sem)
        plt.xlabel('Radius (arcsec)')
        plt.ylabel('sqrt(Flux)')
        # NOTE(review): ymax/ymin keywords were removed from modern
        # matplotlib ylim (use top/bottom) — confirm pinned version.
        plt.ylim(ymax=0.16, ymin=0)
        plt.legend()
plt.tight_layout()
### Plot aper flux curves ###
# Aperture flux per quadrant across semesters.
plt.figure(4, figsize=[9,6])
for n in [1,2,3,4]:
    plt.subplot(2,2,n)
    plt.plot(t,aperflux[n],'o')
    plt.ylabel('Aperture Flux of PSF')
    plt.ylim(ymax=0.96, ymin=0.925)
    plt.xticks(t, semesters)
    plt.xlabel('Semester')
plt.tight_layout()
|
import torch
from collections import Counter
from typing import Iterable, Union, Dict
class DropoutMC(torch.nn.Module):
    """Dropout layer that can stay active at inference time, enabling
    Monte-Carlo dropout sampling."""

    def __init__(self, p: float, activate=False):
        super().__init__()
        self.activate = activate  # force dropout even when not training
        self.p = p
        self.p_init = p  # construction-time rate, kept for later reset

    def forward(self, x: torch.Tensor):
        apply_dropout = self.training or self.activate
        return torch.nn.functional.dropout(x, self.p, training=apply_dropout)
class LockedDropoutMC(DropoutMC):
    """
    Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.
    """

    def __init__(self, p: float, activate: bool = False, batch_first: bool = True):
        super().__init__(p, activate)
        self.batch_first = batch_first

    def forward(self, x):
        # Train mode always implies an active dropout state.
        if self.training:
            self.activate = True
        if not (self.activate and self.p):
            return x
        # One Bernoulli mask shared across the time dimension.
        if self.batch_first:
            noise = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.p)
        else:
            noise = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.p)
        keep_mask = torch.autograd.Variable(noise, requires_grad=False) / (1 - self.p)
        return keep_mask.expand_as(x) * x
class WordDropoutMC(DropoutMC):
    """
    Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.
    """

    def forward(self, x):
        # Train mode always implies an active dropout state.
        if self.training:
            self.activate = True
        if not (self.activate and self.p):
            return x
        # One Bernoulli draw per word position, broadcast over embedding dim.
        noise = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.p)
        keep_mask = torch.autograd.Variable(noise, requires_grad=False)
        return keep_mask * x
# Mapping from a layer's class name (as reported by Module._get_name())
# to the MC-dropout replacement used by convert_to_mc_dropout().
MC_DROPOUT_SUBSTITUTES = {
    "Dropout": DropoutMC,
    "LockedDropout": LockedDropoutMC,
    "WordDropout": WordDropoutMC,
}
def convert_to_mc_dropout(
    model: torch.nn.Module, substitution_dict: Dict[str, torch.nn.Module] = None
):
    """Recursively replace dropout layers in `model` (in place) with their
    MC-dropout equivalents.

    Args:
        model: module tree to convert; modified in place.
        substitution_dict: maps layer class names to replacement factories
            accepting ``p`` and ``activate`` keywords. Defaults to
            ``MC_DROPOUT_SUBSTITUTES``.
    """
    if substitution_dict is None:
        # BUG FIX: calling with the default previously crashed on
        # `None.keys()`; fall back to the module-level substitution table.
        substitution_dict = MC_DROPOUT_SUBSTITUTES
    for i, layer in enumerate(list(model.children())):
        # flair layers store the rate as `dropout_rate`, torch uses `p`.
        proba_field_name = "dropout_rate" if "flair" in str(type(layer)) else "p"
        module_name = list(model._modules.items())[i][0]
        layer_name = layer._get_name()
        if layer_name in substitution_dict:
            model._modules[module_name] = substitution_dict[layer_name](
                p=getattr(layer, proba_field_name), activate=False
            )
        else:
            # Not a known dropout layer: recurse into its children.
            convert_to_mc_dropout(model=layer, substitution_dict=substitution_dict)
def activate_mc_dropout(
    model: torch.nn.Module, activate: bool, random: float = 0.0, verbose: bool = False
):
    """Recursively toggle MC-dropout state on every DropoutMC layer.

    When activating with a non-zero `random`, that value overrides the
    dropout rate; deactivating restores the construction-time rate.
    """
    for child in model.children():
        if not isinstance(child, DropoutMC):
            # Not a dropout layer: descend into its submodules.
            activate_mc_dropout(
                model=child, activate=activate, random=random, verbose=verbose
            )
            continue
        if verbose:
            print(child)
            print(f"Current DO state: {child.activate}")
            print(f"Switching state to: {activate}")
        child.activate = activate
        if activate and random:
            child.p = random
        if not activate:
            child.p = child.p_init
|
import numpy as np
import pandas as pd
from math import radians, cos, sin, asin, sqrt
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
def haversine(a, b):
    """Great-circle distance in whole kilometres between two (lat, lon)
    points given in degrees."""
    earth_radius_km = 6371
    lat1, lon1 = radians(a[0]), radians(a[1])
    lat2, lon2 = radians(b[0]), radians(b[1])
    # Haversine of the central angle between the two points.
    half_chord = (
        sin((lat2 - lat1) / 2) ** 2
        + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    )
    return round(2 * earth_radius_km * asin(sqrt(half_chord)))
def calcula_distancias(df):
    """Pairwise integer distance matrix (km) between every row's lat/lon."""
    coords = df[['latitude', 'longitude']]
    matriz = coords.apply(
        lambda origem: df.apply(lambda destino: haversine(origem, destino), axis=1),
        axis=1,
    )
    return [[int(distancia) for distancia in linha] for linha in matriz.values]
def gera_time_windows(df):
    """Per-node (start, end) service windows as integer tuples."""
    janelas = df[['inicio_janela', 'termino_janela']].values
    return [(int(inicio), int(termino)) for inicio, termino in janelas]
def gera_volumetria(df):
    """Volume demand per node, truncated to int."""
    return [int(volume) for volume in df.volumetria.values]
def gera_peso(df):
    """Weight demand per node, truncated to int."""
    return [int(peso) for peso in df.peso.values]
def create_data_model():
    """Assemble the routing-problem input dict from data.csv."""
    df = pd.read_csv('data.csv')
    data = {
        # Distance and travel time both derive from the haversine matrix.
        'distance_matrix': calcula_distancias(df),
        'time_matrix': calcula_distancias(df),
        'time_windows': gera_time_windows(df),
        'demands': gera_volumetria(df),
        'demands_p': gera_peso(df),
        'vehicle_capacities': [55, 80, 100],
        'vehicle_capacities_peso': [50, 70, 90],
        'num_vehicles': 3,
        'depot_capacity': 0,
        'vehicle_load_time': 0,
        'vehicle_unload_time': 0,
        'depot': 0,
    }
    return data
def print_solution(data, manager, routing, assignment):
    """Print each vehicle's route with its cumulative time windows.

    :param data: problem dict from create_data_model().
    :param manager: RoutingIndexManager mapping routing indices to nodes.
    :param routing: the solved RoutingModel.
    :param assignment: solution returned by SolveWithParameters().
    """
    time_dimension = routing.GetDimensionOrDie('Time')
    total_time = 0
    for vehicle_id in range(data['num_vehicles']):
        index = routing.Start(vehicle_id)
        plan_output = 'Rota por Veiculo {}:\n'.format(vehicle_id)
        while not routing.IsEnd(index):
            # Min/Max of the cumul var bound the feasible arrival window.
            time_var = time_dimension.CumulVar(index)
            plan_output += '{0} Tempo({1},{2}) -> '.format(
                manager.IndexToNode(index), assignment.Min(time_var),
                assignment.Max(time_var))
            index = assignment.Value(routing.NextVar(index))
        # Closing entry for the route's end node.
        time_var = time_dimension.CumulVar(index)
        plan_output += '{0} Tempo({1},{2})\n'.format(manager.IndexToNode(index),
                                                     assignment.Min(time_var),
                                                     assignment.Max(time_var))
        plan_output += 'Tempo da Rota: {}min\n'.format(
            assignment.Min(time_var))
        print(plan_output)
        total_time += assignment.Min(time_var)
    print('Tempo total de todas as rotas: {}min'.format(total_time))
def main():
    """Solve the VRP with time windows plus volume and weight capacity
    constraints, then print the per-vehicle routes."""
    data = create_data_model()
    manager = pywrapcp.RoutingIndexManager(len(data['time_matrix']),
                                           data['num_vehicles'], data['depot'])
    routing = pywrapcp.RoutingModel(manager)

    def time_callback(from_index, to_index):
        """Travel time between two routing indices."""
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return data['time_matrix'][from_node][to_node]

    transit_callback_index = routing.RegisterTransitCallback(time_callback)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    # Time dimension: no slack, horizon of 500, start cumul not forced to 0.
    time = 'Time'
    routing.AddDimension(
        transit_callback_index,
        0,
        500,
        False,
        time)
    time_dimension = routing.GetDimensionOrDie(time)
    # Apply each node's service window; the depot (node 0) is handled below.
    for location_idx, time_window in enumerate(data['time_windows']):
        if location_idx == 0:
            continue
        index = manager.NodeToIndex(location_idx)
        time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
    # Every vehicle must start within the depot's window.
    for vehicle_id in range(data['num_vehicles']):
        index = routing.Start(vehicle_id)
        time_dimension.CumulVar(index).SetRange(data['time_windows'][0][0],
                                                data['time_windows'][0][1])

    # Adiciona restrição de Volume
    def demand_callback(from_index):
        """Volume demand at a node."""
        from_node = manager.IndexToNode(from_index)
        return data['demands'][from_node]

    demand_callback_index = routing.RegisterUnaryTransitCallback(
        demand_callback)
    routing.AddDimensionWithVehicleCapacity(
        demand_callback_index,
        0,
        data['vehicle_capacities'],
        True,
        'Capacity')

    # Adiciona restrição de Peso
    def peso_callback(from_index):
        """Weight demand at a node."""
        from_node = manager.IndexToNode(from_index)
        return data['demands_p'][from_node]

    peso_callback_index = routing.RegisterUnaryTransitCallback(
        peso_callback)
    # BUG FIX: the weight dimension previously reused the *volume* callback
    # (demand_callback_index) and the already-registered 'Capacity' name,
    # so per-vehicle weight limits were never enforced.
    routing.AddDimensionWithVehicleCapacity(
        peso_callback_index,
        0,
        data['vehicle_capacities_peso'],
        True,
        'Peso')

    solver = routing.solver()
    # NOTE(review): these depot load/unload interval vars are created but no
    # cumulative constraint is posted over them, so they currently have no
    # effect — confirm whether solver.Cumulative(...) was intended here.
    intervals = []
    for i in range(data['num_vehicles']):
        intervals.append(
            solver.FixedDurationIntervalVar(
                time_dimension.CumulVar(routing.Start(i)),
                data['vehicle_load_time'], 'depot_interval'))
        intervals.append(
            solver.FixedDurationIntervalVar(
                time_dimension.CumulVar(routing.End(i)),
                data['vehicle_unload_time'], 'depot_interval'))

    # Prefer solutions with the earliest feasible start/end times.
    for i in range(data['num_vehicles']):
        routing.AddVariableMinimizedByFinalizer(
            time_dimension.CumulVar(routing.Start(i)))
        routing.AddVariableMinimizedByFinalizer(
            time_dimension.CumulVar(routing.End(i)))

    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    assignment = routing.SolveWithParameters(search_parameters)
    if assignment:
        print_solution(data, manager, routing, assignment)
    else:
        print('No solution found !')
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 16:03:29 2019
# Analysis Script for:
# Allocation of Spatial Attention (ASA) Vs Eye Movement Statistics (EMS),
# using visual behaviour upon the 'University' themed OSNS webpage.
#
# Tested and developed using python 3.7, in Ubuntu 18.04.2 'Bionic Beaver'
#
@author: Callum Woods
"""
#################################################
########### Import Modules
# basic underlying structures
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import os
# Model Evaluation Modules
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, LeaveOneOut
from sklearn.preprocessing import MinMaxScaler, StandardScaler
# Models
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.naive_bayes import GaussianNB
# We convert integers to float often, and this throws a conversion warning:
import warnings
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
from sklearn import preprocessing
#Custom Classes and Methods
from modules import *
######### Establish file locations
## Note:
# Please set current working directory to the location of this file first!
cwd = Path(os.getcwd())
feature_dir = Path(cwd / 'Data')
result_dir = Path(cwd / 'Results')
# define preprocessing function:
def preprocess_tobii_aoi_ouput(df):
    """Tidy a Tobii AOI-metrics export into tfd/ttff/visits feature columns.

    Drops aggregate (Sum/N) and non-central-element columns, removes the
    'All Recordings' summary row if present, and renames metric columns to
    short ``tfd_``/``ttff_``/``visits_`` prefixed names.

    Args:
        df: raw AOI metrics DataFrame indexed by recording/participant.

    Returns:
        DataFrame with total-fixation-duration, time-to-first-fixation and
        visit-count columns, in that order.
    """
    copy = df.copy()
    # drop Sum and N metrics as these are not informative
    copy = copy.loc[:, ~copy.columns.str.endswith('Sum')]
    copy = copy.loc[:, ~copy.columns.str.endswith('N')]
    # remove metrics not within the central five elements
    copy = copy.loc[:, ~copy.columns.str.endswith('Banner_Mean')]
    copy = copy.loc[:, ~copy.columns.str.endswith('Cover_Mean')]
    # remove the sum across all recordings
    # BUG FIX: DataFrame.drop raises KeyError (not IndexError) for a missing
    # label, so the original except clause never caught anything.
    try:
        copy.drop('All Recordings', inplace=True)
    except KeyError:
        pass
    # total fixation duration metrics
    tfd = copy.loc[:, copy.columns.str.startswith('Total Fixation')]
    # tidy column names: keep the last three underscore-separated parts
    tfd.columns = ['tfd_' + '_'.join(x.split('_')[-3:]) for x in tfd.columns]
    # visit count metrics
    visits = copy.loc[:, copy.columns.str.startswith('Visit')]
    # BUG FIX: visit column names were derived from tfd.columns, which
    # breaks whenever the two groups differ in length or ordering.
    visits.columns = ['visits_' + '_'.join(x.split('_')[-3:]) for x in visits.columns]
    # time to first fixation metrics (only valid for university webpage)
    ttff = copy.loc[:, copy.columns.str.startswith('Time to')]
    ttff.columns = ['ttff_' + '_'.join(x.split('_')[-3:]) for x in ttff.columns]
    return pd.concat([tfd, ttff, visits], axis=1)
####################################
##### START ANALYSIS
####################################
####################################
labels = pd.read_csv(Path(cwd / 'Data' / 'personality_labels.csv'),
index_col=[0])
# Step One: Establish feature groups
Feature_Groups = {}
## Statistical Descriptions: Model One
df = pd.read_csv(Path(feature_dir / 'Preprocessed_Statistical_Features.csv'), index_col = [0])
Feature_Groups['EMS'] = df.loc[:, df.columns.str.startswith('University')]
## AOI Descriptions: Models Two
Feature_Groups['ASA'] = preprocess_tobii_aoi_ouput(pd.read_csv(
Path(feature_dir / 'University_Page_AOI_Metrics.tsv'),
sep='\t',
index_col= [0],
na_values= '-'))
# Step Two: Process Features
## Statistical Features
## Here we remove features that reflect non-oculomotor properties
## e.g. sampling frequency or event filter parameters
## --> Saccade Minimum Duration
## We also drop repetitions of information e.g. Number of Saccadic Amplitudes == Number of Fixations
statistical_features = Process_Statistical_Features(features= Feature_Groups['EMS'],
labels = labels,
pattern_seen_first = None)
statistical_features.preprocess_features()
Feature_Groups['EMS'] = statistical_features.preprocessed_features_
## AOI Features
## Drop participants with poor tracking data
Feature_Groups['ASA'] = Feature_Groups['ASA'].drop(['P003',
'P009',
'P017'])
# replacing missing time to first fixation value with 30
Feature_Groups['ASA'] = Feature_Groups['ASA'].fillna(30)
# Step Three: Run the machine learning pipeline
# Init the class instance
algorithm_evaluation = Evaluate_Algorithms(Feature_Groups, labels)
# Assign the scaling method
# Use z-score standardisation for all feature groups.
algorithm_evaluation.scaler_ = preprocessing.StandardScaler()
# Assign the categorisation (binarisation) method
algorithm_evaluation.binariser_ = algorithm_evaluation.binarise_methods_['equal_width']
# Investigate the algorithms preloaded
algorithm_evaluation.algorithms_
# Investigate the algorithms hyperparameter space to be searched
algorithm_evaluation.all_parameters_
# add naive bayes classifier (single hyperparameter value, i.e. no real search)
algorithm_evaluation.algorithms_['naive_bayes'] = GaussianNB()
algorithm_evaluation.all_parameters_['naive_bayes'] = {'var_smoothing':[float(1e-09)]}
# print baseline accuracy scores for each class
algorithm_evaluation.print_baseline_accuracy()
## Label Metrics
# Binarise each personality score into high/low classes.
dichotimised_labels = algorithm_evaluation.binariser_.fit_transform(labels)
# get metrics describing distribution of personality scores per class
count = 0
label_category = {}
for col in labels.columns:
    # split the raw scores by their binarised class membership
    label_category[labels.columns[count] + '_high'] = labels[col].values[dichotimised_labels.T[count].astype('bool')]
    label_category[labels.columns[count] + '_low'] = labels[col].values[~ dichotimised_labels.T[count].astype('bool')]
    count += 1
for key, value in label_category.items():
    print(f"{key} mean = {value.mean()}, \n"
          f"{key} std = {value.std()} \n ----")
##### Run algorithm evaluation pipeline (leave-one-out cross-validation)
algorithm_results = algorithm_evaluation.algorithm_loo_pipeline()
# Record results
print(algorithm_evaluation.algorithm_accuracy_.T)
#algorithm_evaluation.algorithm_accuracy_.T.to_csv('algorithm_accuracy_by_model.csv')
#################################################
########## Step Four: Investigate Results
# Best algorithm/feature-group combination per label.
top_combinations = algorithm_evaluation.algorithm_accuracy_.idxmax()
for col in algorithm_evaluation.algorithm_accuracy_.columns:
    print("----- \n"
          f"Top 5 algorithm and feature group combinations for {col} \n"
          f"{algorithm_evaluation.algorithm_accuracy_.sort_values(by = col, ascending=False)[col][:5]}"
          " ----- \n")
# Investigate what parameters are best for the best combinations
algorithm_summary = algorithm_evaluation.get_algorithm_summary()
#algorithm_evaluation.get_algorithm_summary()
#################################################
########## Step Five: Build Best Models
## Manual visualisation of results is required
# Here, the 'algorithm_summary' attribute tells us that:
# Label | Algorithm| Feature Group | Param
# Agreeableness | 'svm' | One | C = 0.01
# Conscientiousness| 'knn' | Two | k=1
# Extroversion | 'knn' | Two | k=1
# Neuroticism | 'ridge' | One | alpha = 0.1
# Openness | 'knn' | One | k = 3
## However, we are also interested not just in the best model, but which
# feature groups can predict what traits above chance. So, we create:
"""
| Label | Algorithm | Feature Group | Expected Accuracy |
|-------------------|--------------------|---------------------------------|-------------------|
| Conscientiousness | knn (k=1) | ‘Spatial attention statistics’ | 0.685714 |
| | Knn (k=1) | ‘Eye movement statistics’ | 0.628571 |
| | Naive_Bayes | 'Spatial attention statistics' | 0.657143 |
|-------------------|--------------------|---------------------------------|-------------------|
| Extroversion | Knn (k=1) | ‘Spatial attention statistics’ | 0.571429 |
|-------------------|--------------------|---------------------------------|-------------------|
| Neuroticism | Ridge (alpha =0.1) | ‘Eye movement statistics’ | 0.714286 |
| | Svm (C = 4) | ‘Eye movement statistics’ | 0.657143 |
| | Naive_Bayes | 'Eye movement statistics' | 0.657143 |
|-------------------|--------------------|---------------------------------|-------------------|
| Openness | Knn (k=3) | ‘Eye movement statistics’ | 0.714286 |
| | Naive_Bayes | 'Eye movement statistics' | 0.571429 |
"""
# Per-label winning estimator, in the same row order as the summary above.
algorithm_summary['Best_Models'] = [
    SVC(C=0.01, kernel= 'linear'), # note we don't enable probability here,
    KNeighborsClassifier(n_neighbors=1), # as decision function is fine for AUROC
    KNeighborsClassifier(n_neighbors=1),
    RidgeClassifier(alpha=0.1),
    KNeighborsClassifier(n_neighbors=3)
]
# Additional models to evaluate (extra rows for the algorithm summary).
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat([...]) is the supported, behaviour-equivalent replacement.
new_entries = pd.DataFrame(columns=algorithm_summary.columns)
# extra conscientiousness models: Knn and Bayes
consc_knn = [('knn', 'EMS'),
             0,
             0.629,
             KNeighborsClassifier(n_neighbors=1)]
consc_bayes = [('naive_bayes', 'ASA'),
               0,
               0.657,
               GaussianNB()]
extra_consc_models = pd.DataFrame(
    data=[consc_knn, consc_bayes],
    index=['Conscientiousness', 'Conscientiousness'],
    columns=algorithm_summary.columns
)
new_entries = pd.concat([new_entries, extra_consc_models])
# extra neuroticism models: svm and bayes
neurot_svm = [('svm', 'EMS'),
              0,
              0.657,
              SVC(C=4, kernel='linear')]
neurot_bayes = [('naive_bayes', 'EMS'),
                0,
                0.657,
                GaussianNB()]
extra_neurot_models = pd.DataFrame(
    data=[neurot_svm, neurot_bayes],
    index=['Neuroticism', 'Neuroticism'],
    columns=algorithm_summary.columns
)
new_entries = pd.concat([new_entries, extra_neurot_models])
# extra openness models: bayes
openness_bayes = [('naive_bayes', 'EMS'),
                  0,
                  0.571,
                  GaussianNB()]
extra_openness_models = pd.DataFrame(
    data=[openness_bayes],
    columns=algorithm_summary.columns,
    index=['Openness']
)
new_entries = pd.concat([new_entries, extra_openness_models])
# add these to the algorithm summary
algorithm_summary = pd.concat([algorithm_summary, new_entries])
# update the evaluation object with the enriched summary (incl. 'Best_Models')
algorithm_evaluation.algorithm_summary_ = algorithm_summary
## Model Evaluation
# Evaluate the hand-picked best models with leave-one-out CV.
final_models = Evaluate_Models(feature_groups=Feature_Groups,
                               labels=labels,
                               algorithm_summary=algorithm_summary)
#final_models.binarise_methods_
final_models.scaler_ = preprocessing.StandardScaler()
final_models.binariser_ = final_models.binarise_methods_['equal_width']
final_models.model_loo_pipeline()
## ROC curve for the two conscientiousness models
final_models.roc_curve(X=Feature_Groups['ASA'],
                       y=labels['Conscientiousness'],
                       classifier=KNeighborsClassifier(n_neighbors=1),
                       n_folds=10,
                       title="Conscientiousness: ASA Metrics with k-NN (k=1)")
final_models.roc_curve(X=Feature_Groups['EMS'],
                       y=labels['Conscientiousness'],
                       classifier=KNeighborsClassifier(n_neighbors=1),
                       n_folds=10,
                       title="Conscientiousness: EMS Metrics with k-NN (k=1)")
final_models.roc_curve(X=Feature_Groups['EMS'],
                       y=labels['Neuroticism'],
                       classifier=RidgeClassifier(alpha=0.1),
                       n_folds=10,
                       title="Neuroticism: EMS Metrics with Ridge (alpha=0.1)"
                       )
final_models.roc_curve(X=Feature_Groups['EMS'],
                       y=labels['Openness'],
                       classifier=KNeighborsClassifier(n_neighbors=3),
                       n_folds=10,
                       # BUG FIX: the title previously read "Ridge (alpha=0.1)"
                       # although the classifier plotted here is k-NN with k=3.
                       title="Openness: EMS Metrics with k-NN (k=3)"
                       )
|
from multiprocessing import Process
import time


class ClockProcess(Process):
    """A Process that, once started, sleeps ``interval`` seconds in the child."""

    def __init__(self, interval):
        # BUG FIX: the original passed a *called* function as target
        # (ClockProcess(run(3))), which executed run() in the parent before
        # the child even started, and its run() lacked `self`.
        # Proper pattern: store the interval and override run().
        super().__init__()
        self.interval = interval

    def run(self):
        """Executed in the child process after start()."""
        print('子进程开始执行{}'.format(time.ctime()))
        time.sleep(self.interval)
        print('子进程结束执行{}'.format(time.ctime()))


if __name__ == '__main__':
    p = ClockProcess(3)
    p.start()  # spawns the child, which invokes run()
    p.join()
    print('主进程结束')
|
"""
count = 0
while True:
i = input("入力>")
if i == "":
print("終了します")
break
print(f"入力:{i}")
count += 1
print(count)
"""
"""
while True:
print("よろしいですか?")
i = input("入力>")
if i == "YES" or i == "はい":
print("終了します")
break
print(f"入力:{i}")
"""
"""
import turtle
t = turtle.Pen()
while True:
i = input("turtle>")
if i == "forward":
t.forward(100)
elif i == "left":
t.left(90)
elif i == "right":
t.right(90)
elif i == "turn":
t.left(180)
elif i == "quit":
break
"""
"""
import random
value = random.choice(range(0, 37))
print(value)
"""
"""
import random
value = random.choice([1, 2, 3, 4, 4, 5, 5, 6, 6, 6])
print(value)
"""
import random
number = random.choice(range(1, 100))
count = 0
while True:
value = int(input("数字?"))
count += 1
if value == number:
print("正解です")
print(f"入力回数:{count}回")
break
elif value > number:
print("大きすぎます")
else:
print("小さすぎます")
# if value - number == 1 or value - number == -1:
if abs(value - number) == 1: # abs は絶対値
print("1だけ違います")
if value % number == 0:
print("正解の数字で割り切れる")
if number % value == 0:
print("正解の数字の倍数")
# |
#!/usr/bin/env python3
"""
Dispense a fixed volume (by time) of water from the spouts.
"""
from reach.backends.raspberrypi import Utilities
# reward_duration is the valve-open time in seconds per dispense.
rpi = Utilities(reward_duration=0.070)
rpi.enable_leds()
rpi.dispense_reward_volume()
# release GPIO resources before exiting
rpi.cleanup()
|
import inspect
from collections import OrderedDict
from django.db import models
from rest_framework.serializers import ValidationError
from landscapesim.common import config
from landscapesim.common.types import default_num_to_empty_or_int, bool_to_empty_or_yes
from landscapesim.serializers import scenarios as serializers
class BaseImportSerializer(object):
    """
    Base serializer for transforming serialized data into csv sheets.
    We take inspiration from rest_framework's BaseSerializer and use our own mapping routine,
    defined in landscapesim.io.config
    """
    drf_serializer = None   # rest_framework serializer class used for validation
    sheet_map = ()          # ordered (internal_name, ssim_name) pairs

    def __init__(self, initial_data):
        self._initial_data = initial_data
        self._many = type(initial_data) is list

    def _ignore(self, val):
        # bookkeeping fields that are never exported to the csv sheet
        return val in ['id', 'scenario']

    def _transform(self, data):
        """
        Transforms the names in the model to the names used in STSim.
        :return: An OrderedDict of initial_data with fieldnames ordered how they should be imported.
        """
        transformed_data = OrderedDict()
        # Fill fields from validated data
        for internal_key, ssim_name in self.sheet_map:
            for config_key, config_data in data.items():
                if self._ignore(config_key):
                    continue
                if config_key == internal_key:
                    transformed_data[ssim_name] = config_data if config_data is not None else ''
                    # Convert pk-related fields to the value of its name field.
                    # FIX: the original tested `inspect.isclass(type(x)) and
                    # issubclass(type(x), models.Model)` — type(x) is always a
                    # class, so isinstance() is the equivalent, simpler check.
                    if isinstance(transformed_data[ssim_name], models.Model):
                        transformed_data[ssim_name] = transformed_data[ssim_name].name
                    # Convert bools to Yes or empty string
                    if type(transformed_data[ssim_name]) is bool:
                        transformed_data[ssim_name] = bool_to_empty_or_yes(transformed_data[ssim_name])
                    # Filter default integers (-1) to empty string.
                    # NOTE(review): the original comment mentioned floats too,
                    # but only ints are handled — confirm whether floats need it.
                    elif type(transformed_data[ssim_name]) is int:
                        transformed_data[ssim_name] = default_num_to_empty_or_int(transformed_data[ssim_name])
                    break
        if len(list(transformed_data.keys())) != len(self.sheet_map):
            # Determine missing keys
            missing = [x[0] for x in self.sheet_map if x[0] not in transformed_data.keys()]
            raise ValidationError("Invalid conversion occurred. Didn't satisfy all values stored in this serializer's "
                                  "sheet_map configuration. Missing: {}".format(', '.join(missing)))
        return transformed_data

    @property
    def validated_data(self):
        """
        Validates and transforms (lists of) data to match importable csv data.
        :return: a transformed OrderedDict, or a list of them when input was a list
        """
        deserialized_data = self.drf_serializer(data=self._initial_data, many=self._many)
        if deserialized_data.is_valid(True):
            deserialized_data = deserialized_data.validated_data
            if self._many:
                return [self._transform(x) for x in deserialized_data]
            else:
                return self._transform(deserialized_data)
        else:
            raise ValidationError("Error deserializing drf_serializer.")
# One import serializer per STSim sheet: each pairs a DRF serializer (for
# validation) with the sheet_map that renames/orders fields for csv import.
class DistributionValueImport(BaseImportSerializer):
    drf_serializer = serializers.DistributionValueSerializer
    sheet_map = config.DISTRIBUTION_VALUE


class OutputOptionImport(BaseImportSerializer):
    drf_serializer = serializers.OutputOptionSerializer
    sheet_map = config.OUTPUT_OPTION


class RunControlImport(BaseImportSerializer):
    drf_serializer = serializers.RunControlSerializer
    sheet_map = config.RUN_CONTROL


class DeterministicTransitionImport(BaseImportSerializer):
    drf_serializer = serializers.DeterministicTransitionSerializer
    sheet_map = config.DETERMINISTIC_TRANSITION


class TransitionImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionSerializer
    sheet_map = config.TRANSITION


class InitialConditionsNonSpatialImport(BaseImportSerializer):
    drf_serializer = serializers.InitialConditionsNonSpatialSerializer
    sheet_map = config.INITIAL_CONDITIONS_NON_SPATIAL


class InitialConditionsNonSpatialDistributionImport(BaseImportSerializer):
    drf_serializer = serializers.InitialConditionsNonSpatialDistributionSerializer
    sheet_map = config.INITIAL_CONDITIONS_NON_SPATIAL_DISTRIBUTION


class InitialConditionsSpatialImport(BaseImportSerializer):
    drf_serializer = serializers.InitialConditionsSpatialSerializer
    sheet_map = config.INITIAL_CONDITIONS_SPATIAL


class TransitionTargetImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionTargetSerializer
    sheet_map = config.TRANSITION_TARGET


class TransitionMultiplierValueImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionMultiplierValueSerializer
    sheet_map = config.TRANSITION_MULTIPLIER_VALUE


class TransitionSizeDistributionImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionSizeDistributionSerializer
    sheet_map = config.TRANSITION_SIZE_DISTRIBUTION


class TransitionSizePrioritizationImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionSizePrioritizationSerializer
    sheet_map = config.TRANSITION_SIZE_PRIORITIZATION


class TransitionSpatialMultiplierImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionSpatialMultiplierSerializer
    sheet_map = config.TRANSITION_SPATIAL_MULTIPLIER


class StateAttributeValueImport(BaseImportSerializer):
    drf_serializer = serializers.StateAttributeValueSerializer
    sheet_map = config.STATE_ATTRIBUTE_VALUE


class TransitionAttributeValueImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionAttributeValueSerializer
    sheet_map = config.TRANSITION_ATTRIBUTE_VALUE


class TransitionAttributeTargetImport(BaseImportSerializer):
    drf_serializer = serializers.TransitionAttributeTargetSerializer
    sheet_map = config.TRANSITION_ATTRIBUTE_TARGET
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 12:41:22 2020
@author: JGSHIN
"""
import cv2 as cv
import os
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.models import Model
#%%
#def img_clip()
def get_model(layer='fc2'):
    """Return a VGG16-based feature extractor truncated at ``layer``.

    Loads ImageNet weights with the classifier head included, then exposes
    the activations of the requested layer (default: the 'fc2' dense layer).
    """
    backbone = VGG16(weights='imagenet', include_top=True)
    extractor = Model(inputs=backbone.input,
                      outputs=backbone.get_layer(layer).output)
    return extractor
"""
from google.colab import drive
drive.mount('\\content\\drive')
"""
def get_files(path_to_files, size):
    """Load and resize every image in *path_to_files*.

    :param path_to_files: directory containing the image files
    :param size: (width, height) tuple passed to cv.resize
    :return: dict mapping filename -> resized image array
    """
    images_by_name = {}
    for fname in os.listdir(path_to_files):
        # BUG FIX: the original concatenated `path_to_files + file`, which
        # breaks when the directory path lacks a trailing separator;
        # os.path.join handles both cases.
        images_by_name[fname] = cv.resize(cv.imread(os.path.join(path_to_files, fname)), size)
    return images_by_name
def feature_vector(img_arr, model):
    """Compute the model's feature vector for a single image.

    :param img_arr: HxW (grayscale), HxWx1 or HxWx3 image array
    :param model: a Keras model whose predict() takes a (1, H, W, 3) batch
    :return: 1-D feature vector (first row of the prediction)
    """
    # Generalisation: accept 2-D grayscale arrays by adding a channel axis
    # (the original raised IndexError on img_arr.shape[2] for 2-D input).
    if img_arr.ndim == 2:
        img_arr = img_arr[:, :, np.newaxis]
    # Replicate a single channel to the 3 channels VGG16 expects.
    if img_arr.shape[2] == 1:
        img_arr = img_arr.repeat(3, axis=2)
    # Add the batch axis: (H, W, 3) -> (1, H, W, 3)
    arr4d = np.expand_dims(img_arr, axis=0)
    arr4d_pp = preprocess_input(arr4d)
    return model.predict(arr4d_pp)[0, :]
def feature_vectors(imgs_dict, model):
    """Map each filename in *imgs_dict* to its model feature vector."""
    return {name: feature_vector(image, model)
            for name, image in imgs_dict.items()}
|
# -*- coding: utf-8 -*-
"""
@created on: 02/16/20,
@author: Shreesha N,
@version: v0.0.1
@system name: badgod
Description:
..todo::
"""
from alcoaudio.experiments.convnet_runner import ConvNetRunner
from alcoaudio.utils.class_utils import AttributeDict
import json
import argparse
def _str2bool(value):
    """Interpret common true/false spellings from the command line."""
    return str(value).lower() in ('true', '1', 'yes')


def parse():
    """Parse command-line arguments for the alcoaudio runner.

    :return: argparse.Namespace with train_net, test_net, configs_file
    """
    parser = argparse.ArgumentParser(description="alcoaudio_configs")
    # BUG FIX: `type=bool` is broken for CLI flags — bool('False') is True,
    # so `--train_net False` enabled training. _str2bool parses the text.
    parser.add_argument('--train_net', type=_str2bool)
    parser.add_argument('--test_net', type=_str2bool)
    parser.add_argument('--configs_file', type=str)
    args = parser.parse_args()
    return args
def run(args):
    """Instantiate the network runner and launch the requested phases."""
    runner = ConvNetRunner(args=args)
    if args.train_net:
        runner.train()
    if args.test_net:
        runner.test()
if __name__ == '__main__':
    args = parse().__dict__
    # NOTE(review): the file handle from open() is never closed —
    # consider a with-block.
    configs = json.load(open(args['configs_file']))
    # CLI arguments override same-named keys from the JSON config.
    configs = {**configs, **args}
    configs = AttributeDict(configs)
    run(configs)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 21:44:50 2018
@author: kwctl
"""
import sys
import cv2
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.uic import loadUi
from PyQt5.QtCore import QTimer
from PyQt5 import QtCore
from PyQt5.QtGui import QImage, QPixmap
import math
import numpy as np
import pyautogui
# Module-level gesture-state flags.
# NOTE(review): update_frame assigns a *local* check1 (there is no `global`
# statement), so these module globals appear never to be updated — confirm.
check1 = 0
check2 = 0
check1_1 = 0
f = 0
class wook(QDialog):
    """Webcam dialog: tracks a hand inside a fixed ROI, counts raised fingers
    via convexity defects, and moves the mouse cursor with one finger."""

    def __init__(self):
        super(wook, self).__init__()
        loadUi('C:\\Users\\kwctl\\Desktop\\wook1.ui', self)
        self.image = None           # latest raw camera frame (BGR)
        self.processedImage = None  # Canny edge image when enabled
        self.startButton.clicked.connect(self.start_webcam)
        self.stopButton.clicked.connect(self.stop_webcam)
        self.cannyButton.toggled.connect(self.canny_webcam)
        self.cannyButton.setCheckable(True)
        self.canny_Enabled = False

    def canny_webcam(self, status):
        """Toggle the Canny edge preview; `status` is the button's checked state."""
        if status:
            self.canny_Enabled = True
            self.cannyButton.setText('Canny stop')
        else:
            # BUG FIX: this branch previously set canny_Enabled = True as
            # well, so the edge preview could never be switched off.
            self.canny_Enabled = False
            self.cannyButton.setText('Canny')

    def start_webcam(self):
        """Open the default camera and refresh the view every 30 ms."""
        self.capture = cv2.VideoCapture(0)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(30)

    def update_frame(self):
        """Grab a frame, detect the hand in the ROI and react to the gesture."""
        ret, self.image = self.capture.read()
        frame = self.image
        # Region of interest where the hand is expected.
        cv2.rectangle(frame, (0, 200), (300, 480), (0, 255, 0), 0)
        crop_image = frame[200:480, 0:300]
        blur = cv2.GaussianBlur(crop_image, (3, 3), 0)
        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        # Skin-tone mask in HSV space.
        mask2 = cv2.inRange(hsv, np.array([0, 20, 50]), np.array([20, 255, 180]))
        kernel = np.ones((5, 5))
        dilation = cv2.dilate(mask2, kernel, iterations=2)
        erosion = cv2.erode(dilation, kernel, iterations=3)
        filtered = cv2.GaussianBlur(erosion, (3, 3), 0)
        ret, thresh = cv2.threshold(filtered, 127, 255, 0)
        # Find contours
        # NOTE(review): 3-value unpacking is the OpenCV 3.x API; OpenCV 2/4
        # return (contours, hierarchy) only — confirm the installed version.
        image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        try:
            # Find contour with maximum area
            contour = max(contours, key=lambda x: cv2.contourArea(x))
            M = cv2.moments(contour)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            # +200 converts ROI coordinates back to full-frame coordinates.
            centroid = (cx, cy + 200)
            topmost = tuple(contour[contour[:, :, 1].argmin()][0])
            px = topmost[0]
            py = topmost[1] + 200
            topmost = (px, py)
            cv2.circle(frame, centroid, 5, [0, 255, 255], -1)
            cv2.circle(frame, topmost, 5, [0, 255, 255], -1)
            cv2.line(frame, centroid, topmost, [255, 0, 0], 4)
            # Create bounding rectangle around the contour
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(crop_image, (x, y), (x + w, y + h), (0, 0, 255), 0)
            cx = centroid[0]
            cy = centroid[1] + 200
            # Find convex hull
            hull = cv2.convexHull(contour)
            # Draw contour
            drawing = np.zeros(crop_image.shape, np.uint8)
            cv2.drawContours(drawing, [contour], -1, (0, 255, 0), 0)
            cv2.drawContours(drawing, [hull], -1, (0, 0, 255), 0)
            # Find convexity defects
            hull = cv2.convexHull(contour, returnPoints=False)
            defects = cv2.convexityDefects(contour, hull)
            # Use cosine rule to find angle of the far point from the start and
            # end point i.e. the convex points (the finger tips) for all defects
            count_defects = 0
            for i in range(defects.shape[0]):
                s, e, f, d = defects[i, 0]
                start = tuple(contour[s][0])
                end = tuple(contour[e][0])
                far = tuple(contour[f][0])
                a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
                b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
                c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
                angle = (math.acos((b**2 + c**2 - a**2) / (2 * b * c)) * 180) / 3.14
                # A defect with angle <= 90 degrees is counted as a finger gap.
                if angle <= 90:
                    count_defects += 1
                    cv2.circle(crop_image, far, 1, [0, 0, 255], -1)
                cv2.line(crop_image, start, end, [0, 255, 0], 2)
            # Map fingertip position from ROI scale to full-screen coordinates.
            px = int(px * 1920 / 300)
            py = py - 200
            py = int(py * 1080 / 280)
            if count_defects == 0:
                cv2.putText(frame, "ONE", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                # One raised finger moves the mouse cursor.
                pyautogui.moveTo(px, py, 0.15)
                print((px, py))
            elif count_defects == 1:
                cv2.putText(frame, "TWO", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
            elif count_defects == 2:
                cv2.putText(frame, "THREE", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
            elif count_defects == 3:
                cv2.putText(frame, "FOUR", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                # NOTE(review): this assigns a *local* check1, not the module
                # global — confirm whether a `global check1` was intended.
                check1 = 1
            elif count_defects == 4:
                cv2.putText(frame, "FIVE", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
            else:
                pass
        # NOTE(review): bare except silently swallows every error in the
        # detection path (e.g. when no contour is found) — consider narrowing.
        except:
            pass
        self.image = frame
        self.displayImage(self.image, 1)
        if (self.canny_Enabled):
            gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY) if len(self.image.shape) >= 3 else self.image
            self.processedImage = cv2.Canny(gray, 100, 200)
            self.displayImage(self.processedImage, 2)

    def stop_webcam(self):
        """Stop the refresh timer (the camera handle stays open)."""
        self.timer.stop()
        pass

    def displayImage(self, img, window=1):
        """Render a numpy image into one of the two QLabel slots."""
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:  # [0]=rows, [1]=cols [2]=chaneels
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        # BGR>>RGB
        outImage = outImage.rgbSwapped()
        if window == 1:
            self.imgLabel.setPixmap(QPixmap.fromImage(outImage))
            self.imgLabel.setScaledContents(True)
        if window == 2:
            self.processedLabel.setPixmap(QPixmap.fromImage(outImage))
            self.processedLabel.setScaledContents(True)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = wook()
    # NOTE(review): window title is unprofessional — consider renaming.
    window.setWindowTitle('fuck')
    window.show()
    sys.exit(app.exec_())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 15 12:21:58 2020
@author: adeela
"""
|
class Solution(object):
    """Mountain-array check using rise/fall phase flags."""

    def validMountainArray(self, arr):
        """Return True if arr strictly increases then strictly decreases."""
        size = len(arr)
        if size < 2:
            return False
        rising = 0
        falling = 0
        for idx in range(size - 1):
            cur, nxt = arr[idx], arr[idx + 1]
            if cur == nxt:
                # plateaus are never part of a strict mountain
                return False
            if cur < nxt:
                # an ascent after the descent began is not a mountain
                if falling:
                    return False
                rising = 1
            else:
                # a descent before any ascent is not a mountain
                if not rising:
                    return False
                falling = 1
        # both phases must have occurred
        return rising == 1 and falling == 1
class Solution(object):
    """Mountain-array check via a single walk: climb up, then climb down."""

    def validMountainArray(self, A):
        """Return True if A strictly rises to a peak then strictly falls."""
        last = len(A) - 1
        pos = 0
        # ascend while the next element is strictly larger
        while pos < last and A[pos] < A[pos + 1]:
            pos += 1
        # the peak may not sit at either end
        if pos == 0 or pos == last:
            return False
        # descend while the next element is strictly smaller
        while pos < last and A[pos] > A[pos + 1]:
            pos += 1
        # a valid mountain consumes the whole array
        return pos == last
from django.conf.urls import include, url
from .views import (
IssuesView, IssueView, IssueSummaryView, IssuePreviewView,
CreatedIssueView,
SimpleAjaxIssueView, FilesAjaxIssueView, CommitAjaxIssueView,
IssueEditState, IssueEditTitle, IssueEditBody,
IssueEditMilestone, IssueEditAssignees, IssueEditLabels, IssueEditProjects,
IssueEditRequestedReviewers,
IssueCreateView, AskFetchIssueView,
IssueCommentCreateView, PullRequestCommentCreateView, CommitCommentCreateView,
IssueCommentView, PullRequestCommentView, CommitCommentView,
IssueCommentEditView, PullRequestCommentEditView, CommitCommentEditView,
IssueCommentDeleteView, PullRequestCommentDeleteView, CommitCommentDeleteView,
IssuesFilterCreators, IssuesFilterAssigned, IssuesFilterClosers, IssuesFilterMentioned,
IssuesFilterRequestedReviewer,
PullRequestReviewCreateView, PullRequestReviewView, PullRequestReviewEditView,
IssueDeletePRBranch, CommitAjaxCompareView, CompareFilesAjaxIssueView,
)
urlpatterns = [
    url(r'^$', IssuesView.as_view(), name=IssuesView.url_name),

    url(r'^multiselect/', include('gim.front.repository.issues.multiselect.urls', namespace='multiselect')),

    # deferrable filters
    url(r'^filter/creators/', IssuesFilterCreators.as_view(), name=IssuesFilterCreators.url_name),
    url(r'^filter/assigned/', IssuesFilterAssigned.as_view(), name=IssuesFilterAssigned.url_name),
    # BUG FIX: this route previously reused IssuesFilterAssigned.url_name
    # (copy-paste), registering the wrong (duplicate) URL name.
    url(r'^filter/requested_reviewer/', IssuesFilterRequestedReviewer.as_view(), name=IssuesFilterRequestedReviewer.url_name),
    url(r'^filter/closers/', IssuesFilterClosers.as_view(), name=IssuesFilterClosers.url_name),
    url(r'^filter/mentioned/', IssuesFilterMentioned.as_view(), name=IssuesFilterMentioned.url_name),

    # issue views
    url(r'^(?P<issue_number>\d+)/$', IssueView.as_view(), name=IssueView.url_name),
    url(r'^(?P<issue_number>\d+)/summary/$', IssueSummaryView.as_view(), name=IssueSummaryView.url_name),
    url(r'^(?P<issue_number>\d+)/preview/$', IssuePreviewView.as_view(), name=IssuePreviewView.url_name),
    url(r'^(?P<issue_number>\d+)/ask-fetch/$', AskFetchIssueView.as_view(), name=AskFetchIssueView.url_name),
    url(r'^(?P<issue_number>\d+)/base-branch/delete/$', IssueDeletePRBranch.as_view(), name=IssueDeletePRBranch.url_name),

    # parts
    url(r'^(?P<issue_number>\d+)/(?:state/(?P<state_sha>[a-f0-9]{40})/)?files/$', FilesAjaxIssueView.as_view(), name='issue.files'),
    url(r'^(?P<issue_number>\d+)/state/(?P<state_sha>[a-f0-9]{40})/files/compare/(?P<other_state_sha>[a-f0-9]{40})/$', CompareFilesAjaxIssueView.as_view(), name='issue.files.compare'),
    url(r'^(?P<issue_number>\d+)/commits/$', SimpleAjaxIssueView.as_view(ajax_template_name='front/repository/issues/commits/include_issue_commits.html'), name='issue.commits'),
    url(r'^(?P<issue_number>\d+)/review/$', SimpleAjaxIssueView.as_view(ajax_template_name='front/repository/issues/comments/include_pr_review.html'), name='issue.review'),
    url(r'^(?P<issue_number>\d+)/commit/(?P<commit_sha>[a-f0-9]{40})/$', CommitAjaxIssueView.as_view(), name=CommitAjaxIssueView.url_name),

    # edit views
    url(r'^(?P<issue_number>\d+)/edit/state/$', IssueEditState.as_view(), name=IssueEditState.url_name),
    url(r'^(?P<issue_number>\d+)/edit/title/$', IssueEditTitle.as_view(), name=IssueEditTitle.url_name),
    url(r'^(?P<issue_number>\d+)/edit/body/$', IssueEditBody.as_view(), name=IssueEditBody.url_name),
    url(r'^(?P<issue_number>\d+)/edit/milestone/$', IssueEditMilestone.as_view(), name=IssueEditMilestone.url_name),
    url(r'^(?P<issue_number>\d+)/edit/assignees/$', IssueEditAssignees.as_view(), name=IssueEditAssignees.url_name),
    url(r'^(?P<issue_number>\d+)/edit/requested_reviewers/$', IssueEditRequestedReviewers.as_view(), name=IssueEditRequestedReviewers.url_name),
    url(r'^(?P<issue_number>\d+)/edit/labels/$', IssueEditLabels.as_view(), name=IssueEditLabels.url_name),
    url(r'^(?P<issue_number>\d+)/edit/projects/$', IssueEditProjects.as_view(), name=IssueEditProjects.url_name),

    # create views
    url(r'^create/$', IssueCreateView.as_view(), name=IssueCreateView.url_name),
    url(r'^created/(?P<issue_pk>\d+)/$', CreatedIssueView.as_view(), name=CreatedIssueView.url_name),

    # comments
    url(r'^(?P<issue_number>\d+)/comment/add/$', IssueCommentCreateView.as_view(), name=IssueCommentCreateView.url_name),
    url(r'^(?P<issue_number>\d+)/comment/(?P<comment_pk>\d+)/$', IssueCommentView.as_view(), name=IssueCommentView.url_name),
    url(r'^(?P<issue_number>\d+)/comment/(?P<comment_pk>\d+)/edit/$', IssueCommentEditView.as_view(), name=IssueCommentEditView.url_name),
    url(r'^(?P<issue_number>\d+)/comment/(?P<comment_pk>\d+)/delete/$', IssueCommentDeleteView.as_view(), name=IssueCommentDeleteView.url_name),

    # reviews
    url(r'^(?P<issue_number>\d+)/reviews/add/$', PullRequestReviewCreateView.as_view(), name=PullRequestReviewCreateView.url_name),
    url(r'^(?P<issue_number>\d+)/reviews/(?P<review_pk>\d+)/$', PullRequestReviewView.as_view(), name=PullRequestReviewView.url_name),
    url(r'^(?P<issue_number>\d+)/reviews/(?P<review_pk>\d+)/edit/$', PullRequestReviewEditView.as_view(), name=PullRequestReviewEditView.url_name),

    # code comments
    url(r'^(?P<issue_number>\d+)/code-comment/add/$', PullRequestCommentCreateView.as_view(), name=PullRequestCommentCreateView.url_name),
    url(r'^(?P<issue_number>\d+)/code-comment/(?P<comment_pk>\d+)/$', PullRequestCommentView.as_view(), name=PullRequestCommentView.url_name),
    url(r'^(?P<issue_number>\d+)/code-comment/(?P<comment_pk>\d+)/edit/$', PullRequestCommentEditView.as_view(), name=PullRequestCommentEditView.url_name),
    url(r'^(?P<issue_number>\d+)/code-comment/(?P<comment_pk>\d+)/delete/$', PullRequestCommentDeleteView.as_view(), name=PullRequestCommentDeleteView.url_name),

    # commits
    url(r'^(?P<issue_number>\d+)/commit/(?P<commit_sha>[a-f0-9]{40})/compare/(?P<other_commit_sha>[a-f0-9]{40})/$', CommitAjaxCompareView.as_view(), name=CommitAjaxCompareView.url_name),
    url(r'^(?P<issue_number>\d+)/commit/(?P<commit_sha>[a-f0-9]{40})/comment/add/$', CommitCommentCreateView.as_view(), name=CommitCommentCreateView.url_name),
    url(r'^(?P<issue_number>\d+)/commit/(?P<commit_sha>[a-f0-9]{40})/comment/(?P<comment_pk>\d+)/$', CommitCommentView.as_view(), name=CommitCommentView.url_name),
    url(r'^(?P<issue_number>\d+)/commit/(?P<commit_sha>[a-f0-9]{40})/comment/(?P<comment_pk>\d+)/edit/$', CommitCommentEditView.as_view(), name=CommitCommentEditView.url_name),
    url(r'^(?P<issue_number>\d+)/commit/(?P<commit_sha>[a-f0-9]{40})/comment/(?P<comment_pk>\d+)/delete/$', CommitCommentDeleteView.as_view(), name=CommitCommentDeleteView.url_name),
]
|
# Demo: a class attribute is shared — every instance reads the same value.
class MyClass(object):
    var = 10


this_obj = MyClass()
print(this_obj.var)
that_obj = MyClass()
print(that_obj.var)
# Output
# 10
# 10
# [my_machine oop_python]$
# Demo: methods receive the calling instance as `self`.
class Joe(object):
    greeting = "hello, Joe"  # class attribute shared by all instances

    def callme(self):
        print('calling callme with instance')
        # printing self shows the same object repr as printing the instance
        print(self)


thisjoe = Joe()
print(thisjoe.greeting)
thisjoe.callme()
print(thisjoe)
# Output
# hello, Joe
# calling callme with instance
# <__main__.Joe object at 0x7f8c8b681cf8>
# <__main__.Joe object at 0x7f8c8b681cf8>
# [my_machine oop_python]$
import random


# Demo: instance attributes can be created in any method, not just __init__.
class MyClass(object):
    def dothis(self):
        self.rand_val = random.randint(1, 10)


myinst = MyClass()
myinst.dothis()  # rand_val exists only after dothis() has run
print(myinst.rand_val)
##############################################
# Demo: setter/getter pair — but attributes stay public, so direct
# assignment bypasses set_val entirely.
class MyClass2(object):
    def set_val(self, val):
        self.value = val

    def get_val(self):
        return self.value


a = MyClass2()
b = MyClass2()
a.set_val(10)
b.set_val(100)
# overwrites the value previously stored via set_val
a.value = 'hello'
print(a.get_val())
print(b.get_val())
##############################################
class MyInteger(object):
    """Integer holder whose setter silently ignores non-coercible values."""

    def set_val(self, val):
        """Store int(val); keep the previous value if coercion fails."""
        try:
            val = int(val)
        except ValueError:
            return
        self.val = val

    def get_val(self):
        """Return the stored value."""
        return self.val

    def increment_val(self):
        # BUG FIX: the original read `sel.val` (NameError at call time);
        # it must be `self.val`.
        self.val = self.val + 1
i = MyInteger()
i.set_val(9)
print(i.get_val())
# set_val ignores values that cannot be converted to int
i.set_val('hi')
print(i.get_val())
# direct assignment still bypasses the validation in set_val
i.val = 'hi'
print(i.get_val())
#print(i.increment_val()) ## Error
# Output
# 1
# hello
# 100
# 9
# 9
# hi
# [my_machine oop_python]$
class MyNum(object):
    """Counter starting at zero; announces its construction."""

    def __init__(self):
        print('calling __init__')
        self.val = 0

    def increment(self):
        """Add one to the stored value."""
        self.val += 1
dd = MyNum()
dd.increment()
dd.increment()
# val was initialised to 0 by __init__, then incremented twice
print(dd.val)
class MyNum1(object):
    """Counter initialised from a value; non-numeric input falls back to 0."""

    def __init__(self, val):
        print('calling __init__')
        try:
            start = int(val)
        except ValueError:
            start = 0
        self.val = start

    def increment(self):
        """Add one to the stored value."""
        self.val += 1
dd = MyNum1(5)
dd.increment()
dd.increment()
print(dd.val)
# non-numeric input falls back to 0 inside __init__
dd = MyNum1('hi')
dd.increment()
print(dd.val)
# Output
# calling __init__
# 2
# calling __init__
# 7
# calling __init__
# 1
# [my_machine oop_python]$
# Demo: class vs instance attributes — instance lookup falls back to the class.
class YourClass(object):
    classy = 10

    def set_val(self):
        self.insty = 100


dd = YourClass()
dd.set_val()
print(dd.classy)
print(dd.insty)


class YourClass1(object):
    classy = 'class value!'


dd = YourClass1()
print(dd.classy)
# assignment creates an instance attribute that shadows the class attribute
dd.classy = 'inst value!'
print(dd.classy)
# deleting the instance attribute re-exposes the class attribute
del dd.classy
print(dd.classy)
# Output
# 10
# 100
# class value!
# inst value!
# class value!
# [my_machine oop_python]$
|
# -*- coding:utf-8 -*-
import os
import sys
rootPath = os.path.dirname(os.getcwd())
sys.path.append(rootPath)
import time
from sqlalchemy import create_engine
import tushare as ts
import pymysql as MySQLdb
import numpy as np
import pandas as pd
from pandas import DataFrame as DF
from quotation.realQuotation import realQuotation
from quotation.realKDayData import realKDayData
#追加数据到现有表
#df.to_sql('tick_data',engine,if_exists='append')
class GetStocksAllData(object):
    """Facade over real-time quotation and K-day data sources for stocks."""

    # Class attributes: shared by all instances; the two workers are
    # constructed at class-definition (import) time.
    all_stock_Id = []
    mRealQuotation = realQuotation()
    mRealKDay = realKDayData()

    def __init__(self):
        # start the background workers for quotes and K-day data
        self.mRealQuotation.start_work((3,))
        self.mRealKDay.start_work((3,))

    def getRealTimeData_from_Network(self, stockId):
        """Fetch a one-row realtime quote via tushare; returns None on failure."""
        try:
            df = ts.get_realtime_quotes(stockId)
            re = df[0:1]
        # NOTE(review): bare except hides all errors (incl. KeyboardInterrupt);
        # message typo "Filed" (presumably "Failed") left unchanged here.
        except:
            print("try to get RealTime Data Filed:", stockId)
            return None
        return re

    def getRealQuotationData(self, stocksId):
        """Delegate quotation lookup to the realQuotation worker."""
        return self.mRealQuotation.getQuotation(stocksId)

    def getDataByReqFiled(self, good_id, req_fields):
        """Extract the requested fields from a quotation row; 0 when missing."""
        # print(good_id, type(good_id))
        result = self.getRealQuotationData(good_id)
        data = []
        c = result.index[0]
        for fid in req_fields:
            rFid = fid
            try:
                ret = result[rFid][c]
            except:
                ret = None
            if ret != None:
                data.append(ret)
            else:
                # missing fields are reported as 0
                data.append(0)
        #print(data)
        return data

    def int2StrStockId(self, stockId):
        """Zero-pad an integer stock id to the 6-character string form."""
        # NOTE(review): equivalent to str(stockId).zfill(6); also shadows
        # the builtin `str` inside the loop.
        strId = "%s" % stockId
        if len(strId) < 6:
            left = 6 - len(strId)
            str = ""
            while left > 0:
                str += "0"
                left -= 1
            strId = str + strId
        return strId

    def getKlineData(self, stockId, size, begin, end):
        """Return up to the last `size` K-day rows for a stock (or None).

        NOTE(review): `begin` and `end` are accepted but never used — confirm.
        """
        if type(stockId) == type(123):
            stockId = self.int2StrStockId(stockId)
        ret = self.mRealKDay.get_one_data_form_databases(stockId)
        if ret is None:
            return ret
        count = len(ret)
        if count <= size:
            return ret
        else:
            # keep only the most recent `size` entries
            retSize = 0 - size
            return ret[retSize:]
        pass

    def __del__(self):
        # stop the background workers when the facade is garbage-collected
        print("Enter getStosksData __del__")
        self.mRealQuotation.stop_work()
        self.mRealKDay.stop_work()
        print("Leaver del")
# Module-level singleton; constructing it starts the background workers.
getStocksData = GetStocksAllData()
#retData = getStocksData.getKlineData('603999', 20, 0,0)
#print(retData, type(retData))
#getStocksData.getRealQuotationData('603999')
|
import tkinter as tk
class ModalWindow(tk.Toplevel):
    """
    The basic modal window which all others are inherited from.

    Subclasses normally override body() to build their widgets and
    button_box() to add their action buttons.
    """

    def __init__(self, parent, title=None):
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)  # keep the modal on top of its parent
        self.sock = None
        self.server = None
        self.error_message = None
        if title:
            self.title(title)
        self.parent = parent
        self.result = None
        body = tk.Frame(self)
        # body() may return the widget that should receive initial focus
        self.initial_focus = self.body(body)
        body.pack(padx=5, pady=5)
        self.button_box()
        self.grab_set()  # route all input to this window (modal behaviour)
        if not self.initial_focus:
            self.initial_focus = self
        # closing the window behaves like cancelling
        self.protocol('WM_DELETE_WINDOW', self.cancel)
        # place the modal slightly offset from the parent's top-left corner
        self.geometry("+%d+%d" % (
            parent.winfo_rootx()+50,
            parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)

    def body(self, master):
        """Hook: build the modal's widgets; return the widget to focus.

        FIX: the base class previously called self.body()/self.button_box()
        without defining them, so instantiating ModalWindow directly (or a
        subclass that forgot an override) raised AttributeError. These no-op
        defaults make the base class safe on its own.
        """
        return None

    def button_box(self):
        """Hook: build the modal's button row. Default is a no-op."""
        pass

    def cancel(self, event=None):
        """
        Destroys modal when called
        """
        self.destroy()
|
def balanced(a_str):
    """Return True if all (), [], {} brackets in *a_str* nest correctly."""
    closers = {'(': ')', '[': ']', '{': '}'}
    expected = []
    for ch in a_str:
        matching = closers.get(ch)
        if matching is not None:
            # opener: remember which closer must come next
            expected.append(matching)
        elif not expected or ch != expected.pop():
            # closer with nothing open, or the wrong closer
            return False
    # balanced only if every opener was closed
    return not expected
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from . import pack_1  # import pack_1.py from the current package
# BUG FIX: mod_1 was called below while its import stayed commented out,
# which raised NameError at import time. Restore the relative import.
from .. import mod_1  # import mod_1.py from the parent package
# import pack_1  # a plain import works when running this file directly,
#                # but fails once the file is imported as part of a package

pack_1.pack_1_pr()
mod_1.mod_1_pr()


def pr():
    print('这是相对导入的一个包!')
|
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from ui_shoplist import *
from socket import *
import json
import threading
from msgwindow import MsgBoxWindow
from recordingwindow import RecordingBoxWindow
from inputwindow import InputWindow
import images_qr
def byteify(input):
    """Recursively encode every str in *input* (including dict keys) to
    UTF-8 bytes; other values pass through unchanged."""
    if isinstance(input, str):
        return input.encode('utf-8')
    if isinstance(input, list):
        return [byteify(item) for item in input]
    if isinstance(input, dict):
        return {byteify(k): byteify(v) for k, v in input.items()}
    return input
class ShoplistWindow(QtWidgets.QWidget, Ui_Form):
    """Main window of the shopping client.

    All server communication is JSON over UDP to 127.0.0.1:62000; a daemon
    thread keeps a long-lived connection open for pushed notifications.

    Fixes applied in review:
    * ``except IOError or timeout:`` evaluates to ``except IOError`` only
      (``or`` returns the first class object); handlers now catch the
      explicit tuple ``(IOError, timeout)``.
    * ``r_msg`` / ``now_user_info`` were mutable class attributes shared by
      all instances; each instance now gets its own copies in ``__init__``
      (the class-level defaults are kept for backward compatibility).
    * deprecated ``Thread.setDaemon`` replaced with the ``daemon`` attribute.
    * C-style ``(int)(...)`` casts replaced with plain ``int(...)`` calls.
    """
    # Class-level defaults kept for compatibility; per-instance values are
    # created in __init__.
    r_msg = []           # received notification messages
    now_user_info = {}   # cached profile of the logged-in user

    def __init__(self, user, address, parent=None):
        super(ShoplistWindow, self).__init__(parent)
        self.setupUi(self)
        # Per-instance state (see class docstring).
        self.r_msg = []
        self.now_user_info = {}
        self.setFixedSize(self.width(), self.height())
        self.setWindowTitle("欢迎使用森普商城")
        self.input_shop_id.setAttribute(Qt.WA_MacShowFocusRect, False)
        # Listen for pushed messages on a background daemon thread so the
        # GUI can exit without joining it.
        t = threading.Thread(target=self.listen_msg, args=(user,))
        t.daemon = True
        t.start()
        # Initial data: user name, shop list, profile.
        self.username.setText(user)
        shops = self.getshop()
        self.now_user_info = self.get_user_info()
        self.loadshop(shops)
        self.has_shop()
        self.msg_num.setText(str(len(self.r_msg)))
        # Icons and logo.
        icon_exit = QIcon(":/img/exit.png")
        icon_mail = QIcon(":/img/mail.png")
        self.exit.setIcon(icon_exit)
        self.see_msg.setIcon(icon_mail)
        pixmap = QPixmap(":/img/logo.png")
        self.logo.setPixmap(pixmap)
        # Signal wiring.
        self.exit.clicked.connect(self.exit_request)
        self.next_page.clicked.connect(self.to_next_page)
        self.last_page.clicked.connect(self.to_last_page)
        self.enter_shop_1.clicked.connect(lambda: self.enter_shop_request("1"))
        self.enter_shop_2.clicked.connect(lambda: self.enter_shop_request("2"))
        self.enter_shop_3.clicked.connect(lambda: self.enter_shop_request("3"))
        self.enter_shop_4.clicked.connect(lambda: self.enter_shop_request("4"))
        self.enter_shop_5.clicked.connect(lambda: self.enter_shop_request("5"))
        self.enter_shop.clicked.connect(lambda: self.enter_shop_request("0"))
        self.back_shoplist.clicked.connect(self.back_list)
        self.enter_my_shop.clicked.connect(self.enter_own_shop_request)
        self.shop_custom.clicked.connect(self.get_custom)
        self.see_msg.clicked.connect(lambda: self.open_msgbox(self.r_msg))
        self.login_info.clicked.connect(self.get_logininfo)
        self.bought.clicked.connect(self.get_shopping_recording)
        self.buy_good_1.clicked.connect(lambda: self.buy_goods(1))
        self.buy_good_2.clicked.connect(lambda: self.buy_goods(2))
        self.buy_good_3.clicked.connect(lambda: self.buy_goods(3))
        self.buy_good_4.clicked.connect(lambda: self.buy_goods(4))
        self.buy_good_5.clicked.connect(lambda: self.buy_goods(5))
        self.enter_sold_recording.clicked.connect(self.get_sold_recording)
        self.add_goods.clicked.connect(self.add_goods_request)

    def closeEvent(self, event):
        """Notify the server before the window is closed."""
        self.exit_request()
        event.accept()

    def getshop(self):
        """Fetch the full shop list; returns a dict keyed by shop id."""
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        s.settimeout(1)
        while True:
            info = {"method": "send_shoplist"}
            message = json.dumps(info)
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(2048)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            shops = json.loads(data)
            if data:
                return shops
            break

    def get_user_info(self):
        """Fetch the logged-in user's profile; `shop` is normalised to str."""
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        s.settimeout(1)
        while True:
            info = {"method": "load_info", "user": self.username.text()}
            message = json.dumps(info)
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(1024)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            user_info = json.loads(data)
            if data:
                user_info["shop"] = str(user_info['shop'])
                return user_info

    # The five modify_shoplistN methods update row N of the table; they are
    # dispatched by name via getattr in loadshop_range/loadgood_range.  The
    # same widgets double as the goods table (id/name/price).
    def modify_shoplist1(self, id, name, owner):
        self.shop_id_1.setText(id)
        self.shop_name_1.setText(name)
        self.shop_owner_1.setText(owner)

    def modify_shoplist2(self, id, name, owner):
        self.shop_id_2.setText(id)
        self.shop_name_2.setText(name)
        self.shop_owner_2.setText(owner)

    def modify_shoplist3(self, id, name, owner):
        self.shop_id_3.setText(id)
        self.shop_name_3.setText(name)
        self.shop_owner_3.setText(owner)

    def modify_shoplist4(self, id, name, owner):
        self.shop_id_4.setText(id)
        self.shop_name_4.setText(name)
        self.shop_owner_4.setText(owner)

    def modify_shoplist5(self, id, name, owner):
        self.shop_id_5.setText(id)
        self.shop_name_5.setText(name)
        self.shop_owner_5.setText(owner)

    def loadshop_range(self, r, data):
        """Fill the table with page *r* (1-based, 5 rows/page) of the shop dict."""
        if 5 * r >= len(data):
            # Last page: fill the remaining rows, blank out the rest.
            for i in range(1, len(data) - 5 * (r - 1) + 1):
                method = "modify_shoplist" + str(i)
                key = list(data.keys())[5 * (r - 1) + i - 1]
                getattr(self, method)(key, data[key]["name"], data[key]["owner"])
            for i in range(len(data) - 5 * (r - 1) + 1, 6):
                method = "modify_shoplist" + str(i)
                getattr(self, method)("", "", "")
        else:
            for i in range(1, 6):
                method = "modify_shoplist" + str(i)
                key = list(data.keys())[5 * (r - 1) + i - 1]
                getattr(self, method)(key, data[key]["name"], data[key]["owner"])

    def loadgood_range(self, r, data):
        """Fill the table with page *r* (1-based, 5 rows/page) of the goods list."""
        if 5 * r >= len(data):
            for i in range(1, len(data) - 5 * (r - 1) + 1):
                method = "modify_shoplist" + str(i)
                getattr(self, method)(data[5 * (r - 1) + i - 1]["id"], data[5 * (r - 1) + i - 1]["name"], data[5 * (r - 1) + i - 1]["price"])
            for i in range(len(data) - 5 * (r - 1) + 1, 6):
                method = "modify_shoplist" + str(i)
                getattr(self, method)("", "", "")
        else:
            for i in range(1, 6):
                method = "modify_shoplist" + str(i)
                getattr(self, method)(data[5 * (r - 1) + i - 1]["id"], data[5 * (r - 1) + i - 1]["name"], data[5 * (r - 1) + i - 1]["price"])

    def to_next_page(self):
        """Advance one page in whichever table (shops or goods) is showing."""
        state = self.shop_name_head.text()
        hidden_operation = {
            "1num": self.num_1.setHidden,
            "2num": self.num_2.setHidden,
            "3num": self.num_3.setHidden,
            "4num": self.num_4.setHidden,
            "5num": self.num_5.setHidden,
            "1buy": self.buy_good_1.setHidden,
            "2buy": self.buy_good_2.setHidden,
            "3buy": self.buy_good_3.setHidden,
            "4buy": self.buy_good_4.setHidden,
            "5buy": self.buy_good_5.setHidden,
            "1shop": self.enter_shop_1.setHidden,
            "2shop": self.enter_shop_2.setHidden,
            "3shop": self.enter_shop_3.setHidden,
            "4shop": self.enter_shop_4.setHidden,
            "5shop": self.enter_shop_5.setHidden
        }
        current = int(self.page.text())
        # The column-header text tells us which mode the table is in.
        if state == "店铺名":
            shops = self.getshop()
        else:
            goods = self.enter_shop_request("6")
        if (current + 1) * 5 < int(self.shop_num.text()):
            self.last_page.setHidden(False)
            self.next_page.setHidden(False)
            self.page.setText(str(current + 1))
            if state == "店铺名":
                self.loadshop_range(current + 1, shops)
            else:
                self.loadgood_range(current + 1, goods)
        elif (current + 1) * 5 >= int(self.shop_num.text()):
            # Final page: hide the rows (and controls) past the end of data.
            self.last_page.setHidden(False)
            self.next_page.setHidden(True)
            self.page.setText(str(current + 1))
            if state == "店铺名":
                self.loadshop_range(current + 1, shops)
                for i in range(6 - (current + 1) * 5 + int(self.shop_num.text()), 6):
                    hidden_operation[str(i) + "shop"](True)
            else:
                self.loadgood_range(current + 1, goods)
                for i in range(6 - (current + 1) * 5 + int(self.shop_num.text()), 6):
                    hidden_operation[str(i) + "num"](True)
                    hidden_operation[str(i) + "buy"](True)

    def to_last_page(self):
        """Go back one page in whichever table is showing."""
        state = self.shop_name_head.text()
        current = int(self.page.text())
        if state == "店铺名":
            shops = self.getshop()
        else:
            goods = self.enter_shop_request("6")
        if (current - 1) == 1:
            # Back on the first page: no further "previous" navigation.
            self.last_page.setHidden(True)
            self.next_page.setHidden(False)
            self.page.setText(str(current - 1))
            self.enter_shop_1.setHidden(False)
            self.enter_shop_2.setHidden(False)
            self.enter_shop_3.setHidden(False)
            self.enter_shop_4.setHidden(False)
            self.enter_shop_5.setHidden(False)
            if state == "店铺名":
                self.loadshop_range(current - 1, shops)
            else:
                self.loadgood_range(current - 1, goods)
        else:
            self.last_page.setHidden(False)
            self.next_page.setHidden(True)
            self.page.setText(str(current - 1))
            if state == "店铺名":
                self.loadshop_range(current - 1, shops)
            else:
                self.loadgood_range(current - 1, goods)

    def modify_msgbox(self, data):
        """Append a pushed message and refresh the unread counter."""
        self.r_msg.append(data)
        self.msg_num.setText(str(len(self.r_msg)))

    def open_msgbox(self, data):
        """Show the received-messages window."""
        self.msgwindow = MsgBoxWindow(data)
        self.msgwindow.show()

    def loadshop(self, data):
        """Switch the table into shop-list mode and show the first page."""
        self.shop_name_head.setText("店铺名")
        self.shop_owner_head.setText("拥有者")
        self.page_2.setText("个店铺 当前是第")
        self.update()
        self.now_shop_head.setHidden(True)
        self.now_shop.setHidden(True)
        self.back_shoplist.setHidden(True)
        self.now_shop_name.setHidden(True)
        hidden_operation = {
            "1num": self.num_1.setHidden,
            "2num": self.num_2.setHidden,
            "3num": self.num_3.setHidden,
            "4num": self.num_4.setHidden,
            "5num": self.num_5.setHidden,
            "1buy": self.buy_good_1.setHidden,
            "2buy": self.buy_good_2.setHidden,
            "3buy": self.buy_good_3.setHidden,
            "4buy": self.buy_good_4.setHidden,
            "5buy": self.buy_good_5.setHidden,
            "1shop": self.enter_shop_1.setHidden,
            "2shop": self.enter_shop_2.setHidden,
            "3shop": self.enter_shop_3.setHidden,
            "4shop": self.enter_shop_4.setHidden,
            "5shop": self.enter_shop_5.setHidden
        }
        self.num_head.setHidden(True)
        for i in range(1, 6):
            hidden_operation[str(i) + "num"](True)
            hidden_operation[str(i) + "buy"](True)
            hidden_operation[str(i) + "shop"](False)
        # Load the first five shops.
        shopnum = len(data)
        if shopnum <= 5:
            for i in range(1, shopnum + 1):
                method = "modify_shoplist" + str(i)
                key = list(data.keys())[i - 1]
                getattr(self, method)(key, data[key]["name"], data[key]["owner"])
            for i in range(shopnum + 1, 6):
                hidden_operation[str(i) + "shop"](True)
            self.last_page.setHidden(True)
            self.next_page.setHidden(True)
            self.shop_num.setText(str(shopnum))
        else:
            for i in range(1, 6):
                method = "modify_shoplist" + str(i)
                key = list(data.keys())[i - 1]
                getattr(self, method)(key, data[key]["name"], data[key]["owner"])
            self.last_page.setHidden(True)
            self.next_page.setHidden(False)
            self.shop_num.setText(str(shopnum))
        self.page.setText("1")

    def exit_request(self):
        """Tell the server this user is logging off, then close the window."""
        user_current = self.username.text()
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        s.settimeout(1)
        while True:
            info = {"method": "exit_request", "user": user_current}
            message = json.dumps(info)
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(1024)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            if data == b"0":
                # Server acknowledged the logoff.
                self.close()
                return
            else:
                print("exit fail")
                return

    def load_goods(self, goods, sname):
        """Switch the table into goods mode for shop *sname*, page 1."""
        hidden_operation = {
            "1num": self.num_1.setHidden,
            "2num": self.num_2.setHidden,
            "3num": self.num_3.setHidden,
            "4num": self.num_4.setHidden,
            "5num": self.num_5.setHidden,
            "1buy": self.buy_good_1.setHidden,
            "2buy": self.buy_good_2.setHidden,
            "3buy": self.buy_good_3.setHidden,
            "4buy": self.buy_good_4.setHidden,
            "5buy": self.buy_good_5.setHidden,
            "1shop": self.enter_shop_1.setHidden,
            "2shop": self.enter_shop_2.setHidden,
            "3shop": self.enter_shop_3.setHidden,
            "4shop": self.enter_shop_4.setHidden,
            "5shop": self.enter_shop_5.setHidden
        }
        self.shop_name_head.setText("商品名")
        self.shop_owner_head.setText("价格")
        self.page_2.setText("个商品 当前是第")
        self.num_head.setHidden(False)
        for i in range(1, 6):
            hidden_operation[str(i) + "num"](False)
            hidden_operation[str(i) + "buy"](False)
            hidden_operation[str(i) + "shop"](True)
        # Default purchase quantity is 1 for every row.
        self.num_1.setText("1")
        self.num_2.setText("1")
        self.num_3.setText("1")
        self.num_4.setText("1")
        self.num_5.setText("1")
        self.now_shop_head.setHidden(False)
        self.now_shop.setHidden(False)
        self.back_shoplist.setHidden(False)
        goodsnum = len(goods)
        self.now_shop_name.setHidden(False)
        self.now_shop_name.setText(sname)
        if goodsnum < 5:
            for i in range(1, goodsnum + 1):
                method = "modify_shoplist" + str(i)
                getattr(self, method)(goods[i - 1]["id"], goods[i - 1]["name"], goods[i - 1]["price"])
            for i in range(goodsnum + 1, 6):
                hidden_operation[str(i) + "num"](True)
                hidden_operation[str(i) + "buy"](True)
                method = "modify_shoplist" + str(i)
                getattr(self, method)("", "", "")
            self.last_page.setHidden(True)
            self.next_page.setHidden(True)
            self.shop_num.setText(str(goodsnum))
            self.page.setText("1")
        else:
            for i in range(1, 6):
                method = "modify_shoplist" + str(i)
                getattr(self, method)(goods[i - 1]["id"], goods[i - 1]["name"], goods[i - 1]["price"])
            self.last_page.setHidden(True)
            self.next_page.setHidden(False)
            self.shop_num.setText(str(goodsnum))
            self.page.setText("1")

    def enter_shop_request(self, num):
        """Enter a shop and display its goods.

        *num* selects where the shop id comes from: "1"-"5" are table rows,
        "0" is the manual id input box, "6" re-uses the currently open shop.
        Returns the goods list on success.
        """
        id_dic = {"1": self.shop_id_1.text(),
                  "2": self.shop_id_2.text(),
                  "3": self.shop_id_3.text(),
                  "4": self.shop_id_4.text(),
                  "5": self.shop_id_5.text(),
                  "0": self.input_shop_id.text(),
                  "6": self.now_shop.text()
                  }
        shop_id = id_dic[num]
        # Entering our own shop goes through the owner-specific flow.
        if shop_id == self.now_user_info['shop']:
            self.enter_own_shop_request()
            return
        info = {"method": "enter_shop", "id": str(shop_id), "user": self.username.text()}
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        s.settimeout(1)
        while True:
            message = json.dumps(info)
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(4096)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            if data:
                goods = json.loads(data)
                if goods["state"] == "open":
                    # Leave the previous shop first when jumping by typed id.
                    if num == '0' and self.now_shop.text() != "":
                        self.leave_shop()
                    self.now_shop.setText(str(shop_id))
                    self.load_goods(goods["goods"], goods["name"])
                    self.shop_custom.setHidden(False)
                    return goods["goods"]
                elif goods["state"] == "close":
                    # Shop has been closed by its owner.
                    self.msgwindow = MsgWindow("", "此店铺已被关闭")
                    self.msgwindow.show()
                    break
                elif goods["state"] == "null":
                    self.msgwindow = MsgWindow("", "此店铺不存在")
                    self.msgwindow.show()
                    break
                else:
                    continue

    def enter_own_shop_request(self):
        """Enter the user's own shop; the owner view hides the buy controls."""
        info = {"method": "enter_own_shop", "user": self.username.text()}
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        s.settimeout(1)
        while True:
            message = json.dumps(info)
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(1024)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            if data:
                goods = json.loads(data)
                if goods["state"] == "open":
                    self.now_shop.setText(goods["id"])
                    self.load_goods(goods["goods"], goods['name'])
                    self.shop_custom.setHidden(False)
                    # Owners cannot buy from themselves: hide quantity/buy widgets.
                    self.num_head.setHidden(True)
                    self.num_1.setHidden(True)
                    self.num_2.setHidden(True)
                    self.num_3.setHidden(True)
                    self.num_4.setHidden(True)
                    self.num_5.setHidden(True)
                    self.buy_good_1.setHidden(True)
                    self.buy_good_2.setHidden(True)
                    self.buy_good_3.setHidden(True)
                    self.buy_good_4.setHidden(True)
                    self.buy_good_5.setHidden(True)
                    return goods["goods"]
                elif goods["state"] == "close":
                    # Shop has been closed.
                    msg_window = MsgWindow("进入店铺失败", "你的店铺..好像已经被关了")
                    msg_window.show()
                    break
                elif goods["state"] == "null":
                    # User does not own a shop.
                    msg_window = MsgWindow("进入店铺失败", "你明明没有店!")
                    msg_window.show()
                    break
                else:
                    continue

    def back_list(self):
        """Leave the current shop and return to the shop-list view."""
        if self.leave_shop() == 0:
            self.now_shop_head.setHidden(True)
            self.now_shop.setHidden(True)
            self.back_shoplist.setHidden(True)
            self.num_head.setHidden(True)
            self.num_1.setHidden(True)
            self.num_2.setHidden(True)
            self.num_3.setHidden(True)
            self.num_4.setHidden(True)
            self.num_5.setHidden(True)
            self.buy_good_1.setHidden(True)
            self.buy_good_2.setHidden(True)
            self.buy_good_3.setHidden(True)
            self.buy_good_4.setHidden(True)
            self.buy_good_5.setHidden(True)
            self.enter_shop_1.setHidden(False)
            self.enter_shop_2.setHidden(False)
            self.enter_shop_3.setHidden(False)
            self.enter_shop_4.setHidden(False)
            self.enter_shop_5.setHidden(False)
            self.now_shop.setText("")
            shop = self.getshop()
            self.loadshop(shop)
            self.has_shop()
        else:
            return

    def has_shop(self):
        """Show or hide the shop-owner controls depending on shop ownership."""
        if str(self.now_user_info["shop"]) == '0':
            self.shop_custom.setHidden(True)
            self.enter_sold_recording.setHidden(True)
            self.add_goods.setHidden(True)
        else:
            self.shop_custom.setHidden(False)
            self.enter_sold_recording.setHidden(False)
            self.add_goods.setHidden(False)

    def get_custom(self):
        """Show the customers of a shop (the one being browsed, or our own)."""
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        info = {"method": "show_custom", "id": ""}
        # In goods mode ask about the shop being browsed; otherwise our own.
        if self.shop_name_head.text() == "商品名":
            info['id'] = self.now_shop.text()
        else:
            info['id'] = self.now_user_info['shop']
        message = json.dumps(info)
        data1 = {}
        s.settimeout(1)
        while True:
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(1024)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            data = json.loads(data)
            break
        data1['id'] = info['id']
        data1['custom'] = data[info['id']]
        self.recodwindow = RecordingBoxWindow("custom", data1)
        self.recodwindow.show()

    def get_logininfo(self):
        """Show this user's login history."""
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        info = {"method": "send_logininfo", "user": self.username.text()}
        message = json.dumps(info)
        data = ""
        s.settimeout(1)
        while True:
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(4096)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            data = json.loads(data)
            break
        self.recodwindow = RecordingBoxWindow("login", data)
        self.recodwindow.show()

    def get_shopping_recording(self):
        """Show this user's purchase history."""
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        info = {"method": "send_shopping_recording", "user": self.username.text()}
        message = json.dumps(info)
        s.settimeout(1)
        while True:
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(10240)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            data = json.loads(data)
            break
        self.recodwindow = RecordingBoxWindow("buy", data)
        self.recodwindow.show()

    def get_sold_recording(self):
        """Show the sales history of this user's own shop."""
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        info = {"method": "send_sold_recording", "user": self.username.text()}
        message = json.dumps(info)
        s.settimeout(1)
        while True:
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(10240)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            data = json.loads(data)
            break
        self.recodwindow = RecordingBoxWindow("sold", data)
        self.recodwindow.show()

    def buy_goods(self, i):
        """Buy row *i* (1-5) of the goods table with its chosen quantity."""
        info_get = {
            "1g_id": self.shop_id_1.text,
            "2g_id": self.shop_id_2.text,
            "3g_id": self.shop_id_3.text,
            "4g_id": self.shop_id_4.text,
            "5g_id": self.shop_id_5.text,
            "1g_name": self.shop_name_1.text,
            "2g_name": self.shop_name_2.text,
            "3g_name": self.shop_name_3.text,
            "4g_name": self.shop_name_4.text,
            "5g_name": self.shop_name_5.text,
            "1_num": self.num_1.text,
            "2_num": self.num_2.text,
            "3_num": self.num_3.text,
            "4_num": self.num_4.text,
            "5_num": self.num_5.text,
        }
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        info = {"method": "buy_goods",
                "goods_id": info_get[str(i) + "g_id"](),
                "goods_name": info_get[str(i) + "g_name"](),
                "user": self.username.text(),
                "num": info_get[str(i) + "_num"](),
                "shop_id": self.now_shop.text()}
        message = json.dumps(info)
        s.settimeout(1)
        while True:
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(1024)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            data = json.loads(data)
            break
        # MsgWindow's QMessageBox call is modal, so a local reference is fine.
        msgwindow = MsgWindow("购买成功", "订单号为:" + data['shopping_num'] + "\n可以在购买记录中查看")
        msgwindow.show()

    def add_goods_request(self):
        """Open the input window used to put a new item on sale."""
        self.input_window = InputWindow(self.now_user_info['shop'])
        self.input_window.setWindowTitle("商品上架")
        self.input_window.show()

    def leave_shop(self):
        """Tell the server we left the current shop; 0 on success, 1 on failure."""
        host = '127.0.0.1'
        port = 62000
        s = socket(AF_INET, SOCK_DGRAM)
        info = {"method": "leave_shop", "user": self.username.text(), "id": self.now_shop.text()}
        message = json.dumps(info)
        s.settimeout(1)
        while True:
            s.sendto(message.encode(encoding='utf-8'), (host, port))
            try:
                data, (host, port) = s.recvfrom(1024)
            except (IOError, timeout):
                self.tip_window = MsgWindow("", "网络错误了~")
                self.close()
                return
            if data == b"0":
                return 0
            else:
                self.msgwindow = MsgWindow("", "离开店铺失败,请检查网络")
                self.msgwindow.show()
                return 1

    # --- background thread: receive pushed messages from the server ---
    def listen_msg(self, user):
        """Long-lived connection that receives pushed notification messages."""
        s = socket(AF_INET, SOCK_DGRAM)
        host = '127.0.0.1'
        port = 62000
        info = {"method": "listen", "user": user}
        message = json.dumps(info)
        # NOTE(review): socket.connect returns None, so this comparison is
        # never true — presumably intended to bail out on connect failure.
        if s.connect((host, port)) == 0:
            return
        while True:
            s.sendall(message.encode(encoding='utf-8'))
            try:
                data, (host, port) = s.recvfrom(1024)
            except IOError:
                break
            data = json.loads(data)
            # Record the message and refresh the unread counter.
            self.modify_msgbox(data)
            continue
class MsgWindow(QtWidgets.QWidget):
    """Pop-up helper: shows a modal information dialog as soon as it is built."""
    def __init__(self, m1, m2):
        super(MsgWindow, self).__init__()
        self.setWindowTitle(m1)
        # Modal information box; blocks until the user dismisses it.
        QMessageBox.information(self, m1, m2, QMessageBox.Yes)
|
# Generate latent-factor sweep figures for a GCE trained on CIFAR classes 7/9:
# each row of the output grid is one latent dimension, each column one sweep value.
import numpy as np
import matplotlib.pyplot as plt
from src.util import *
from src.load_mnist import *
from load_cifar import load_cifar_classSelect
from src.GCE import GenerativeCausalExplainer
import os
# --- parameters ---
c_dim = 3
z_dim = 2
K = 1
L = 16
img_size = 32
class_use = np.array([7, 9])
latent_sweep_vals = np.linspace(-2,2,25)
latent_sweep_plot = [0,4,8,12,16,20,24]
classifier_path = './models/classifiers/base_cifar_79_classifier'
gce_path = './models/GCEs/base_cifar_79_gce_K1_L16_lambda005'
export_figs = True
# --- initialize ---
class_use_str = np.array2string(class_use)
y_dim = class_use.shape[0]
newClass = range(0,y_dim)
nsweep = len(latent_sweep_vals)
# NOTE(review): `torch` is never imported in this file — presumably it comes
# in via `from src.util import *`; confirm, or add an explicit `import torch`.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# --- load test data ---
test_size = 64
X, Y, _ = load_cifar_classSelect('train', class_use, range(0, len(class_use)))
vaX, vaY, _ = load_cifar_classSelect('val', class_use, range(0, len(class_use)))
# Scale pixel values into [0, 1].
X, vaX = X / 255, vaX / 255
sample_inputs = vaX[0:test_size]
sample_labels = vaY[0:test_size]
sample_inputs_torch = torch.from_numpy(sample_inputs)
# HWC -> CHW for the torch models.
sample_inputs_torch = sample_inputs_torch.permute(0,3,1,2).float().to(device)
ntrain = X.shape[0]
# --- load GCE ---
gcedata = torch.load(os.path.join(gce_path,'model.pt'), map_location=device)
# --- load classifier ---
from src.models.CNN_classifier import CNN
checkpoint_model = torch.load(os.path.join(classifier_path,'model.pt'), map_location=device)
classifier = CNN(y_dim, c_dim, img_size=img_size).to(device)
classifier.load_state_dict(checkpoint_model['model_state_dict_classifier'])
from src.models.CVAE import Encoder, Decoder
# NOTE(review): Encoder/Decoder get X.shape[1]**2 as the flattened image size,
# which ignores the channel dimension — confirm this matches their constructors.
encoder = Encoder(K+L, c_dim, X.shape[1]**2).to(device)
decoder = Decoder(K+L, c_dim, X.shape[1]**2).to(device)
encoder.load_state_dict(gcedata['model_state_dict_encoder'])
decoder.load_state_dict(gcedata['model_state_dict_decoder'])
# --- generate latent factor sweep plot ---
# One validation sample per (remapped) class label 0 and 1.
sample_ind = np.concatenate((np.where(vaY == 0)[0][:1],
                             np.where(vaY == 1)[0][:1]))
cols = [[0.047,0.482,0.863],[1.000,0.761,0.039],[0.561,0.788,0.227]]
border_size = 0
nsamples = len(sample_ind)
latentsweep_vals = [-3., -2., -1., 0., 1., 2., 3.]
Xhats = np.zeros((z_dim,nsamples,len(latentsweep_vals),img_size,img_size,c_dim))
yhats = np.zeros((z_dim,nsamples,len(latentsweep_vals)))
# create plots
for isamp in range(nsamples):
    x = torch.from_numpy(np.expand_dims(vaX[sample_ind[isamp]],0))
    x_torch = x.permute(0,3,1,2).float().to(device)
    # Latent code of the sample (first element of the encoder output tuple).
    z = encoder(x_torch)[0][0].cpu().detach().numpy()
    for latent_dim in range(z_dim):
        for (ilatentsweep, latentsweep_val) in enumerate(latentsweep_vals):
            # Perturb one latent coordinate, decode, and classify the result.
            ztilde = z.copy()
            ztilde[latent_dim] += latentsweep_val
            xhat = decoder(torch.unsqueeze(torch.from_numpy(ztilde),0).to(device))
            yhat = np.argmax(classifier(xhat)[0].cpu().detach().numpy())
            # Images are stored inverted (1 - x) for display.
            img = 1.-xhat.permute(0,2,3,1).cpu().detach().numpy().squeeze()
            Xhats[latent_dim,isamp,ilatentsweep,:,:,:] = img
            yhats[latent_dim,isamp,ilatentsweep] = yhat
# format and export plots
for isamp in range(nsamples):
    fig, axs = plt.subplots(z_dim, len(latentsweep_vals))
    for latent_dim in range(z_dim):
        for (ilatentsweep, latentsweep_val) in enumerate(latentsweep_vals):
            img = Xhats[latent_dim,isamp,ilatentsweep,:,:,:].squeeze()
            axs[latent_dim,ilatentsweep].imshow(img, interpolation='nearest')
            axs[latent_dim,ilatentsweep].set_xticks([])
            axs[latent_dim,ilatentsweep].set_yticks([])
    if export_figs:
        print('Exporting sample %d/%d (%d)...' % (isamp+1, nsamples, class_use[isamp]))
        plt.savefig('./reports/figures/figure7/fig7_samp%d.svg' % (isamp), bbox_inches=0)
print('Columns - latent values in sweep: ' + str(latentsweep_vals))
print('Rows - latent dimension')
#pip install meetup-api
# NOTE(review): this script is Python 2 (print statements, `imp` module);
# it will not run under Python 3 as-is.
import imp
## first arg is folder name, second arg is navigating to file
meetup = imp.load_source('pythoncourse2018-prep', 'C:/Users/wooki/Desktop/meetup_api.py')
api = meetup.client
## methods we can use
## https://meetup-api.readthedocs.io/en/latest/meetup_api.html#api-client-details
# Search for political groups near zip 63130.
polgroups = api.GetFindGroups({"zip" : "63130", "text" : "political"})
len(polgroups)
a=[g.members for g in polgroups] #find the num of members that is the biggest
a.sort()
a[len(a)-1]
# The 2840 below is the max member count observed interactively above.
for i in polgroups: #find the id that has 2840
    if i.members == 2840:
        print i.name
        print i.urlname
pop_group=api.GetGroup({"urlname" : "politicalcafe-66"}) #answer for num1
pop_group.__dict__.keys()
print pop_group.members
pop_group_members = api.GetMembers({"group_urlname" : "politicalcafe-66"}) #find the group
pop_group_members.__dict__.keys()
pop_group_members.results[0].keys()
ppl = pop_group_members.__dict__["results"]
len(ppl)
ppl[1]["id"]
ids=[] #find the ids of member in the group
for g in ppl:
    a=g["id"]
    ids.append(a)
ids[0]
import time
def find_most_pop_member(ids): #find the most active member of the group
    """For each member id, count how many meetup groups they belong to.

    Returns zip(ids, counts) — a list of (member_id, group_count) pairs in
    Python 2. On ValueError (presumably API rate limiting — confirm) it
    sleeps 3 seconds and retries the call once.
    """
    tt=[]  # group counts, parallel to kk
    kk=[]  # member ids processed
    for i in ids:
        try:
            aa = api.GetGroups({"member_id" : i})
        except ValueError:
            time.sleep(3) #change time sleep if crashes
            aa = api.GetGroups({"member_id" : i})
        bb = len(aa.results)
        kk.append(i)
        tt.append(bb)
    together=zip(kk,tt)
    return together
find_most_pop_member(ids)
import pandas as pd
import numpy as np
df = pd.DataFrame(together, columns = ["id","num"])
df.sort_values(by = "num",ascending=False)
max_person=df["id"][102] #answer for num2
max_person_groups=api.GetGroups({"member_id" : max_person})
print max_person_groups.results
max_person_groups.results #most active member's groups
import pandas as pd
import numpy as np
df = pd.DataFrame(together, columns = ["id","num"])
df.sort_values(by = "num",ascending=False)
max_person=df["id"][102]
max_person_groups=api.GetGroups({"member_id" : max_person}) #find groups
max_person_groups.results
max_person_groups.results[0].keys()
max_person_groups.results[0]["urlname"]
numofmem=[]
names=[]
for g in max_person_groups.results:
num=g["members"]
name=g["name"]
numofmem.append(num)
names.append(name)
get=zip(names,numofmem)
df2 = pd.DataFrame(get, columns = ["name","num"])
df2.sort_values(by = "num",ascending=False) #sort and find the biggest group
df2["name"][5] #answer for num3 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import pytest
from pants.backend.helm.goals.tailor import PutativeHelmTargetsRequest
from pants.backend.helm.goals.tailor import rules as helm_tailor_rules
from pants.backend.helm.target_types import HelmChartTarget, HelmUnitTestTestsGeneratorTarget
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.core.target_types import ResourcesGeneratorTarget
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with the Helm tailor rules used by these tests."""
    tailor_rules = [
        *helm_tailor_rules(),
        QueryRule(PutativeTargets, (PutativeHelmTargetsRequest, AllOwnedSources)),
    ]
    return RuleRunner(target_types=[HelmChartTarget], rules=tailor_rules)
def test_find_helm_charts(rule_runner: RuleRunner) -> None:
    """Only charts whose Chart.yaml/Chart.yml is not already owned are proposed."""
    files = {
        "src/owned/Chart.yaml": "",
        "src/foo/Chart.yaml": "",
        "src/bar/Chart.yml": "",
    }
    rule_runner.write_files(files)

    request = PutativeHelmTargetsRequest(("src/owned", "src/foo", "src/bar"))
    owned = AllOwnedSources(["src/owned/Chart.yaml"])
    putative_targets = rule_runner.request(PutativeTargets, [request, owned])

    def expected_target(path: str, triggering_source: str) -> PutativeTarget:
        # Putative helm_chart target named after its directory.
        return PutativeTarget.for_target_type(
            HelmChartTarget,
            name=os.path.basename(path),
            path=path,
            triggering_sources=[triggering_source],
        )

    expected = [
        expected_target("src/foo", "Chart.yaml"),
        expected_target("src/bar", "Chart.yml"),
    ]
    assert putative_targets == PutativeTargets(expected)
def test_find_helm_unittests(rule_runner: RuleRunner) -> None:
    """Unowned unittest files and their snapshots become putative targets."""
    rule_runner.write_files(
        {
            "src/owned/Chart.yaml": "",
            "src/owned/tests/owned_test.yaml": "",
            "src/owned/tests/__snapshot__/owned_test.yaml.snap": "",
            "src/foo/BUILD": "helm_chart()",
            "src/foo/Chart.yaml": "",
            "src/foo/tests/foo_test.yaml": "",
            "src/foo/tests/__snapshot__/foo_test.yaml.snap": "",
        }
    )
    owned_sources = [
        "src/owned/Chart.yaml",
        "src/owned/tests/owned_test.yaml",
        "src/owned/tests/__snapshot__/owned_test.yaml.snap",
        "src/foo/Chart.yaml",
    ]
    putative_targets = rule_runner.request(
        PutativeTargets,
        [
            PutativeHelmTargetsRequest(("src/owned", "src/foo")),
            AllOwnedSources(owned_sources),
        ],
    )

    def make_expected(target_type, path: str, triggering_source: str, **extra) -> PutativeTarget:
        # Putative target of the given type, named after its directory.
        return PutativeTarget.for_target_type(
            target_type,
            name=os.path.basename(path),
            path=path,
            triggering_sources=[triggering_source],
            **extra,
        )

    expected = [
        make_expected(HelmUnitTestTestsGeneratorTarget, "src/foo/tests", "foo_test.yaml"),
        make_expected(
            ResourcesGeneratorTarget,
            "src/foo/tests/__snapshot__",
            "foo_test.yaml.snap",
            kwargs={"sources": ("*_test.yaml.snap", "*_test.yml.snap")},
        ),
    ]
    assert putative_targets == PutativeTargets(expected)
|
import sys
from os import listdir
from os.path import join as oj
import matplotlib.image as mpimg
import numpy as np
# pytorch stuff
import torch.utils.data as data
from PIL import Image
sys.path.insert(1, oj(sys.path[0], '..')) # insert parent path
def is_image_file(filename):
    """True when *filename* ends with a recognised (lower-case) image extension."""
    return filename.endswith(('.png', '.jpg', '.jpeg'))
def load_img(filepath):
    """Read an image file, resize to 32x32, and return its raw CHW bytes."""
    # NOTE(review): Image.fromarray on a float32 multi-channel array raises on
    # most Pillow versions — confirm inputs are single-channel or that a dtype
    # conversion happens upstream.
    img = mpimg.imread(filepath).astype(np.float32)
    img = np.array(Image.fromarray(img).resize((32, 32)))
    # NOTE(review): .tobytes() flattens the transposed array into raw bytes, so
    # "CHW" only describes the memory layout — verify callers (e.g. the
    # input_transform in GlaucomaDataset) expect bytes rather than an ndarray.
    return np.transpose(img, [2, 0, 1]).tobytes() # return CHW
class GlaucomaDataset(data.Dataset):
    """Binary glaucoma dataset: label -1 for control images, +1 for glaucoma.

    Expects `data_dir` to contain `control/` and `glaucoma/` subdirectories
    holding the image files.
    """
    def __init__(self, data_dir, input_transform=None):
        super(GlaucomaDataset, self).__init__()

        def _image_paths(subdir):
            # All image files directly inside data_dir/subdir.
            folder = oj(data_dir, subdir)
            return [oj(folder, name) for name in listdir(folder)
                    if is_image_file(name)]

        controls = _image_paths('control')
        glaucomas = _image_paths('glaucoma')
        self.fnames = np.array(controls + glaucomas)
        # Controls first with label -1, then glaucoma images with label +1.
        self.labels = np.hstack((np.ones(len(controls)) * -1,
                                 np.ones(len(glaucomas)) * 1))
        self.input_transform = input_transform

    def __getitem__(self, index):
        """Return (image, label), applying input_transform to the image if set."""
        img = load_img(self.fnames[index])
        label = self.labels[index]
        if self.input_transform:
            img = self.input_transform(img)
        return img, label

    def __len__(self):
        return self.fnames.size
|
import time
from selenium import webdriver
class Scraper:
    """Selenium-driven scraper for app listings on prioridata.com.

    NOTE(review): uses the Selenium 3 ``find_element(s)_by_*`` API, which was
    removed in Selenium 4 — confirm the pinned selenium version.
    """
    def __init__(self):
        # Open Firefox on the "autism" search page; sleep lets the JS render.
        priori_url = "https://www.prioridata.com/search/apps?search_text=autism"
        self.driver = webdriver.Firefox()
        self.driver.get(priori_url)
        time.sleep(2)
    def get_app_urls(self):
        """Collect hrefs of app-detail links on the current page.

        Returns the matching URLs with their "https://" prefix stripped.
        """
        element = self.driver.find_elements_by_tag_name('a')
        final = []
        for items in element:
            try:
                href = items.get_attribute('href')
                # Keep only links into per-app detail pages.
                if href.find("https://prioridata.com/apps/") != -1:
                    final.append(href.replace('https://', ""))
            except:
                # NOTE(review): bare except hides stale-element and attribute
                # errors alike — consider narrowing.
                print("error")
        return final
    def switch_100(self):
        """Select the 4th page-size option in the dropdown; returns self."""
        dropdown = self.driver.find_element_by_class_name("select2-selection")
        dropdown.click()
        time.sleep(2)
        final_target = self.driver.find_element_by_xpath('/html/body/span[2]/span/span[2]/ul/li[4]')
        action = webdriver.ActionChains(self.driver)
        # Hard-coded pixel offset recorded from a manual session — fragile.
        action.move_by_offset(443.3999938964844, 893.0499877929688).click(final_target)
        action.perform()
        time.sleep(2)
        return self
    def click_next(self):
        """Advance the results table to the next page; returns self for chaining."""
        self.driver.find_element_by_xpath('//*[@id="DataTables_Table_0_next"]/a').click()
        return self
|
# Rewrite running-config.cfg into newrunning-config.cfg, replacing the
# address prefix "172" with "192".
#
# Bugs fixed: the original iterated over the *characters* of each line
# (`for word in line`), so `word == '172'` could never be true and the file
# was copied unchanged; the output file was also never closed, risking lost
# buffered writes.
n = 1  # replacement counter, kept from the original (starts at 1)
with open('running-config.cfg', 'r') as file1, \
        open('newrunning-config.cfg', 'w') as file2:
    for line in file1:
        n += line.count('172')
        file2.write(line.replace('172', '192'))
|
def fib1(n):
    """n-th Fibonacci number by naive recursion (exponential time).

    Fix: the original recursed forever for n <= 0; the guard now covers
    n <= 2 and returns 1, matching fib2/fib3 for non-positive input.
    """
    if n <= 2:
        return 1
    return fib1(n - 1) + fib1(n - 2)
def fib2(n):
    """n-th Fibonacci number via a memo list (O(n) time, O(n) space)."""
    seq = [1, 1]
    if n <= 2:
        return seq[0]
    for _ in range(2, n):
        # Each entry is the sum of the previous two.
        seq.append(seq[-1] + seq[-2])
    return seq[-1]
def fib3(n):
    """n-th Fibonacci number with two rolling values (O(n) time, O(1) space)."""
    if n <= 2:
        return 1
    prev, cur = 1, 1
    for _ in range(3, n + 1):
        prev, cur = cur, prev + cur
    return cur
# Interactive driver: compare the three implementations on one input.
# Ordered fastest-first (constant space, memoised list, naive recursion).
num = int(input("Enter a number to compute with fib:"))
a3 = fib3(num)
print("fib3: %d" % a3)
a2 = fib2(num)
print("fib2: %d" % a2)
# fib1 is exponential — this line dominates the runtime for large inputs.
a1 = fib1(num)
print("fib1: %d" % a1)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
Altered for Cork_AI meetup to illustrate keras
See original documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
from scipy import misc
import tensorflow as tf
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow.python.keras.models import Sequential, load_model
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
FLAGS = None
def deepnn_keras(model_input_shape):
    """Build the small Keras conv net used for MNIST classification.

    Architecture: conv(32,5x5,same) -> maxpool -> conv(64,5x5,same) ->
    flatten -> dense(1024,relu) -> dropout(0.5) -> dense(10,softmax).
    """
    layers = [
        Conv2D(input_shape=model_input_shape, kernel_size=(5, 5), filters=32, padding="same", activation="relu"),
        MaxPooling2D(),
        Conv2D(kernel_size=(5, 5), filters=64, padding="same", activation="relu"),
        Flatten(),
        Dense(units=1024, activation="relu"),
        Dropout(0.5),
        Dense(10, activation='softmax'),
    ]
    net = Sequential()
    for layer in layers:
        net.add(layer)
    return net
def main(_):
    """Train (or load) the MNIST conv net, evaluate it, and optionally
    write sample predictions to disk as PNGs.

    Driven by FLAGS:
      data_dir        -- dataset location (fashion data if path contains 'fashion')
      write_samples   -- dump 5 successful and 5 failed test classifications
      extra_test_imgs -- classify the bundled hand-written digit photos

    Fixes vs. original: string compared with `is` (identity) replaced by
    `==` (equality); dead `my_model = Sequential()` that was immediately
    overwritten removed.
    """
    # Import data
    print('Reading in data from ', FLAGS.data_dir)
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    # If a pre-trained model exists on disk, just load it ...
    if os.path.isfile(os.path.join(os.getcwd(), 'saved_model/cork_ai_model_keras_deep.h5')):
        my_model = load_model("saved_model/cork_ai_model_keras_deep.h5")
        print("Model restored from disk")
    # ... otherwise build and train a model using keras.
    else:
        my_model = deepnn_keras(model_input_shape=(28, 28, 1))
        my_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        train_images = np.reshape(mnist.train.images, [-1, 28, 28, 1])
        print("train set shape is ", train_images.shape)
        print("train labels shape is ", mnist.train.labels.shape)
        my_model.fit(train_images, mnist.train.labels, epochs=18, batch_size=50)
        # model is trained, let's save it to disk
        if not os.path.exists(os.path.join(os.getcwd(), 'saved_model')):
            os.makedirs(os.path.join(os.getcwd(), 'saved_model'))
        my_model.save("saved_model/cork_ai_model_keras_deep.h5")
    # Evaluate on the full test set.
    test_images = np.reshape(mnist.test.images, [-1, 28, 28, 1])
    metrics = my_model.evaluate(test_images, mnist.test.labels, batch_size=50)
    print('\n\nevaluation test: loss, accuracy : ', metrics)
    # Test on individual test examples, writing examples of
    # successful and failed classifications to disk.
    if FLAGS.write_samples:
        print('Will write sample outputs to output_images folder')
        file_prefix = ''
        if 'fashion' in FLAGS.data_dir:
            print('Using fashion data')
            file_prefix = 'fashion_deep_'
        if not os.path.exists(os.path.join(os.getcwd(), 'output_images')):
            os.makedirs(os.path.join(os.getcwd(), 'output_images'))
        num_each_to_store = 5
        stored_correct = 0
        stored_incorrect = 0
        idx = 0
        # Scan the test set until we have 5 successes and 5 failures (or run out).
        while (stored_correct < num_each_to_store or stored_incorrect < num_each_to_store) and idx < len(mnist.test.images):
            pred = np.argmax(my_model.predict(np.reshape(mnist.test.images[idx], [-1, 28, 28, 1])))
            real_label = np.argmax(mnist.test.labels[idx])
            correct = pred == real_label
            # BUG FIX: was `file_prefix is 'fashion_deep_'` -- identity, not
            # equality; whether it matched depended on string interning.
            if file_prefix == 'fashion_deep_':
                real_label = fashion_label_to_name(real_label)
                pred = '[' + fashion_label_to_name(pred) + ']'
            else:
                real_label = real_label.astype(str)
                pred = pred.astype(str)
            img = np.reshape(mnist.test.images[idx], [28, 28])
            plt.imshow(img, cmap='gray')
            if correct and stored_correct < num_each_to_store:
                stored_correct += 1
                plt.savefig("output_images/{}success_{}.png".format(file_prefix, real_label))
            elif not correct and stored_incorrect < num_each_to_store:
                stored_incorrect += 1
                plt.savefig("output_images/{}fail_{}_{}.png".format(file_prefix, real_label, pred))
            idx += 1
    # Test on extra test images made from photos of handwritten digits
    # or from digitally created 'hand' written digits
    if FLAGS.extra_test_imgs:
        print('Using manually hand-written digits')
        if not os.path.exists(os.path.join(os.getcwd(), 'output_images')):
            os.makedirs(os.path.join(os.getcwd(), 'output_images'))
        file_prefix = 'extra_'
        for idx in range(1, 7):
            img_file = 'extra_test_digits/{}.jpg'.format(idx)
            # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2
            # (imageio.imread is the documented replacement) -- confirm the
            # pinned SciPy version before upgrading.
            img = misc.imread(img_file)
            pred = np.argmax(my_model.predict(np.reshape(img, [1, 28, 28, 1])))
            plt.imshow(img, cmap='gray')
            plt.savefig("output_images/{}{}predicted_{}.png".format(file_prefix, idx, pred))
# function to convert fashion MNIST label (number) to clothing type string
def fashion_label_to_name(label):
    """Map a Fashion-MNIST class index (0-9) to its clothing-type name.

    Any other value maps to 'category_unknown'.
    """
    names = {
        0: 'tshirt_top',
        1: 'trousers',
        2: 'pullover',
        3: 'dress',
        4: 'coat',
        5: 'sandal',
        6: 'shirt',
        7: 'sneaker',
        8: 'bag',
        9: 'ankle_boot',
    }
    return names.get(label, 'category_unknown')
if __name__ == '__main__':
    # Command-line entry point: parse the known flags, then hand control to
    # tf.app.run, which calls main() with the leftover argv.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data')
    # Boolean switches expressed as 0/1 integers.
    parser.add_argument('--write_samples', type=int, default=0)
    parser.add_argument('--extra_test_imgs', type=int, default=0)
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:helper.py
# @Author: Michael.liu
# @Date:2020/6/3 14:05
# @Desc: this code is ....
import json
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import StratifiedShuffleSplit
# Paths to the Avazu CTR-prediction data files.
tr_csv_all_path = 'avazu_ctr/train.csv'
tr_csv_path = 'avazu_ctr/train_sample.csv'
ts_csv_path = 'avazu_ctr/test.csv'
# Force these CSV columns to be read as (unicode) strings, not numbers.
data_type = {'id': 'U', 'hour': 'U', 'device_type': 'U', 'C1': 'U', 'C15': 'U', 'C16': 'U'}
# Stratified sampling of the training data.
def genSampleData():
    """Draw a 5% sample of train.csv, stratified on 'click', and save it
    to avazu_ctr/train_sample.csv."""
    full_train = pd.read_csv(tr_csv_all_path)
    splitter = StratifiedShuffleSplit(n_splits=1, train_size=0.05, random_state=42)
    for sample_index, _ in splitter.split(full_train, full_train["click"]):
        full_train.loc[sample_index].to_csv("avazu_ctr/train_sample.csv", header=True)
def _dump_frequency_split(counts, threshold, out_path):
    """Split values into frequent (count > threshold -> bucket 0) and rare
    (-> bucket 1) groups and dump the value->bucket mapping as JSON.

    *counts* is a pandas Series as returned by value_counts().
    """
    bucket_of = {}
    for value in counts.loc[counts > threshold].index.values:
        bucket_of[str(value)] = 0
    for value in counts.loc[counts <= threshold].index.values:
        bucket_of[str(value)] = 1
    # `with` fixes the original's unclosed file handles.
    with open(out_path, "w") as fh:
        json.dump(bucket_of, fh)

# Feature engineering: precompute the frequency-bucket dictionaries used
# later by train_test_split() to collapse rare categorical values.
def futureEngineer():
    """Build and save frequency-bucket JSON dictionaries for site_id,
    site_domain, app_id and device_model over train+test combined.

    Refactor: four copy-pasted count/split/dump blocks are now one helper;
    behavior and output files are unchanged.
    """
    print("step 2-- future engineer")
    train = pd.read_csv(tr_csv_path, dtype=data_type, index_col='id')
    test = pd.read_csv(ts_csv_path, dtype=data_type, index_col='id')
    # Give test a dummy click column so the frames can be concatenated.
    test.insert(0, 'click', 0)
    tr_ts = pd.concat([test, train], copy=False)
    _dump_frequency_split(tr_ts.site_id.value_counts(), 20, "site_id_C_type_dict.json")
    _dump_frequency_split(tr_ts.site_domain.value_counts(), 20, "site_domain_C_type_dict.json")
    _dump_frequency_split(tr_ts.app_id.value_counts(), 20, "app_id_C_type_dict.json")
    # device_model has far more distinct values, hence the higher threshold.
    _dump_frequency_split(tr_ts.device_model.value_counts(), 200, "device_model_C_type_dict.json")
def train_test_split():
    """Full feature-engineering pass: load train+test, derive features,
    encode and scale them, then write train_FE.csv / test_FE.csv.

    Requires the *_C_type_dict.json files produced by futureEngineer().
    NOTE(review): the name shadows sklearn's train_test_split if that is
    ever imported here; currently only StratifiedShuffleSplit is imported.
    """
    train = pd.read_csv(tr_csv_path, dtype=data_type, index_col='id')
    test = pd.read_csv(ts_csv_path, dtype=data_type, index_col='id')
    # Dummy click column so test can be concatenated with train.
    test.insert(0, 'click', 0)
    tr_ts = pd.concat([test, train], copy=False)
    # 'hour' holds a digit string; split the tail into day and hour-of-day.
    tr_ts['day'] = tr_ts['hour'].apply(lambda x: x[-4:-2])
    tr_ts['hour'] = tr_ts['hour'].apply(lambda x: x[-2:])
    tr_ts['is_device'] = tr_ts['device_id'].apply(lambda x: 0 if x == 'a99f214a' else 1)  # see the exploratory data analysis section
    # Frequency-bucket dictionaries precomputed by futureEngineer().
    app_id_C_type_dict = json.load(open("app_id_C_type_dict.json", "r"))
    site_id_C_type_dict = json.load(open("site_id_C_type_dict.json", "r"))
    site_domain_C_type_dict = json.load(open("site_domain_C_type_dict.json", "r"))
    device_model_C_type_dict = json.load(open("device_model_C_type_dict.json", "r"))
    # Keep frequent values as-is; collapse rare ones into an "other_*" bucket.
    tr_ts['C_app_id'] = tr_ts["app_id"].apply(lambda x: x if app_id_C_type_dict.get(x) == 0 else "other_app_id")
    tr_ts['C_site_id'] = tr_ts['site_id'].apply(lambda x: x if site_id_C_type_dict.get(x) == 0 else "other_site_id")
    tr_ts['C_site_domain'] = tr_ts['site_domain'].apply(
        lambda x: x if site_domain_C_type_dict.get(x) == 0 else "other_site_domain")
    tr_ts['C_device_model'] = tr_ts['device_model'].apply(
        lambda x: x if device_model_C_type_dict.get(x) == 0 else "other_device_model")
    # Combined categorical features (string concatenation of two columns).
    tr_ts["C_pix"] = tr_ts["C15"] + '&' + tr_ts["C16"]
    tr_ts["C_device_type_1"] = tr_ts["device_type"] + '&' + tr_ts["C1"]
    # Drop the raw columns superseded by the derived ones above.
    tr_ts.drop(
        ['device_id', "device_type", 'app_id', 'site_id', 'site_domain', 'device_model', "C1", "C17", 'C15', 'C16'],
        axis=1, inplace=True)
    lenc = preprocessing.LabelEncoder()
    C_fields = ['hour', 'banner_pos', 'site_category', 'app_domain', 'app_category',
                'device_conn_type', 'C14', 'C18', 'C19', 'C20', 'C21', 'is_device', 'C_app_id', 'C_site_id',
                'C_site_domain', 'C_device_model', 'C_pix', 'C_device_type_1']
    # Label-encode every categorical field in place.
    for f, column in enumerate(C_fields):
        print("convert " + column + "...")
        tr_ts[column] = lenc.fit_transform(tr_ts[column])
    # One-hot encode the two category columns kept as dummies.
    dummies_site_category = pd.get_dummies(tr_ts['site_category'], prefix='site_category')
    dummies_app_category = pd.get_dummies(tr_ts['app_category'], prefix='app_category')
    # Standardise the numeric C* columns.
    scaler = preprocessing.StandardScaler()
    age_scale_param = scaler.fit(tr_ts[['C14', 'C18', 'C19', 'C20', 'C21']])
    tr_ts[['C14', 'C18', 'C19', 'C20', 'C21']] = age_scale_param.transform(tr_ts[['C14', 'C18', 'C19', 'C20', 'C21']])
    tr_ts_new = pd.concat([tr_ts, dummies_site_category, dummies_app_category], axis=1)
    tr_ts_new.drop(['site_category', 'app_category'], axis=1, inplace=True)
    # test rows were concatenated first, so the first test.shape[0] rows are test.
    tr_ts_new.iloc[:test.shape[0], ].to_csv('test_FE.csv')
    tr_ts_new.iloc[test.shape[0]:, ].to_csv('train_FE.csv')
if __name__ == '__main__':
    print(">>>>>start...")
    # Earlier pipeline stages; uncomment to regenerate their outputs.
    # genSampleData()
    # futureEngineer()
    train_test_split()
    print("finished!")
# Find remainder when a polynomial involving x is divided by x-k, where k is a real number.
# By the polynomial remainder theorem the remainder equals p(k), so we read
# the coefficients and evaluate the polynomial at k.
p = int(input("Highest power of x: "))
a = []
# a[i] holds the coefficient of x^i; the three prompts differ only in wording.
for i in range(p+1):
    if i > 1:
        a.append(int(input("Coefficient of x^" + str(i) + ": ")))
    if i == 1:
        a.append(int(input("Coefficient of x: ")))
    if i == 0:
        a.append(int(input("Constant: ")))
k = int(input("Value of k: "))
total = 0
# Evaluate p(k) term by term.
for i in range(p+1):
    total += (k**i) * a[i]
print(total)
|
def cli(command, username):
    """Interpret *command* for *username* and return the output string.

    Only 'stats' is implemented (returns a placeholder); any other command
    is echoed back unchanged.  *username* is currently unused.
    """
    if command == "stats":
        return "Test output"
    return command
from data import Data
from tree import Tree
from tree import Node
import numpy as np
import math
from random import randint
################################################################################
# Takes an array of probabilities and turns into entropy
################################################################################
def entropy(p):
    """Shannon entropy (base 2) of a probability distribution *p*.

    p -- indexable/iterable of probabilities (expected to sum to 1).
    Returns the entropy in bits.

    Robustness fix: zero probabilities are skipped instead of being passed
    to log2 (taking 0*log(0) = 0, the standard convention); the original
    raised a math domain error on any zero entry.
    """
    ent = 0
    for prob in p:
        if prob > 0:
            ent -= prob * math.log(prob, 2)  # entropy is to the base 2
    return ent
################################################################################
# Calculates information of data based on an attribute or the whole data
################################################################################
def info(data_obj, attribute=None):
    """Weighted average label entropy of *data_obj*.

    With an attribute, this is the conditional entropy H(label|attribute):
    the entropy of each attribute-value partition weighted by its size.
    Without one, it is simply the entropy of the whole label column.
    """
    total_points = len(data_obj.get_column('label'))
    if attribute is not None:
        # One partition per possible value of the attribute.
        partition_values = data_obj.attributes[attribute].possible_vals
    else:
        # Single "partition": the whole data set.
        partition_values = [None]
    result = 0
    for val in partition_values:
        if attribute is not None:
            subset = data_obj.get_row_subset(attribute, val)
        else:
            subset = data_obj
        subset_size = len(subset)
        weight = float(subset_size) / total_points
        # Empirical label distribution within this partition.
        _, counts = np.unique(subset.get_column('label'), return_counts=True)
        probs = [float(c) / subset_size for c in counts]
        result += weight * entropy(probs)
    return result
################################################################################
# Information gain based on an attribute
################################################################################
def info_gain(data_obj, attribute=None):
    """Information gain of splitting on *attribute*:
    H(label) - H(label|attribute)."""
    return info(data_obj) - info(data_obj, attribute)
################################################################################
# Returns a new dictionary after making a copy of the original dictionary and
# then removing that key
################################################################################
def remove_key(d, key):
    """Return a shallow copy of dict *d* with *key* removed.

    Raises KeyError if *key* is absent (same as the original `del`).
    """
    remaining = d.copy()
    del remaining[key]
    return remaining
################################################################################
# Implementation of Decision Tree using ID3 algorithm
#
# Inputs:
# data_obj = data object for which we have to run ID3
# attributes = a dictionary which tells which features to look for
# parent = parent of the root node
# tree = tree object
# limit_depth = a boolean value which says whether we have to limit depth or not
# max_depth =
################################################################################
def ID3(data_obj, attributes, parent, tree, limit_depth = False, max_depth = 10):
    """Grow a decision tree under *parent* using the ID3 algorithm.

    data_obj    -- data object holding the current subset of rows
    attributes  -- dict of attributes still available for splitting
    parent      -- node to attach the subtree to (None -> tree root)
    tree        -- tree object, mutated in place
    limit_depth -- when True, stop growing past max_depth
    max_depth   -- depth cap used when limit_depth is True

    Base cases: empty data (nothing added), pure labels (leaf), or no
    attributes left (majority-label leaf). Otherwise split on the
    highest-information-gain attribute and recurse per attribute value.
    """
    label = data_obj.get_column('label')
    if(len(label)<=0):
        # No data
        return
    elif(len(np.unique(label))==1):
        # Object of only 1 label in the tree
        # Add a node with labels
        n = Node(np.unique(label)[0],True)
        tree.add_node(n, parent)
    elif(len(attributes)<=0):
        # Add majority label to the tree as the node
        # first get counts of individual labels
        bins, counts = np.unique(label, return_counts=True)
        n = Node(bins[np.argmax(counts)],True)
        tree.add_node(n, parent)
    else:
        if(limit_depth):
            if(max_depth < 0):
                print("Max-depth should be greater than 0. Aborting!!!")
                return
        # Information gain for each features
        info_gain_per_feature = {}
        for key in attributes:
            info_gain_per_feature[key] = info_gain(data_obj,key)
            # print(key + "," + str(info_gain_per_feature[key]))
        # Choose the best feature and the possible values
        best_feature = max(info_gain_per_feature, key=info_gain_per_feature.get)
        best_feature_values = data_obj.attributes[best_feature].possible_vals
        # Add a node
        n = Node(best_feature, False)
        # Add all possible directions in which node can go
        for i in range(len(best_feature_values)):
            # partition into subset based on different values
            data_subset_obj = data_obj.get_row_subset(best_feature, best_feature_values[i])
            # if non-zero items in the subset data
            if(data_subset_obj.raw_data.shape[0] > 0):
                n.add_value(best_feature_values[i])
        tree.add_node(n, parent)
        # Check depth of the tree after adding this node.
        if(limit_depth):
            depth = tree.get_depth(tree.get_root())
            if(depth > max_depth):
                # Donot grow the tree instead add label nodes
                tree.del_node(n, parent)
                # Add majority label to the tree as the node
                # first get counts of individual labels
                bins, counts = np.unique(label, return_counts=True)
                n = Node(bins[np.argmax(counts)],True)
                tree.add_node(n, parent)
                return
        # pop this feature from dictionary (copy -- siblings keep theirs)
        attributes_new = remove_key(attributes,best_feature)
        for i in range(len(n.value)):
            # partition into subsets based on different values
            data_subset_obj = data_obj.get_row_subset(best_feature, n.value[i])
            if(parent is None):
                new_parent = tree.get_root()
            else:
                new_parent = parent.child[-1]
            ID3(data_subset_obj, attributes_new, new_parent, tree, limit_depth, max_depth)
################################################################################
# Prediction using Decision Tree
#
# Inputs:
# root = node from which looking started
# tree = tree object
# data = data_row
# attributes = dictionary of attributes
################################################################################
def predict(root, tree, data, attributes):
    """Classify one data row by walking the decision tree from *root*.

    root       -- current node (leaf nodes carry the label in .feature)
    tree       -- tree object (passed through for the recursion)
    data       -- one raw data row; data[0] is the label, features follow
    attributes -- dict mapping attribute name -> descriptor with .index

    Returns the predicted label, or None when *root* is None.
    """
    if(root is not None):
        # label is the leaf node
        if(len(root.child) == 0):
            return root.feature
        else:
            node_attribute = root.feature
            node_attr_ind = attributes[node_attribute].index
            data_val = data[node_attr_ind+1] #1st one is label
            if(data_val in root.value):
                child_index = root.value.index(data_val)
            else:
                # Attribute value never seen at this node during training:
                # fall back to a RANDOM child, so predictions can be
                # nondeterministic for unseen values.
                #print("No node direction matches the value of attribute")
                child_index = randint(0, len(root.value)-1)
            label = predict(root.child[child_index], tree, data, attributes)
            return label
    else:
        return
################################################################################
# Prediction Accuracy using Decision Tree
#
# Inputs:
# data_obj = data_obj
# tree = tree object
################################################################################
def prediction_accuracy(data_obj, tree):
    """Percentage of rows in *data_obj* that *tree* classifies correctly.

    The ground-truth label is column 0 of each raw data row.  Prints the
    final accuracy and returns it as a float in [0, 100].
    """
    rows = data_obj.raw_data
    num_data_points = rows.shape[0]
    correct = 0
    for row in rows:
        if predict(tree.get_root(), tree, row, data_obj.attributes) == row[0]:
            correct += 1
    accuracy = 100*float(correct)/num_data_points
    print("Final Accuracy ovr " + str(num_data_points) + " datapoints = " + str(accuracy))
    return accuracy
################################################################################
# Prediction Label using Decision Tree
#
# Inputs:
# data_obj = data_obj
# tree = tree object
################################################################################
def prediction_label(data_obj, tree):
    """Return a numpy array with the tree's predicted label for every row
    of *data_obj*."""
    rows = data_obj.raw_data
    labels = np.zeros(rows.shape[0])
    for i, row in enumerate(rows):
        labels[i] = predict(tree.get_root(), tree, row, data_obj.attributes)
    return labels
################################################################################
# Gets majority entry of each column in train data
################################################################################
def get_majority_column_data(data_obj):
    """Most frequent value of every attribute column of *data_obj*, keyed
    by attribute name (used to impute missing '?' entries)."""
    majority = {}
    for name in data_obj.attributes:
        column = data_obj.get_column([name]).flatten()
        values, counts = np.unique(column, return_counts=True)
        majority[name] = values[np.argmax(counts)]
    return majority
################################################################################
# Fills missing entry by majority values of the train data
################################################################################
def fill_data(data_obj, majority, data):
    """Replace '?' entries in *data* with the majority value of the column
    (as computed on the training data) and return a new Data object.

    data_obj -- data object supplying the attribute->index mapping
    majority -- dict attribute name -> majority value (get_majority_column_data)
    data     -- raw data array; column 0 is the label, features follow

    Bug fixed: the row loop ran over range(data.shape[0]-1), which skipped
    the LAST row; the stray `-1` confused the "+1 because column 0 is the
    label" COLUMN offset with the row count.
    """
    missing_data = '?'
    if data_obj is None:
        return None
    for key in data_obj.attributes:
        index = data_obj.attributes[key].index
        for i in range(data.shape[0]):  # every row; +1 below skips the label column
            # if missing data found
            if(data[i][index+1] == missing_data):
                data[i][index+1] = majority[key]
    # Build a fresh Data object around the imputed array
    data_obj = Data(data = data)
    return data_obj
|
# Generated by Django 2.2.14 on 2020-07-08 03:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the PointHistory model: a per-user log of points earned for
    an assignment (reason, point value, creation timestamp).

    Auto-generated by Django 2.2.14; field verbose_names are intentionally
    left in the project's original (Chinese) wording.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course', '0003_auto_20200708_1158'),
        ('user', '0003_auto_20200224_1847'),
    ]
    operations = [
        migrations.CreateModel(
            name='PointHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reason', models.CharField(max_length=100, verbose_name='積點原因')),
                ('point', models.IntegerField(default=0, verbose_name='點數')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='積點時間')),
                ('assignment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='course.Assignment')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='point_list', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import py_thorlabs_ctrl.kinesis
import clr, time
POLLING_INTERVAL = 250
ENABLE_SLEEP_TIME = 0.1
py_thorlabs_ctrl.kinesis.check_import()
from System import String
from System import Decimal
clr.AddReference('System.Collections')
clr.AddReference("Thorlabs.MotionControl.GenericMotorCLI")
clr.AddReference("Thorlabs.MotionControl.DeviceManagerCLI")
import Thorlabs.MotionControl.DeviceManagerCLI
from Thorlabs.MotionControl.DeviceManagerCLI import DeviceManagerCLI
import Thorlabs.MotionControl.GenericMotorCLI
class Motor:
    """
    Base class for Thorlabs motion controllers. Contains basic functions that apply to most controllers.
    """
    INIT_TIMEOUT = 5000  # ms to wait for device settings on first connect

    def __init__(self, serial_number):
        # Kinesis .NET APIs expect the serial number as a string.
        self.serial_number = str(serial_number)

    def create(self):
        """Instantiate the underlying .NET device object (subclass hook)."""
        # abstract
        pass

    def get_device(self):
        """Return the .NET device object, raising AttributeError if
        create() was never called."""
        try:
            device = self.device
        except AttributeError:
            print('device not created!')
            raise
        return device

    def enable(self):
        """Connect to the device, start polling, enable the axis and load
        its motor configuration."""
        device = self.get_device()
        device.Connect(self.serial_number)
        if not device.IsSettingsInitialized():
            device.WaitForSettingsInitialized(self.INIT_TIMEOUT)
        device.StartPolling(POLLING_INTERVAL)
        time.sleep(ENABLE_SLEEP_TIME)
        device.EnableDevice()
        time.sleep(ENABLE_SLEEP_TIME)
        device.LoadMotorConfiguration(self.serial_number)

    def get_serial_number(self):
        """Serial number as reported by the device itself."""
        device = self.get_device()
        device_info = device.GetDeviceInfo()
        return device_info.SerialNumber

    def get_name(self):
        """Human-readable device name from the device info block."""
        device = self.get_device()
        device_info = device.GetDeviceInfo()
        return device_info.Name

    def get_position(self):
        """Current device position converted from .NET Decimal to float."""
        device = self.get_device()
        return Decimal.ToDouble(device.DevicePosition)

    def set_velocity(self, max_velocity=None, acceleration=None):
        """Set velocity parameters; an omitted value keeps the device's
        current setting.

        Bug fixed: the fallback branches referenced an undefined name
        `params` (guaranteed NameError when an argument was omitted); they
        now read from `velocity_params`.  `== None` also tightened to
        `is None`.
        """
        device = self.get_device()
        velocity_params = device.GetVelocityParams()
        if max_velocity is None:
            max_velocity = Decimal.ToDouble(velocity_params.MaxVelocity)
        if acceleration is None:
            acceleration = Decimal.ToDouble(velocity_params.Acceleration)
        device.SetVelocityParams(Decimal(max_velocity), Decimal(acceleration))

    def is_homed(self):
        """True when the device reports a completed homing move."""
        device = self.get_device()
        return device.Status.IsHomed

    def home(self):
        """Start a homing move (0 -> no wait timeout callback)."""
        device = self.get_device()
        device.Home(0)

    def move_relative(self, dis):
        """Move by *dis* (device units) relative to the current position."""
        device = self.get_device()
        device.SetMoveRelativeDistance(Decimal(dis))
        device.MoveRelative(0)

    def move_absolute(self, pos):
        """Move to absolute position *pos* (device units)."""
        device = self.get_device()
        device.MoveTo(Decimal(pos), 0)

    def disable(self):
        """Disable the axis (device stays connected)."""
        device = self.get_device()
        device.DisableDevice()

    def disconnect(self):
        """Drop the connection to the device."""
        device = self.get_device()
        device.Disconnect()
class KCubeMotor(Motor):
    """
    Base class for K-Cubes.
    """
    def set_joystickmode_velocity(self):
        """Put the cube's top-panel wheel/joystick into velocity mode.

        The MMI parameter attribute was renamed across Kinesis releases,
        so both spellings are attempted before giving up.
        """
        device = self.get_device()
        params = device.GetMMIParams()
        try:
            # prior to kinesis 1.14.6
            params.WheelMode = Thorlabs.MotionControl.GenericMotorCLI.Settings.KCubeMMISettings.KCubeWheelMode.Velocity
        except AttributeError:
            try:
                # kinesis 1.14.6 and later
                params.JoystickMode = Thorlabs.MotionControl.GenericMotorCLI.Settings.KCubeMMISettings.KCubeJoystickMode.Velocity
            except AttributeError:
                raise AttributeError('cannot find this attribute. APIs have changed. look up latest documentation.')
        device.SetMMIParams(params)

    def set_display_intensity(self, intensity):
        """Set the front-panel display brightness."""
        device = self.get_device()
        params = device.GetMMIParams()
        params.DisplayIntensity = intensity
        device.SetMMIParams(params)

    def set_display_timeout(self, timeout):
        """Set the front-panel display timeout."""
        device = self.get_device()
        params = device.GetMMIParams()
        params.DisplayTimeout = timeout
        device.SetMMIParams(params)
class TCubeMotor(Motor):
    """
    Base class for T-Cubes.
    """
    # No T-Cube-specific behavior yet; exists to mirror KCubeMotor.
    pass
class KCubeDCServo(KCubeMotor):
    # Concrete driver for the KCube DC servo controller.
    def create(self):
        """Load the KCube DCServo CLI assembly and create the device object."""
        # NOTE(review): assembly name is spelled "ThorLabs" here but
        # "Thorlabs" elsewhere in this file -- confirm which casing the
        # installed DLLs expect.
        clr.AddReference("ThorLabs.MotionControl.KCube.DCServoCLI")
        from Thorlabs.MotionControl.KCube.DCServoCLI import KCubeDCServo
        DeviceManagerCLI.BuildDeviceList()
        self.device = KCubeDCServo.CreateKCubeDCServo(self.serial_number)
class TCubeDCServo(TCubeMotor):
    # Concrete driver for the TCube DC servo controller.
    def create(self):
        """Load the TCube DCServo CLI assembly and create the device object."""
        # NOTE(review): "ThorLabs" casing differs from TCubeStepper below --
        # confirm against the installed DLL names.
        clr.AddReference("ThorLabs.MotionControl.TCube.DCServoCLI")
        from Thorlabs.MotionControl.TCube.DCServoCLI import TCubeDCServo
        DeviceManagerCLI.BuildDeviceList()
        self.device = TCubeDCServo.CreateTCubeDCServo(self.serial_number)
class TCubeStepper(TCubeMotor):
    # Concrete driver for the TCube stepper-motor controller.
    def create(self):
        """Load the TCube StepperMotor CLI assembly and create the device."""
        clr.AddReference("Thorlabs.MotionControl.TCube.StepperMotorCLI")
        from Thorlabs.MotionControl.TCube.StepperMotorCLI import TCubeStepper
        DeviceManagerCLI.BuildDeviceList()
        self.device = TCubeStepper.CreateTCubeStepper(self.serial_number)
from techventory.models import *
from django.contrib import admin
class OperatingSystemVersionInline(admin.TabularInline):
    # Edit OS versions inline on the OperatingSystem admin page.
    model = OperatingSystemVersion
    extra = 1  # one blank extra row for adding a version
class OperatingSystemAdmin(admin.ModelAdmin):
    """Admin for OperatingSystem with its versions editable inline."""
    inlines = [OperatingSystemVersionInline]
class ApplicationVersionInline(admin.TabularInline):
    # Edit application versions inline on the Application admin page.
    model = ApplicationVersion
    extra = 1  # one blank extra row for adding a version
class ApplicationAdmin(admin.ModelAdmin):
    """Admin for Application with its versions editable inline."""
    inlines = [ApplicationVersionInline]
class ServerAdmin(admin.ModelAdmin):
    """Server changelist showing hostname, domain and dop columns."""
    list_display = (
        'hostname',
        'domain',
        'dop',
    )
# Register the models with their customised admin classes.
admin.site.register(Server, ServerAdmin)
admin.site.register(Application, ApplicationAdmin)
admin.site.register(OperatingSystem, OperatingSystemAdmin)
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the free-form 'processors' TextField
    (default '', blank allowed) to ExportSubscriber."""

    def forwards(self, orm):
        # Adding field 'ExportSubscriber.processors'
        db.add_column('feedjack_wp_export_exportsubscriber', 'processors',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'ExportSubscriber.processors'
        db.delete_column('feedjack_wp_export_exportsubscriber', 'processors')

    # Frozen ORM snapshot South uses to build the `orm` argument above;
    # auto-generated -- do not edit by hand.
    models = {
        'feedjack.feed': {
            'Meta': {'ordering': "('name', 'feed_url')", 'object_name': 'Feed'},
            'etag': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'feed_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
            'filters': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'feeds'", 'blank': 'True', 'to': "orm['feedjack.Filter']"}),
            'filters_logic': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shortname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'skip_errors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'tagline': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
        },
        'feedjack.filter': {
            'Meta': {'object_name': 'Filter'},
            'base': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filters'", 'to': "orm['feedjack.FilterBase']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
        },
        'feedjack.filterbase': {
            'Meta': {'object_name': 'FilterBase'},
            'crossref': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'crossref_rebuild': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'crossref_span': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'crossref_timeline': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'handler_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
        },
        'feedjack_wp_export.export': {
            'Meta': {'ordering': "('url', 'blog_id', 'username')", 'unique_together': "(('url', 'blog_id'),)", 'object_name': 'Export'},
            'blog_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '2047'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '63'})
        },
        'feedjack_wp_export.exportsubscriber': {
            'Meta': {'ordering': "('export', '-is_active', 'feed')", 'object_name': 'ExportSubscriber'},
            'export': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriber_set'", 'to': "orm['feedjack_wp_export.Export']"}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exports'", 'to': "orm['feedjack.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'processors': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'taxonomies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['feedjack_wp_export.TaxonomyTerm']", 'null': 'True', 'blank': 'True'})
        },
        'feedjack_wp_export.taxonomyterm': {
            'Meta': {'ordering': "('taxonomy', 'term_name', 'term_id')", 'unique_together': "(('taxonomy', 'term_name'), ('taxonomy', 'term_id'))", 'object_name': 'TaxonomyTerm'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'taxonomy': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
            'term_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'term_name': ('django.db.models.fields.CharField', [], {'max_length': '254', 'blank': 'True'})
        }
    }
    complete_apps = ['feedjack_wp_export']
import numpy as np
def expectation(sequence, pi, A, pb):
    """E-step of Baum-Welch: run forward-backward, then derive the state
    (gamma) and state-pair (chi) responsibilities.

    Returns [alpha_n, beta_n, gamma_n, chi_n].
    """
    alpha_n, beta_n = forward_backward(sequence, pi, A, pb)
    gamma_n = compute_gamma(alpha_n, beta_n)
    chi_n = compute_chi(sequence, alpha_n, beta_n, A, pb)
    return [alpha_n, beta_n, gamma_n, chi_n]
def forward_backward(sequence, pi, A, pb):
    """Forward-backward pass of an HMM.

    sequence -- observations, shape (1, T) (only its length T is used here)
    pi       -- initial state distribution, length K
    A        -- K x K state transition matrix
    pb       -- per-state emission probabilities over time, shape (K, T)

    Returns [alpha_n, beta_n], both shape (K, T).

    NOTE(review): no per-step normalisation is applied, so alpha/beta can
    underflow for long sequences.
    NOTE(review): the forward pass combines alpha[k_, t-1] with A[k, k_]
    (reading A as to,from) while the backward pass uses A[k, k_] as
    from,to -- one of the two looks transposed; confirm the intended
    orientation of A.
    """
    K = np.shape(A)[0] #num hidden states
    T = np.shape(sequence)[1] #num observations
    # Forward (calculate alpha)
    alpha_n = np.zeros((K, T))
    alpha_aux = np.zeros((K, T))
    alpha_n[:,0] = pi * pb[:,0] #alpha_0
    alpha_aux[:, 0] = alpha_n[:,0]
    for t in range(1, T):
        for k in range(K):
            for k_ in range(K):
                # NOTE(review): this recursion reads alpha_aux, which for
                # t >= 1 does NOT include the emission factor pb (only
                # alpha_n does); the standard forward recursion would use
                # the emission-weighted alpha here -- verify against a
                # reference implementation.
                alpha_aux[k,t] += alpha_aux[k_,t-1] * A[k,k_]
            alpha_n[k,t] = alpha_aux[k,t]* pb[k,t]
    #-----------------------
    # Backward (Calculate beta)
    beta_n = np.zeros((K,T))
    beta_n[:, T-1] = np.ones(K) #beta_T
    for t in range(T - 2, -1, -1):
        for k in range(K):
            for k_ in range(K):
                beta_n[k,t] += A[k,k_] * pb[k_,t+1] * beta_n[k_,t+1]
    return [alpha_n, beta_n]
def compute_gamma(alpha_n, beta_n):
    """Posterior state marginals gamma[k, t] proportional to
    alpha[k, t] * beta[k, t], with each time column normalised to sum 1.

    alpha_n, beta_n -- (K, T) arrays from forward_backward.
    """
    unnormalised = alpha_n * beta_n
    return unnormalised / unnormalised.sum(axis=0)
def compute_chi(sequence, alpha, beta, A, pb):
    """Pairwise posteriors chi[t, i, j] ∝ P(z_t=i, z_{t+1}=j | X).

    chi_t[i, j] = alpha[i, t] * A[i, j] * pb[j, t+1] * beta[j, t+1],
    normalized per time step. The last slice chi[T-1] has no successor
    and stays zero (as before).

    Fixes vs. the previous version:
      * `alpha[:, t] * A` broadcast alpha along the wrong axis, yielding
        A[i, j] * alpha[j] instead of alpha[i]; alpha must index the
        "from" state, matching A[from, to] used in the backward pass;
      * each slice was a running (cumulative) sum of all earlier
        normalized slices instead of the per-t quantity.
    """
    K = np.shape(A)[0]
    T = np.shape(sequence)[1]
    chi = np.zeros((T, K, K))
    for t in range(T - 1):
        # Row index i = state at t, column index j = state at t+1.
        chi_t = alpha[:, t][:, None] * A * (pb[:, t + 1] * beta[:, t + 1])[None, :]
        chi[t, :, :] = chi_t / chi_t.sum()
    return chi
|
# def solution(array, target_value, start, end):
# array.sort()
# if start > end:
# return None
# mid = (start + end) // 2
# if array[mid] == target_value:
# return mid
# elif array[mid] > target_value:
# return solution(array, target_value, start, mid - 1)
# else:
# return solution(array, target_value, mid, end)
# print(solution([1,2,3,4],2,0,3))
def solution(array, target_value):
    """Iterative binary search over a sorted `array`.

    Returns the index of `target_value`, or -1 when absent.
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        pivot = array[middle]
        if pivot == target_value:
            return middle
        if pivot < target_value:
            lo = middle + 1
        else:
            hi = middle - 1
    return -1
def binary_search_recursion(target, start, end, data):
    """Recursive binary search on sorted `data` within [start, end].

    Returns the index of `target`, or None when the range is exhausted.
    (Note: the iterative `solution` above signals "not found" with -1;
    this one keeps its historical None contract for existing callers.)
    """
    if start > end:
        return None
    mid = (start + end) // 2
    value = data[mid]
    if value == target:
        return mid
    if value > target:
        return binary_search_recursion(target, start, mid - 1, data)
    return binary_search_recursion(target, mid + 1, end, data)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from . import models
class UserAdmin(BaseUserAdmin):
    """Django admin for the custom User model.

    Extends the stock auth UserAdmin with a 'User Profile' fieldset
    exposing the shipping-address fields declared on models.User.
    """
    # def groups(self, obj):
    #     return "".join([g.group for g in obj.groups.all()])
    # Columns shown on the user changelist page.
    list_display = ('email', 'username', 'first_name', 'last_name', 'is_superuser', 'is_staff')
    search_fields = ('email', 'username', 'first_name', 'last_name')
    # Append the shipping fields after the stock auth fieldsets.
    fieldsets = BaseUserAdmin.fieldsets + (
        ('User Profile',
            {
                'fields': (
                    'shipping_first_name', 'shipping_last_name', 'shipping_country',
                    'shipping_state', 'shipping_city', 'shipping_street',
                    'shipping_zip', 'shipping_phone'
                )
            }),
    )
admin.site.register(models.User, UserAdmin) |
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
from skimage import feature
from skimage.color import rgb2gray
import os
# Module-level state for edge-ratio based painting retrieval (Python 2 script).
inputImage = ''
inputSigma = 0  # Canny sigma used for the query image
# ratios = {}
# Pre-computed edge-pixel ratio per catalog image, keyed by image name
# (built once by preprocessImages() and persisted to ratios.npy).
ratios = np.load('ratios.npy').item()
basedir = '../artSamples/'
images = []
# Catalog image names (extension stripped); sorted so indices are stable.
for im in np.sort(os.listdir(basedir)):
    images.append(im[:-4])
num_of_images = len(images)
sigmas = [0] * num_of_images  # per-catalog-image Canny sigma (all default 0)
results = []            # names of matched catalog images (filled by findMatch)
results_indicies = []   # matching indices into `images`
def getCoordinatesFromImage(img1):
    """Interactively collect clicked points on img1 via matplotlib.

    Blocks in plt.show() until the window is closed; each click inside the
    axes is recorded. Returns a 2xN array with row 0 = y (rows) and
    row 1 = x (cols).

    NOTE(review): on fewer than 4 clicks this returns the tuple
    (None, None) while the success path returns a single array — callers
    that index the result will likely crash on the error path. TODO confirm
    the intended error contract.
    """
    fig, ax1 = plt.subplots(1,1)
    plt.suptitle('Select Corresponding Points')
    ax1.set_title("Input Image")
    # ax1.axis('off')
    ax1.imshow(img1)
    axis1_xValues = []
    axis1_yValues = []
    # Handle Onclick
    def onclick(event):
        # Record only clicks that land inside the image axes.
        if event.inaxes == ax1:
            xVal = event.xdata
            yVal = event.ydata
            point = (xVal, yVal)
            plt.plot(xVal, yVal, ',')
            fig.canvas.draw()
            print 'image 1: ', point
            axis1_xValues.append(xVal)
            axis1_yValues.append(yVal)
    fig.canvas.mpl_connect('button_press_event', onclick)
    plt.show()
    if (len(axis1_xValues) < 4):
        print 'Must have at least 4 coresponding points.'
        return None, None
    # Store points in a 2xn numpy array
    points1 = np.zeros((2, len(axis1_xValues)))
    points1[0] = axis1_yValues
    points1[1] = axis1_xValues
    return points1
def getCroppedImage(im, corners):
    """Crop `im` to the bounding box of four clicked corners.

    `corners` is a 2x4 array whose columns are top-left, top-right,
    bottom-right and bottom-left, each stored as (row, col). The crop is
    inclusive of the max row/col, matching the original slicing.
    """
    tl, tr, br, bl = (corners[:, i] for i in range(4))
    row_lo = int(min(tl[0], tr[0]))
    row_hi = int(max(br[0], bl[0]))
    col_lo = int(min(tl[1], bl[1]))
    col_hi = int(max(tr[1], br[1]))
    return im[row_lo:row_hi + 1, col_lo:col_hi + 1, :]
def preprocessImages():
    # Build the edge-pixel ratio for every catalog image and store it in the
    # module-level `ratios` dict (keyed by image name). Reads each JPEG from
    # `basedir`; the caller is expected to persist `ratios` afterwards.
    for image in range(len(images)):
        im = imread(basedir + images[image] + '.jpg')
        edges = getEdges(im, sigmas[image])
        # displayOriginalAndEdges(im, edges)
        ratio = getEdgesRatio(edges, images[image])
        ratios[images[image]] = ratio
def getEdges(im, s):
    """Return the boolean Canny edge map of `im` at sigma `s`.

    The image is converted to grayscale first, as required by
    skimage's Canny detector.
    """
    grayscale = rgb2gray(im)
    return feature.canny(grayscale, sigma=s)
def displayOriginalAndEdges(im, edges):
    # Display the original image and the edges detected by the Canny Edge Detector
    # side by side; blocks until the figure window is closed.
    fig, axs = plt.subplots(1, 2, constrained_layout=True)
    axs[0].imshow(im)
    axs[0].set_title('Original Image')
    axs[1].imshow(edges, cmap='gray', interpolation=None)
    axs[1].set_title('Edges')
    plt.show()
def getEdgesRatio(edges, name):
    """Fraction of edge pixels in a (presumably boolean) edge map.

    A 2-bin histogram separates the False/True values, so counts[1] is the
    number of edge pixels. NOTE(review): plt.hist is used purely for its
    counts and leaves artists on the current figure as a side effect;
    `name` is only used by the commented-out title.
    """
    intensities = np.reshape(edges, (-1, 1)) # Reshape to 1-D
    counts, bins, bars = plt.hist(intensities, bins=2, edgecolor='black', linewidth=1.2)
    # plt.title("Histogram of " + name + " Image")
    # plt.show()
    return counts[1]/(counts[0] + counts[1])
def findMatch(im, r, threshold):
    # Append every catalog image whose stored edge ratio is within
    # `threshold` symmetric percent-difference of the query ratio `r`
    # to the module-level `results`/`results_indicies` (cumulative across
    # calls). `im` is currently unused.
    for image, ratio in ratios.iteritems():
        percentDifference = abs(r - ratio) / (0.5*(r + ratio))
        if percentDifference <= threshold:
            results.append(image)
            results_indicies.append(images.index(image))
    results_indicies.sort()
def displayMatches():
    # Stubbed out: previously rendered the query image alongside every
    # matched catalog image; kept (commented) for reference.
    print 'do not need'
    # i = len(results) + 1
    # plt.suptitle("Matches for " + inputImage)
    # plt.subplot(2, len(results), 1)
    # plt.title("Input Image - " + inputImage)
    # plt.imshow(imread('../images/' + inputImage + '.jpg'))
    #
    # for image in results:
    #     plt.subplot(2, len(results), i)
    #     plt.title("Output Image - " + image)
    #     plt.imshow(imread('../images/' + image + '.jpg'))
    #     i = i+1
    # plt.show()
def getSubsetWithEdgeAnalysis(inputImage, t):
    """Query pipeline: load the query JPEG, let the user click a crop
    region, compute the crop's edge-pixel ratio, and return the indices of
    catalog images whose ratio is within threshold `t`.

    NOTE(review): results accumulate in module-level lists and are not
    reset between calls, so repeated queries mix matches.
    """
    threshold = t
    im = imread('../queryImages/' + inputImage + '.jpg')
    corners = getCoordinatesFromImage(im)
    im = getCroppedImage(im, corners)
    plt.title('Cropped Input Image')
    plt.imshow(im)
    plt.show()
    # preprocessImages()
    # np.save('ratios.npy', ratios)
    edges = getEdges(im, inputSigma)
    ratio = getEdgesRatio(edges, inputImage)
    findMatch(im, ratio, threshold)
    # displayMatches()
    return results_indicies
if __name__ == '__main__':
    # NOTE(review): each inputImage/realImage/t triple below overwrites the
    # previous one, so only the LAST assignment
    # ('house-of-parliment-NotIdentical') actually runs; the others are
    # kept as a record of per-query thresholds and match counts.
    # returns 14 matches
    inputImage = 'old-artist-chicago-picasso'
    realImage = 'old-guitarist-chicago'
    t = 0.50
    # returns 109 matches
    inputImage = 'wall-clocks'
    realImage = 'the-persistence-of-memory-1931'
    t = 0.40
    # returns 52 matches
    inputImage = 'the-scream'
    realImage = 'the-scream-1893'
    t = 0.20
    # returns 15 matches
    inputImage = 'starry-night'
    realImage = 'the-starry-night'
    t = 0.03
    # doesn't work with the monaLisa - returns everything
    inputImage = 'mona-lisa'
    realImage = 'mona-lisa'
    t = 2.00
    # returns 141
    inputImage = 'house-of-parliment-NotIdentical'
    realImage = 'houses-of-parliament'
    t = 0.30
    results_indicies = getSubsetWithEdgeAnalysis(inputImage, t)
    print "Success" if realImage in results else "Failure"
    print len(results_indicies)
|
from flask import Flask
from config import BaseConfig, VkConfig, LoggerConfig
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from vk_api import VkApi
import logging
from core.controler import EventControler
from handlers_settings import HANDLERS_LIST
# Application wiring: Flask app, VK API client, database, migrations,
# management commands and file logging — all created at import time.
app = Flask(__name__)
app.config.from_object(BaseConfig)
api = VkApi(token=VkConfig.TOKEN)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# File logger configured from LoggerConfig and attached to the Flask logger.
logger_handler = logging.FileHandler(LoggerConfig.FILE_NAME)
logger_formatter = logging.Formatter(LoggerConfig.FORMAT)
logger_handler.setLevel(LoggerConfig.LEVEL)
logger_handler.setFormatter(logger_formatter)
app.logger.addHandler(logger_handler)
controler = EventControler(HANDLERS_LIST, api, db, app.logger) |
import abc
import hashlib
import itertools
import pathlib
from typing import Any, Callable, IO, Literal, NoReturn, Optional, Sequence, Set, Tuple, Union
from urllib.parse import urlparse
from torchdata.datapipes.iter import (
FileLister,
FileOpener,
IterableWrapper,
IterDataPipe,
RarArchiveLoader,
TarArchiveLoader,
ZipArchiveLoader,
)
from torchvision.datasets.utils import (
_decompress,
_detect_file_type,
_get_google_drive_file_id,
_get_redirect_url,
download_file_from_google_drive,
download_url,
extract_archive,
)
class OnlineResource(abc.ABC):
    """A dataset resource that can be obtained (downloaded) and loaded.

    Subclasses implement `_download`. `load` locates the best local
    candidate (extracted dir > decompressed file > raw archive), fetching
    and preprocessing it on a cache miss, and returns a datapipe of
    (path, file handle) pairs.
    """
    def __init__(
        self,
        *,
        file_name: str,
        sha256: Optional[str] = None,
        preprocess: Optional[Union[Literal["decompress", "extract"], Callable[[pathlib.Path], None]]] = None,
    ) -> None:
        self.file_name = file_name
        self.sha256 = sha256
        # Map the string shorthands onto the static helpers; any other
        # string is rejected eagerly so misconfigurations fail at
        # construction time rather than at load time.
        if isinstance(preprocess, str):
            if preprocess == "decompress":
                preprocess = self._decompress
            elif preprocess == "extract":
                preprocess = self._extract
            else:
                raise ValueError(
                    f"Only `'decompress'` or `'extract'` are valid if `preprocess` is passed as string,"
                    f"but got {preprocess} instead."
                )
        self._preprocess = preprocess
    @staticmethod
    def _extract(file: pathlib.Path) -> None:
        # Extract into a sibling directory named after the file with ALL
        # suffixes stripped (handles multi-suffix names like foo.tar.gz).
        extract_archive(str(file), to_path=str(file).replace("".join(file.suffixes), ""), remove_finished=False)
    @staticmethod
    def _decompress(file: pathlib.Path) -> None:
        # Decompress in place and delete the compressed original.
        _decompress(str(file), remove_finished=True)
    def _loader(self, path: pathlib.Path) -> IterDataPipe[Tuple[str, IO]]:
        """Build a datapipe of open binary file handles for `path`.

        Directories are walked recursively; single files are additionally
        piped through an archive loader when the suffix looks like an
        archive.
        """
        if path.is_dir():
            return FileOpener(FileLister(str(path), recursive=True), mode="rb")
        dp = FileOpener(IterableWrapper((str(path),)), mode="rb")
        archive_loader = self._guess_archive_loader(path)
        if archive_loader:
            dp = archive_loader(dp)
        return dp
    # Archive suffix -> datapipe that iterates the archive's members.
    _ARCHIVE_LOADERS = {
        ".tar": TarArchiveLoader,
        ".zip": ZipArchiveLoader,
        ".rar": RarArchiveLoader,
    }
    def _guess_archive_loader(
        self, path: pathlib.Path
    ) -> Optional[Callable[[IterDataPipe[Tuple[str, IO]]], IterDataPipe[Tuple[str, IO]]]]:
        """Return the matching archive loader, or None for non-archives."""
        try:
            _, archive_type, _ = _detect_file_type(path.name)
        except RuntimeError:
            # Unknown file type: treat as a plain file.
            return None
        return self._ARCHIVE_LOADERS.get(archive_type)  # type: ignore[arg-type]
    def load(
        self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False
    ) -> IterDataPipe[Tuple[str, IO]]:
        """Locate (downloading + preprocessing on a miss) and open the resource under `root`."""
        root = pathlib.Path(root)
        path = root / self.file_name
        # Instead of the raw file, there might also be files with fewer suffixes after decompression or directories
        # with no suffixes at all. `pathlib.Path().stem` will only give us the name with the last suffix removed, which
        # is not sufficient for files with multiple suffixes, e.g. foo.tar.gz.
        stem = path.name.replace("".join(path.suffixes), "")
        def find_candidates() -> Set[pathlib.Path]:
            # Although it looks like we could glob for f"{stem}*" to find the file candidates as well as the folder
            # candidate simultaneously, that would also pick up other files that share the same prefix. For example, the
            # test split of the stanford-cars dataset uses the files
            # - cars_test.tgz
            # - cars_test_annos_withlabels.mat
            # Globbing for `"cars_test*"` picks up both.
            candidates = {file for file in path.parent.glob(f"{stem}.*")}
            folder_candidate = path.parent / stem
            if folder_candidate.exists():
                candidates.add(folder_candidate)
            return candidates
        candidates = find_candidates()
        if not candidates:
            # Cache miss: fetch, then run the configured preprocess step once.
            self.download(root, skip_integrity_check=skip_integrity_check)
            if self._preprocess is not None:
                self._preprocess(path)
            candidates = find_candidates()
        # We use the path with the fewest suffixes. This gives us the
        # extracted > decompressed > raw
        # priority that we want for the best I/O performance.
        return self._loader(min(candidates, key=lambda candidate: len(candidate.suffixes)))
    @abc.abstractmethod
    def _download(self, root: pathlib.Path) -> None:
        # Subclass hook: place the raw resource file under `root`.
        pass
    def download(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> pathlib.Path:
        """Download the resource into `root`, optionally verifying its SHA256."""
        root = pathlib.Path(root)
        self._download(root)
        path = root / self.file_name
        if self.sha256 and not skip_integrity_check:
            self._check_sha256(path)
        return path
    def _check_sha256(self, path: pathlib.Path, *, chunk_size: int = 1024 * 1024) -> None:
        # Stream the file in 1 MiB chunks to keep memory flat on large
        # archives. NOTE(review): local `hash` shadows the builtin.
        hash = hashlib.sha256()
        with open(path, "rb") as file:
            while chunk := file.read(chunk_size):
                hash.update(chunk)
        sha256 = hash.hexdigest()
        if sha256 != self.sha256:
            raise RuntimeError(
                f"After the download, the SHA256 checksum of {path} didn't match the expected one: "
                f"{sha256} != {self.sha256}"
            )
class HttpResource(OnlineResource):
    """Resource fetched over HTTP, with optional mirror URLs.

    `resolve` follows redirects once and may hand off to a
    `GDriveResource` when the redirect points at Google Drive.
    """
    def __init__(
        self, url: str, *, file_name: Optional[str] = None, mirrors: Sequence[str] = (), **kwargs: Any
    ) -> None:
        # Default the file name to the last component of the URL path.
        super().__init__(file_name=file_name or pathlib.Path(urlparse(url).path).name, **kwargs)
        self.url = url
        self.mirrors = mirrors
        self._resolved = False
    def resolve(self) -> OnlineResource:
        """Return a resource whose URL needs no further redirect handling.

        May return `self` (marked resolved), a new resolved HttpResource,
        or a GDriveResource when the redirect target is Google Drive.
        """
        if self._resolved:
            return self
        redirect_url = _get_redirect_url(self.url)
        if redirect_url == self.url:
            self._resolved = True
            return self
        # Carry the constructor-relevant state over to the new resource;
        # lstrip("_") turns the private attr name into the kwarg name.
        meta = {
            attr.lstrip("_"): getattr(self, attr)
            for attr in (
                "file_name",
                "sha256",
                "_preprocess",
            )
        }
        gdrive_id = _get_google_drive_file_id(redirect_url)
        if gdrive_id:
            return GDriveResource(gdrive_id, **meta)
        http_resource = HttpResource(redirect_url, **meta)
        http_resource._resolved = True
        return http_resource
    def _download(self, root: pathlib.Path) -> None:
        # Resolve lazily on first download, then delegate to the resolved
        # resource (which may be a different object/class).
        if not self._resolved:
            return self.resolve()._download(root)
        # Try the primary URL first, then each mirror; the first success
        # returns, and the for-else raises only when every source failed.
        for url in itertools.chain((self.url,), self.mirrors):
            try:
                download_url(url, str(root), filename=self.file_name, md5=None)
            # TODO: make this more precise
            except Exception:
                continue
            return
        else:
            # TODO: make this more informative
            raise RuntimeError("Download failed!")
class GDriveResource(OnlineResource):
    """Resource hosted on Google Drive, identified by its file id."""
    def __init__(self, id: str, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.id = id
    def _download(self, root: pathlib.Path) -> None:
        # Integrity is checked via self.sha256 in download(), so md5=None here.
        download_file_from_google_drive(self.id, root=str(root), filename=self.file_name, md5=None)
class ManualDownloadResource(OnlineResource):
    """Resource the user must fetch by hand; automatic download always raises."""
    def __init__(self, instructions: str, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Human-readable steps shown in the error message below.
        self.instructions = instructions
    def _download(self, root: pathlib.Path) -> NoReturn:
        raise RuntimeError(
            f"The file {self.file_name} cannot be downloaded automatically. "
            f"Please follow the instructions below and place it in {root}\n\n"
            f"{self.instructions}"
        )
class KaggleDownloadResource(ManualDownloadResource):
    """Manual-download resource with canned instructions for Kaggle competitions."""
    def __init__(self, challenge_url: str, *, file_name: str, **kwargs: Any) -> None:
        # Kaggle requires login + joining the competition, so automation
        # is not possible; spell out the steps for the user instead.
        instructions = "\n".join(
            (
                "1. Register and login at https://www.kaggle.com",
                f"2. Navigate to {challenge_url}",
                "3. Click 'Join Competition' and follow the instructions there",
                "4. Navigate to the 'Data' tab",
                f"5. Select {file_name} in the 'Data Explorer' and click the download button",
            )
        )
        super().__init__(instructions, file_name=file_name, **kwargs)
|
'''
Daniel's Friday Data
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
'''
Data Order: 1.220 4.220 4.250 1.250 1.280 4.280
'''
# D[[24]] etc. below select spreadsheet columns LABELED with the integers
# 24..41 — presumably six (n, f, c) column triples, one per condition in
# the "Data Order" note above. TODO confirm against the workbook layout.
D = pd.read_excel('allDansdata.xlsx')
#print(D[[24]])
# One (n, f, c) list triple per condition a..f; filled column-wise below.
# NOTE(review): `df` here is a plain list, shadowing the conventional
# pandas DataFrame name.
an,af,ac = [],[],[]
bn,bf,bc = [],[],[]
cn,cf,cc = [],[],[]
dn,df,dc = [],[],[]
en,ef,ec = [],[],[]
fn,ff,fc = [],[],[]
# Copy each spreadsheet column (labels 24..41) into its Python list, one
# row at a time. Each loop is the same pattern: iterate the single-column
# frame and append the scalar in position 0 of each row.
for index,row in D[[24]].iterrows():
    an.append(row[0])
for index,row in D[[25]].iterrows():
    af.append(row[0])
for index,row in D[[26]].iterrows():
    ac.append(row[0])
for index,row in D[[27]].iterrows():
    bn.append(row[0])
for index,row in D[[28]].iterrows():
    bf.append(row[0])
for index,row in D[[29]].iterrows():
    bc.append(row[0])
for index,row in D[[30]].iterrows():
    cn.append(row[0])
for index,row in D[[31]].iterrows():
    cf.append(row[0])
for index,row in D[[32]].iterrows():
    cc.append(row[0])
for index,row in D[[33]].iterrows():
    dn.append(row[0])
for index,row in D[[34]].iterrows():
    df.append(row[0])
for index,row in D[[35]].iterrows():
    dc.append(row[0])
for index,row in D[[36]].iterrows():
    en.append(row[0])
for index,row in D[[37]].iterrows():
    ef.append(row[0])
for index,row in D[[38]].iterrows():
    ec.append(row[0])
for index,row in D[[39]].iterrows():
    fn.append(row[0])
for index,row in D[[40]].iterrows():
    ff.append(row[0])
for index,row in D[[41]].iterrows():
    fc.append(row[0])
# Convert every list to a numpy array ...
an =np.asarray(an)
af =np.asarray(af)
ac =np.asarray(ac)
bn =np.asarray(bn)
bf =np.asarray(bf)
bc =np.asarray(bc)
cn =np.asarray(cn)
cf =np.asarray(cf)
cc =np.asarray(cc)
dn =np.asarray(dn)
df =np.asarray(df)
dc =np.asarray(dc)
en =np.asarray(en)
ef =np.asarray(ef)
ec =np.asarray(ec)
fn =np.asarray(fn)
ff =np.asarray(ff)
fc =np.asarray(fc)
# ... then drop NaNs (short columns are padded with NaN by read_excel).
# NOTE(review): NaNs are dropped per-array, so paired arrays (e.g. an/af)
# may end up with different lengths if their NaNs don't line up.
ac = ac[~ np.isnan(ac)]
af = af[~ np.isnan(af)]
an = an[~ np.isnan(an)]
bc = bc[~ np.isnan(bc)]
bf = bf[~ np.isnan(bf)]
bn = bn[~ np.isnan(bn)]
cc = cc[~ np.isnan(cc)]
cf = cf[~ np.isnan(cf)]
cn = cn[~ np.isnan(cn)]
dc = dc[~ np.isnan(dc)]
df = df[~ np.isnan(df)]
dn = dn[~ np.isnan(dn)]
ec = ec[~ np.isnan(ec)]
ef = ef[~ np.isnan(ef)]
en = en[~ np.isnan(en)]
fc = fc[~ np.isnan(fc)]
ff = ff[~ np.isnan(ff)]
fn = fn[~ np.isnan(fn)]
# Pair up the (n, f) arrays for plotting: x = n values, y = f values.
D1 = [an,af]
D2 = [bn,bf]
D3 = [cn,cf]
D4 = [dn,df]
D5 = [en,ef]
D6 = [fn,ff]
'''# # # # # # #'''# # # # # # # ''' # # # # # # # ''' # # # #
# Two stacked scatter panels sharing the x axis; a pink vertical band at
# x=115 marks a reference value in both panels. Some series are scaled by
# 10 — presumably to bring different-magnitude conditions onto a common
# axis; TODO confirm the intended scaling.
fig = plt.figure()
ax1 = plt.subplot2grid((2,1), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,1), (1,0),sharex=ax1 )
'''
Data Order: 1.220 4.220 4.250 1.250 1.280 4.280
'''
ax1.axvline(115,c='pink',linewidth=10)
ax2.axvline(115,c='pink',linewidth=10)
ax1.scatter(D1[0],D1[1],c='r',s=250)
ax1.scatter(D4[0],10*D4[1],c='g',s=250)
ax1.scatter(D5[0],10*D5[1],c='b',s=250)
ax2.scatter(D2[0],D2[1],c='r',s=250)
ax2.scatter(D3[0],D3[1],c='g',s=250)
ax2.scatter(D6[0],10*D6[1],c='b',s=250)
plt.show()
|
# Add one to the running total fifty times: 50 + 50 == 100.
result = 50
for _ in range(50):
    result += 1
print(result) |
import os
import sys
import numpy as np
import pandas as pd
from itertools import product
import matplotlib.pyplot as plt
import seaborn as sns
from util import load_data, save_results, load_results, save_fig
from preprocess import preprocess
from obfuscation import obfuscate_keystrokes, mean_lag
from keystroke import extract_keystroke_features
from classify import classification_acc, predictions_smape
SEED = 1234     # default RNG seed (overridable via argv in __main__)
N_FOLDS = 10    # cross-validation folds for classification_acc
# Obfuscation strategies and their parameter sweeps (delay in ms,
# interval presumably in seconds — TODO confirm against obfuscation.py).
MIXES = ['delay', 'interval']
MIX_PARAMS = {'delay': [50, 100, 200, 500, 1000],
              'interval': [0.1, 0.5, 1.0, 1.5, 2.0]}
DATASETS = ['short_fixed', 'long_fixed', 'long_free']
# Prediction targets; 'user' is identity, the rest are demographics.
TARGETS = ['user', 'age', 'gender', 'handedness']
def describe(name):
    """
    Print summary statistics for dataset `name`: user/session counts,
    sample sizes, mean press-press interval and key-hold duration, and
    the class balance of each demographic target.

    Assumes load_data returns a frame indexed by (user, session) with
    'timepress'/'timerelease' columns — TODO confirm in util.load_data.
    """
    df = load_data(name)
    s = df.groupby(level=[0, 1]).size()
    print('Dataset :', name)
    print('Users :', len(s.groupby(level=0)))
    print('Sessions/user :', s.groupby(level=0).size().mean())
    print('Sample size :', s.mean(), '+/-', s.std())
    print('Mean pp interval (ms) :',
          df.groupby(level=[0, 1]).apply(lambda x: x['timepress'].diff().dropna().mean()).mean())
    print('Mean duration (ms) :',
          df.groupby(level=[0, 1]).apply(lambda x: (x['timerelease'] - x['timepress']).mean()).mean())
    # Per-target class proportions, counting each session once.
    for target in TARGETS[1:]:
        s = df.reset_index().groupby([target, 'session']).size().groupby(level=0).size()
        print(target)
        print(s / s.sum())
    return
def extract_features(df):
    """Per-(user, session) feature extraction.

    For every (user, session) group, keep the group's demographic labels
    (taken from its first row) and attach the keystroke feature vector
    computed by extract_keystroke_features.
    """
    def _session_row(session):
        first = session.iloc[0]
        return pd.Series({
            'age': first['age'],
            'gender': first['gender'],
            'handedness': first['handedness'],
            'features': extract_keystroke_features(session),
        })
    return df.groupby(level=[0, 1]).apply(_session_row)
def acc_figure(name):
    """Plot classification accuracy vs. lag for every dataset/target pair.

    Loads the results table saved by the main pipeline and draws a 4x3
    grid (rows = targets, columns = datasets) with one curve per
    obfuscation strategy; each curve is anchored at the unobfuscated
    ('none') accuracy at lag 0. Saves the figure via save_fig(name).
    """
    df = load_results(name)
    df = df.set_index(['dataset', 'strategy'])
    fig, axes = plt.subplots(4, 3, sharey=True, squeeze=True, figsize=(6, 5))
    for dataset, col in zip(DATASETS, axes.T):
        for target, ax in zip(TARGETS, col):
            # np.r_ prepends the lag-0 baseline point to each curve.
            ax.plot(np.r_[0, df.loc[(dataset, 'delay'), 'mean_delta'].values / 1000],
                    np.r_[df.loc[(dataset, 'none'), target].iloc[0], df.loc[(dataset, 'delay'), target]], linewidth=1, label='Delay')
            ax.plot(np.r_[0, df.loc[(dataset, 'interval'), 'mean_delta'].values / 1000],
                    np.r_[df.loc[(dataset, 'none'), target].iloc[0], df.loc[(dataset, 'interval'), target]], linewidth=1, linestyle='--', label='Interval')
            ax.set_ylim(0, 1)
            # Short fixed-text has smaller lags, so use a tighter x range.
            if dataset == 'short_fixed':
                ax.set_xlim(0, 1)
                ax.set_xticks([0, 0.25, 0.5, 0.75])
            else:
                ax.set_xlim(0, 2)
                ax.set_xticks([0, 0.5, 1, 1.5])
    axes[0, 0].set_title('Short fixed-text')
    axes[0, 1].set_title('Long fixed-text')
    axes[0, 2].set_title('Long free-text')
    axes[0, 0].set_ylabel('Identity ACC')
    axes[1, 0].set_ylabel('Age ACC')
    axes[2, 0].set_ylabel('Gender ACC')
    axes[3, 0].set_ylabel('Handedness ACC')
    # Hide x tick labels everywhere except the bottom row.
    for i,j in product(range(3), range(3)):
        axes[i,j].set_xticklabels([])
    axes[-1,-1].legend(loc='lower right')
    fig.text(0.5, 0.0, 'Lag (s)', ha='center')
    # fig.text(0.0, 0.5, 'ACC', va='center', rotation='vertical')
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.1, hspace=0.15)
    save_fig(name)
    return
if __name__ == '__main__':
    # Optional single CLI argument: the RNG seed.
    if len(sys.argv) > 2:
        print('Usage: python main.py [seed]')
        sys.exit(1)
    if len(sys.argv) == 2:
        seed = int(sys.argv[1])
    else:
        seed = SEED
    np.random.seed(seed)
    # NOTE(review): the full pipeline below is commented out; only the
    # final figure generation from previously-saved results runs.
    # Download and preprocess the data
    # preprocess()
    # Describe each dataset
    # for dataset in DATASETS:
    #     describe(dataset)
    # Mask the keystrokes in each dataset
    # for dataset, param in product(DATASETS, DELAY_PARAMS):
    #     mask_keystrokes(dataset, 'delay', param)
    #
    # for dataset, param in product(DATASETS, INTERVAL_PARAMS):
    #     mask_keystrokes(dataset, 'interval', param)
    # Classify each target and make predictions
    # results = []
    # for mix, dataset in product(MIXES, DATASETS):
    #     unmasked = load_data(dataset)
    #     unmasked_features = extract_features(unmasked)
    #
    #     user_acc, age_acc, gender_acc, hand_acc = (
    #         classification_acc(unmasked_features, target, N_FOLDS) for target in TARGETS
    #     )
    #     pp_smape, dur_smape = predictions_smape(unmasked)
    #
    #     results.append(('none', dataset, 0, 0, user_acc, age_acc, gender_acc, hand_acc, pp_smape, dur_smape))
    #
    #     for param in MIX_PARAMS[mix]:
    #         masked = load_data(dataset, masking=(mix, param))
    #         masked_features = extract_features(masked)
    #         lag = mean_lag(unmasked, masked)
    #
    #         user_acc, age_acc, gender_acc, hand_acc = (
    #             classification_acc(masked_features, target, N_FOLDS) for target in TARGETS
    #         )
    #         pp_smape, dur_smape = predictions_smape(masked)
    #
    #         results.append((mix, dataset, param, lag,
    #                         user_acc, age_acc, gender_acc, hand_acc, pp_smape, dur_smape))
    #
    #     results = pd.DataFrame.from_records(results,
    #                                         columns=['strategy', 'dataset', 'param',
    #                                                  'mean_delta'] + TARGETS + ['pp_SMAPE', 'dur_SMAPE'])
    #
    #     save_results(results, 'results')
    # Make a figure
    acc_figure('results')
|
f = open('018.txt', 'r')
n = 15
trokut = []
def rek(poz):
if poz in maks:
return maks[poz]
else:
return trokut[poz][0] + max(rek(poz+trokut[poz][1]),
rek(poz+trokut[poz][1]+1))
i = 1
for line in f:
trokut += [(int(x),i) for x in line.split()]
i+=1
maks = { poz : trokut[poz][0] for poz in range(len(trokut)-15, len(trokut)) }
print rek(0)
|
def make_sentences(parts):
    """Join sentence parts into one sentence.

    Words are separated by single spaces, a comma attaches directly to
    the word on its left, every '.' token is dropped, and exactly one
    period is appended at the end.
    """
    pieces = []
    for token in parts:
        if token == ",":
            pieces.append(token)
        elif token != ".":
            pieces.append(" " + token)
    # Drop the leading space contributed by the first word, then terminate.
    return "".join(pieces)[1:] + "."
'''
Implement a function, so it will produce a sentence out of the given parts.
Array of parts could contain:
words;
commas in the middle;
multiple periods at the end.
Sentence making rules:
there must always be a space between words;
there must not be a space between a comma and word on the left;
there must always be one and only one period at the end of a sentence.
Example:
makeSentence(['hello', ',', 'my', 'dear']) // returns 'hello, my dear.'
'''
|
# 外部公開用 / 一部の固有名詞を匿名化
# 機能:汎用Functionの外部モジュール化
# ライブラリインポート
import sys
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
import calendar
import datetime
# Function
# Determine commission eligibility (eligible / not eligible) per order row.
def prePreocessing01(index, dfMs01, dfMs02, dfDb01, dfDb02):
    """Enrich Master Sheet1 row `index` and copy eligible rows to Sheet2.

    Fills payment terms, customer code, expected deposit date and the
    commission-rep count; rows with 1 (or 2, for double commission) reps
    are appended to dfMs02. Returns [dfMs01, dfMs02].

    NOTE(review): `index` (an iterrows label) is used with .iloc, which
    assumes a default RangeIndex — TODO confirm.
    """
    # Fill in the payment terms (支払サイト) as a day offset from dfDb01.
    customer = dfMs01.iloc[index, dfMs01.columns.get_loc("客先名")]
    customeridx = dfDb01.query("客先名 == @customer").index
    dfMs01.iloc[index, dfMs01.columns.get_loc("支払サイト")] = pd.Timedelta(dfDb01.iloc[customeridx[0], 1], unit="D")
    # Fill in the customer code.
    dfMs01.iloc[index, dfMs01.columns.get_loc("客先コード")] = dfDb01.iloc[customeridx[0], dfDb01.columns.get_loc("得意先コード")]
    # Expected deposit date = ship date + payment terms.
    dfMs01.iloc[index, dfMs01.columns.get_loc("入金予定日")] = dfMs01.iloc[index, dfMs01.columns.get_loc("出荷日")] + dfMs01.iloc[index, dfMs01.columns.get_loc("支払サイト")]
    # Decide whether a commission-eligible rep exists for this customer/part.
    customer = dfMs01.iloc[index, dfMs01.columns.get_loc("客先名")]
    parts = dfMs01.iloc[index, dfMs01.columns.get_loc("品番")]
    dbidx = (dfDb02[(dfDb02["客先名"] == customer) & (dfDb02["品番"] == parts)]).index
    # If the customer/part combination is not registered in the database,
    # write an error marker into the "number of commission reps" column.
    if dbidx.size > 0:
        dfMs01.iloc[index, dfMs01.columns.get_loc("コミッション対象Rep数")] = dfDb02.iloc[dbidx[0], dfDb02.columns.get_loc("コミッション対象Rep数")]
    elif dbidx.size == 0:
        dfMs01.iloc[index, dfMs01.columns.get_loc("コミッション対象Rep数")] = "未登録客先/品番"
    # Copy commission-eligible rows from Master Sheet1 to Sheet2;
    # double-commission rows are copied twice (one row per rep).
    if dfMs01.iloc[index, dfMs01.columns.get_loc("コミッション対象Rep数")] == 1:
        dfMs02 = pd.concat([dfMs02, dfMs01.iloc[[index]]])
    elif dfMs01.iloc[index, dfMs01.columns.get_loc("コミッション対象Rep数")] == 2:
        dfMs02 = pd.concat([dfMs02, dfMs01.iloc[[index]]])
        dfMs02 = pd.concat([dfMs02, dfMs01.iloc[[index]]])
    return [dfMs01, dfMs02]
# Function
# Processing for commission-eligible orders (before payment is received).
def prePreocessing02(index, dfMs02, dfDb03, dfDb04, idx=0):
    """Fill rep, end customer, rate, region, commission amount and the
    scheduled rep-payment month for Sheet2 row `index`.

    `idx` may carry a pre-resolved dfDb03 index (e.g. for the second row
    of a double commission); when 0 it is looked up from customer + part.
    Returns the updated dfMs02.
    """
    # Fill in the commission-eligible rep.
    if idx == 0:
        customer = dfMs02.iloc[index, dfMs02.columns.get_loc("客先名")]
        parts = dfMs02.iloc[index, dfMs02.columns.get_loc("品番")]
        idx = (dfDb03[(dfDb03["客先名"] == customer) & (dfDb03["品番"] == parts)]).index
    dfMs02.iloc[index, dfMs02.columns.get_loc("コミッション対象Rep")] = dfDb03.iloc[idx[0], dfDb03.columns.get_loc("コミッション対象Rep")]
    # Fill in the end customer.
    dfMs02.iloc[index, dfMs02.columns.get_loc("エンドカスタマー")] = dfDb03.iloc[idx[0], dfDb03.columns.get_loc("エンドカスタマー")]
    # Fill in the commission rate.
    dfMs02.iloc[index, dfMs02.columns.get_loc("コミッションレート")] = dfDb03.iloc[idx[0], dfDb03.columns.get_loc("コミッションレート")]
    # Fill in the rep region code.
    rep = dfMs02.iloc[index, dfMs02.columns.get_loc("コミッション対象Rep")]
    repidx = (dfDb04[(dfDb04["コミッション対象Rep"] == rep)]).index
    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep地域コード")] = dfDb04.iloc[repidx[0], dfDb04.columns.get_loc("Rep地域コード")]
    # Fill in the commission amount (currency-dependent column).
    # Japanese yen
    if dfMs02.iloc[index, dfMs02.columns.get_loc("通貨")] == "JPY":
        dfMs02.iloc[index, dfMs02.columns.get_loc("コミッションJPY金額")] = dfMs02.iloc[index, dfMs02.columns.get_loc("金額")] * dfMs02.iloc[index, dfMs02.columns.get_loc("コミッションレート")]
    # US dollars
    else:
        dfMs02.iloc[index, dfMs02.columns.get_loc("コミッションUSD金額")] = dfMs02.iloc[index, dfMs02.columns.get_loc("金額")] * dfMs02.iloc[index, dfMs02.columns.get_loc("コミッションレート")]
    # Fill in the scheduled payment month to the rep, by payday schedule.
    payday = dfDb04.iloc[repidx[0], dfDb04.columns.get_loc("Rep支払日")]
    epm = dfMs02.iloc[index, dfMs02.columns.get_loc("入金予定日")].month
    epy = dfMs02.iloc[index, dfMs02.columns.get_loc("入金予定日")].year
    ### Rep payday type 1 -> Jan 25, Apr 25, Jul 25, Oct 25 (region A)
    if payday == 1:
        if 1 <= epm <= 3:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy, month=4, day=25).date()
        elif 4 <= epm <= 6:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy, month=7, day=25).date()
        elif 7 <= epm <= 9:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy, month=10, day=25).date()
        else:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy + 1, month=1, day=25).date()
    ### Rep payday type 2 -> Jan 30, Apr 30, Jul 30, Oct 30 (region B)
    elif payday == 2:
        if 1 <= epm <= 3:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy, month=4, day=30).date()
        elif 4 <= epm <= 6:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy, month=7, day=30).date()
        elif 7 <= epm <= 9:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy, month=10, day=30).date()
        else:
            dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=epy + 1, month=1, day=30).date()
    ### Rep payday type 3 -> monthly, the 25th two months later (region C)
    elif payday == 3:
        temp = dfMs02.iloc[index, dfMs02.columns.get_loc("入金予定日")] + relativedelta(months=2)
        tempm = temp.month
        tempy = temp.year
        dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払予定月")] = pd.Timestamp(year=tempy, month=tempm, day=25).date()
    return dfMs02
# Function
# Processing for commission-eligible orders (after payment is received).
def commissionPaymentDateCalculation(dfMs01, dfMs02, dfDb04):
    """Propagate actual deposit dates from Sheet1 to Sheet2 and derive the
    confirmed rep-payment month per the rep's payday schedule.

    Returns the updated dfMs02. Relies on default RangeIndex labels being
    usable as .iloc positions — TODO confirm.
    """
    # Fill in the actual deposit date (copied from Sheet1 by invoice number).
    for index, row in dfMs02.iterrows():
        if pd.isnull(dfMs02.iloc[index, dfMs02.columns.get_loc("入金日")]):
            idx = (dfMs01[(dfMs01["Invoice"] == dfMs02.iloc[index, dfMs02.columns.get_loc("Invoice")])]).index
            dfMs02.iloc[index, dfMs02.columns.get_loc("入金日")] = dfMs01.iloc[idx[0], dfMs01.columns.get_loc("入金日")]
    # Fill in the confirmed rep-payment month.
    for index, row in dfMs02.iterrows():
        # Only when the confirmed-month column is blank AND the deposit
        # date has already been filled in.
        if pd.isnull(dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")]) and pd.notnull(dfMs02.iloc[index, dfMs02.columns.get_loc("入金日")]):
            rep = dfMs02.iloc[index, dfMs02.columns.get_loc("コミッション対象Rep")]
            idx = (dfDb04[(dfDb04["コミッション対象Rep"] == rep)]).index
            payday = dfDb04.iloc[idx[0], dfDb04.columns.get_loc("Rep支払日")]
            pm = dfMs02.iloc[index, dfMs02.columns.get_loc("入金日")].month
            py = dfMs02.iloc[index, dfMs02.columns.get_loc("入金日")].year
            ### Rep payday type 1 -> Jan 25, Apr 25, Jul 25, Oct 25 (region A)
            if payday == 1:
                if 1 <= pm <= 3:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py, month=4, day=25).date()
                elif 4 <= pm <= 6:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py, month=7, day=25).date()
                elif 7 <= pm <= 9:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py, month=10, day=25).date()
                else:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py + 1, month=1, day=25).date()
            ### Rep payday type 2 -> Jan 30, Apr 30, Jul 30, Oct 30 (region B)
            elif payday == 2:
                if 1 <= pm <= 3:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py, month=4, day=30).date()
                elif 4 <= pm <= 6:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py, month=7, day=30).date()
                elif 7 <= pm <= 9:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py, month=10, day=30).date()
                else:
                    dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=py + 1, month=1, day=30).date()
            ### Rep payday type 3 -> monthly, the 25th two months later (region C);
            ### also fills the overseas-subsidiary -> rep payment month (month-end, one month later).
            elif payday == 3:
                temp = dfMs02.iloc[index, dfMs02.columns.get_loc("入金日")] + relativedelta(months=2)
                tempm = temp.month
                tempy = temp.year
                dfMs02.iloc[index, dfMs02.columns.get_loc("Rep支払確定月")] = pd.Timestamp(year=tempy, month=tempm, day=25).date()
                temp = dfMs02.iloc[index, dfMs02.columns.get_loc("入金日")] + relativedelta(months=1)
                tempm = temp.month
                tempy = temp.year
                dfMs02.iloc[index, dfMs02.columns.get_loc("海外子会社→Rep支払月")] = pd.Timestamp(year=tempy, month=tempm, day=calendar.monthrange(tempy, tempm)[1]).date()
    return dfMs02
# Function
# Normalize Excel-sourced datetime.datetime cells to datetime.date (Sheet1).
def datetimeToDateOnMaster01(dfMs01):
    """Convert datetime.datetime values in Sheet1's date columns
    (ship date, expected deposit date, deposit date) to datetime.date.

    Non-datetime cells (e.g. blanks) are left untouched. Mutates and
    returns dfMs01.
    """
    date_columns = ("出荷日", "入金予定日", "入金日")
    for index, row in dfMs01.iterrows():
        for col in date_columns:
            loc = dfMs01.columns.get_loc(col)
            cell = dfMs01.iloc[index, loc]
            if isinstance(cell, datetime.datetime):
                dfMs01.iloc[index, loc] = cell.date()
    return dfMs01
# Function
# Normalize Excel-sourced datetime.datetime cells to datetime.date (Sheet2).
def datetimeToDateOnMaster02(dfMs02):
    """Convert datetime.datetime values in Sheet2's date columns (ship
    date, expected/actual deposit dates, scheduled/confirmed rep-payment
    months, subsidiary payment month) to datetime.date.

    Non-datetime cells (e.g. blanks) are left untouched. Mutates and
    returns dfMs02.
    """
    date_columns = ("出荷日", "入金予定日", "入金日",
                    "Rep支払予定月", "Rep支払確定月", "海外子会社→Rep支払月")
    for index, row in dfMs02.iterrows():
        for col in date_columns:
            loc = dfMs02.columns.get_loc(col)
            cell = dfMs02.iloc[index, loc]
            if isinstance(cell, datetime.datetime):
                dfMs02.iloc[index, loc] = cell.date()
    return dfMs02
# Бот для Эксмо ETH&BTC
#Импорт библиотек
import httplib
import urllib
import urllib2
import json
import hashlib
import hmac
import time
import copy
import string
import random
import socket
import sys
# --- Constants (kleine Exmo) ---
BTC_ak=['K-']  # API key (placeholder)
BTC_as=['S-']  # API secret (placeholder)
# Trading corridor levels for the ETH/RUB pair.
# Outside the corridor the bot goes into waiting mode; -1 disables the check.
level_up = -1
level_down = -1
# Switching from crypto/crypto (ETH/BTC) trading to crypto/fiat:
# the BTC price must be above the minimum, and vice versa.
btcPrice=-1
btcPriceMin =9600
btcPriceMax =99999
decimal_part =2 # number of decimal places used when printing prices
# Indices into the `pairs` list below.
nBTC_USD=0
nETH_USD=1
nBTC_RUB=2
nETH_RUB=3
nETH_BTC=4
nUSD_RUB=5
####################
globalNr = nETH_RUB#
####################
# The primary (main) pair is selected via globalNr above.
# The secondary pair is chosen automatically so the currencies do not overlap;
# it only trades when free funds of that pair are available.
# 0. BTC/USD + (2. ETH/RUB)
# 1. ETH/USD + (3. BTC/RUB)
# 3. BTC/RUB + (1. ETH/USD)
# 2. ETH/RUB + (0. BTC/USD)
# 4. ETH/BTC ( )
# 5. USD/RUB ()
pairs=['btc_usd','eth_usd', 'btc_rub','eth_rub','eth_btc', 'usd_rub']
# Free (not locked in orders) crypto of the current pair and the minimum
# amount needed to place an order.
currency_A_Free = 0
min_currency_A = 1
# Free (not locked in orders) fiat of the current pair and the minimum
# amount needed to place an order.
currency_B_Free = 0
min_currency_B=1
# Flag: establish the HTTPS connection once at startup.
startUp =1
# Free balances per currency.
btcFree = 0
usdFree = 0
ethFree = 0
rubFree = 0
# Reserved balances (locked in orders); currently unused.
# TODO could report in which zone the open orders sit.
#usdReserved=0
#rubReserved=0
#btcReserved=0
#ethReserved=0
# Total balances (free + reserved).
usdTotal=0
rubTotal=0
btcTotal=0
ethTotal=0
# Minimum amounts required to take part in trades.
am_min_BTC=0.0010001
am_min_USD=11
am_min_ETH=0.0105
am_min_RUB=400
# Connection state.
nonce_last=1 # last nonce sent (currently unused)
cons=0 # HTTPS connection object, created in reset_con()
# Counter of placed orders.
count_tref=0
from_price = [0,0,0,0,0,0] # current price minus the minimum margin: [0]-BTC/USD ... [5]-USD/RUB
to_price = [0,0,0,0,0,0] # current price plus the minimum margin: [0]-BTC/USD ... [5]-USD/RUB
startPreis = [0,0,0,0,0,0] # current mid price per pair: [0]-BTC/USD ... [5]-USD/RUB
diff_sell_buy = [0,0,0,0,0,0] # absolute sell/buy spread per pair, computed dynamically; the 5% constant is used instead here
min_diff =0.005 # 0.5%: allows for commission on both sides
# Coefficient for computing the initial buy/sell price; used to tune the
# start price past "plug" orders. 0.35 is the summed minimum sell (or buy)
# volume counted from the middle of the order book.
am_lim=0.35
# TODO this value differs per currency; make it per-pair or remove it.
# For now a near-zero minimum is used instead.
am_lim=0.0001
# 24h statistics collected via the API; [0]-BTC/USD ... [5]-USD/RUB.
vBids = [0,0,0,0,0,0] # bid side of the book, volume-weighted (plug orders skipped)
vAsks = [0,0,0,0,0,0] # ask side of the book, volume-weighted (plug orders skipped)
aBids = [0,0,0,0,0,0] # current best bid in the book
aAsks = [0,0,0,0,0,0] # current best ask in the book
low = [0,0,0,0,0,0] # 24h minimum price
avg = [0,0,0,0,0,0] # 24h average price
high = [0,0,0,0,0,0] # 24h maximum price
avg_AB = [0,0,0,0,0,0] # book mid price (Bid+Ask)/2
# Boundary price lines over the last 24h (min/max obtained via the API):
# [0] low  - minimum
# [1] low+(avg-low)/2
# [2] avg  - average
# [3] high-(avg-low)/2
# [4] high - maximum
xPrice = [0,0,0,0,0]
# Current zone of the last buy/sell per pair; [0]-BTC/USD ... [5]-USD/RUB.
zone=[0,0,0,0,0,0]
#          Zone 5                             #
#xPrice[4]------------------------------high  #
#          Zone 4                             #
#xPrice[3]-----------------------------------#
#          Zone 3                             #
#xPrice[2]-------------------------------avg  #
#          Zone 2                             #
#xPrice[1]-----------------------------------#
#          Zone 1                             #
#xPrice[0]-------------------------------low  #
#          Zone 0                             #
max_11 = 11  # number of computed trading price levels (see calPrice)
# Previously seen highs and lows, remembered per pair.
saveZoneMax=[0,0,0,0,0,0]
saveZoneMin=[0,0,0,0,0,0]
#Установака соединения
def reset_con():
global cons
url="api.exmo.me"
print 'reset_con', url
try:
cons.close()
except:
print '~',
try:
cons = httplib.HTTPSConnection(url, timeout=10)
except:
print '~',
return
# Order book depth.
# Fetches up to 200 orders per side for the pair, converts them to floats
# in z[pair]['asks'/'bids'], and stores the book mid price in
# avg_AB[pairs_nr].
# pairs_nr - pair index:
# 0. BTC/USD + (3. ETH/RUB)
# 1. ETH/USD + (2. BTC/RUB)
# 2. BTC/RUB + (1. ETH/USD)
# 3. ETH/RUB + (0. BTC/USD)
# 4. ETH/BTC (special case, not implemented here)
# 5. USD/RUB (special case, not implemented here)
def get_depth(pairs_url, pairs_nr):
    global avg_AB
    url='/v1/order_book/?pair='+pairs_url.upper()+'&limit=200' # cap at 200 orders
    headers = { "Content-type": "application/x-www-form-urlencoded", 'User-Agent' : 'bot17'}
    cons.request("GET", url, None, headers)
    response = cons.getresponse()
    y=json.load(response)
    z={}
    for p in y:
        p2=p.lower()
        # cleanup: the original also read ask_quantity/bid_quantity into
        # locals that were never used.
        z[p2]={'asks':[], 'bids':[]}
        for q in y[p]['ask']:
            z[p2]['asks'].append([float(q[0]), float(q[1])])
        for q in y[p]['bid']:
            z[p2]['bids'].append([float(q[0]), float(q[1])])
    # Mid price between best ask and best bid, rounded to 4 places.
    avg_AB[pairs_nr] = round ((z[pairs_url]['asks'][0][0]+z[pairs_url]['bids'][0][0])*0.5 , 4)
    return z
# Store the 24h ticker statistics (avg/high/low) for one pair in the
# module-level arrays, fetched via the public API.
def get_statistics(pairs_url, pairs_nr):
    global avg
    global high
    global low
    pair = pairs_url.upper()
    url ='https://api.exmo.com/v1/ticker/'
    headers = { "Content-type": "application/x-www-form-urlencoded", 'User-Agent' : 'bot17'}
    cons.request("GET", url, None, headers)
    response = cons.getresponse()
    a=json.load(response)
    # Ticker fields per pair: high, low, avg (24h prices), vol, vol_curr,
    # last_trade, buy_price, sell_price, updated.
    # Keep returning the index->float view for compatibility (the return
    # value is unused by the callers in this file).
    z={}
    z[pair]={}
    for i, m in enumerate(a[pair]):
        z[pair][i] = float(a[pair][m])
    # BUGFIX: the original picked avg/high/low by positional index into the
    # decoded JSON dict (z[pair][8] etc.), which depends on arbitrary dict
    # ordering; read the named ticker fields instead. It also contained a
    # dead `p2=m.lower` (method object, never called or used).
    avg[pairs_nr] = float(a[pair]['avg'])
    high[pairs_nr] = float(a[pair]['high'])
    low[pairs_nr] = float(a[pair]['low'])
    return z
# Account status: determine free and reserved balances per currency.
# The balances of the currently traded pair are mirrored into
# currency_A_Free / currency_B_Free with their order minimums.
# TODO two variables for one value... clean up later.
def get_status(pairs_nr):
    global nonce_last
    global currency_A_Free
    global min_currency_A
    global currency_B_Free
    global min_currency_B
    global btcFree
    global usdFree
    global ethFree
    global rubFree
    global btcTotal
    global usdTotal
    global ethTotal
    global rubTotal
    try:
        # Millisecond timestamp as nonce for the signed API call.
        nonce = int(round(time.time()*1000))
        #nonce = int(time.time()*10-14830000000)
        #nonce =max(nonce, nonce_last+1)
        #nonce_last=nonce
        params = {"nonce": nonce}
        params = urllib.urlencode(params)
        # HMAC-SHA512 over the urlencoded body, keyed by the API secret.
        H = hmac.new(BTC_as[0], digestmod=hashlib.sha512)
        H.update(params)
        sign = H.hexdigest()
        headers = {"Content-type": "application/x-www-form-urlencoded",
                   "Key":BTC_ak[0],
                   "Sign":sign }
        cons.request("POST", "/v1/user_info", params, headers)
        response = cons.getresponse()
        a = json.load(response)
        # Reshape the reply into z['return']['funds'/'res'][currency] floats.
        z={}
        z['return']={}
        z['return']['funds']={}
        z['return']['res']={}
        for m in a['balances']:
            p2=m.lower()
            z['return']['funds'][p2] = float(a['balances'][m])
        for m in a['reserved']:
            p2=m.lower()
            z['return']['res'][p2] = float(a['reserved'][m])
        btcFree = z['return']['funds']['btc']
        usdFree = z['return']['funds']['usd']
        ethFree = z['return']['funds']['eth']
        rubFree = z['return']['funds']['rub']
        btcReserved = z['return']['res']['btc']
        usdReserved = z['return']['res']['usd']
        ethReserved = z['return']['res']['eth']
        rubReserved = z['return']['res']['rub']
        btcTotal = btcFree+btcReserved
        usdTotal = usdFree+usdReserved
        ethTotal = ethFree+ethReserved
        rubTotal = rubFree+rubReserved
        # Mirror the traded pair's crypto (A) side.
        if (pairs_nr==nBTC_USD or pairs_nr==nBTC_RUB):
            currency_A_Free = btcFree
            min_currency_A=am_min_BTC
        elif (pairs_nr==nETH_USD or pairs_nr==nETH_RUB):
            currency_A_Free = ethFree
            min_currency_A=am_min_ETH
        # Mirror the traded pair's fiat (B) side.
        if (pairs_nr==nBTC_USD or pairs_nr==nETH_USD):
            currency_B_Free = usdFree
            min_currency_B= am_min_USD
        elif (pairs_nr==nBTC_RUB or pairs_nr==nETH_RUB):
            currency_B_Free = rubFree
            min_currency_B= am_min_RUB
        # Special case: crypto/crypto pair.
        if (pairs_nr==nETH_BTC):
            currency_A_Free = ethFree
            min_currency_A=am_min_ETH
            currency_B_Free = btcFree
            min_currency_B= am_min_BTC
    except:
        # On any failure: report, back off, reconnect, return 0.
        print 'Fehler get_status',
        time.sleep(2)
        reset_con()
        return 0
    return z
# Fetch the account's open orders, reshaped into
# z['return'][order_id] = {pair, type, amount, rate}.
# z['success'] is 1 when at least one order exists, else z['error'] is set.
def get_my_orders(ind_ak=0):
    global nonce_last
    try:
        nonce = int(round(time.time()*1000))
        params = {"nonce": nonce}
        params = urllib.urlencode(params)
        # HMAC-SHA512 signature over the urlencoded body.
        H = hmac.new(BTC_as[0], digestmod=hashlib.sha512)
        H.update(params)
        sign = H.hexdigest()
        headers = {"Content-type": "application/x-www-form-urlencoded",
                   "Key":BTC_ak[0],
                   "Sign":sign,
                   'User-Agent' : 'bot1'}
        cons.request("POST", "/v1/user_open_orders", params, headers)
        response = cons.getresponse()
        a = json.load(response)
        z={}
        z['success']=0
        z['error']='all ok'
        z['return']={}
        for p in a:
            for j in range(len(a[p])):
                z['success']=1
                oid=a[p][j]["order_id"]
                p2=a[p][j]["pair"].lower()
                z['return'][oid]={"pair":p2, "type":a[p][j]["type"],
                                  "amount":float(a[p][j]["quantity"]), "rate":float(a[p][j]["price"])}
        if z['success']==0:
            z['error']='no orders'
    except:
        # On any failure: report, back off, reconnect, return 0.
        print 'Fehler get_my_orders'
        time.sleep(2)
        reset_con()
        return 0
    return z
#Отмена ордера. Не используется
def cancel_order(ord, ind_ak=0):
global nonce_last
try:
nonce = int(round(time.time()*1000))
params = {"nonce": nonce}
params = urllib.urlencode(params)
params = {"nonce": nonce, "order_id":ord}
params = urllib.urlencode(params)
H = hmac.new(BTC_as[0], digestmod=hashlib.sha512)
H.update(params)
sign = H.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key":BTC_ak[0],
"Sign":sign,
'User-Agent' : 'bot1'}
cons.request("POST", "/v1/order_cancel", params, headers)
response = cons.getresponse()
a = json.load(response)
except:
print 'Fehler cancel_order'
time.sleep(2)
reset_con()
return 0
return a
# Place an order.
# ord_type  : 'sell' / 'buy'
# ord_rate  : price
# ord_amount: quantity
# p         : pair name, one of pairs[]
# Returns the new order id, or 0 on failure.
def trade(ord_type, ord_rate, ord_amount, p, ind_ak=0):
    global nonce_last
    global count_tref
    count = 0
    try:
        nonce = int(round(time.time()*1000))
        params = {"nonce": nonce, "pair":p.upper(), 'quantity':ord_amount, 'price':ord_rate, 'type':ord_type}
        params = urllib.urlencode(params)
        # HMAC-SHA512 signature over the urlencoded body.
        H = hmac.new(BTC_as[0], digestmod=hashlib.sha512)
        H.update(params)
        sign = H.hexdigest()
        headers = {"Content-type": "application/x-www-form-urlencoded",
                   "Key":BTC_ak[0],
                   "Sign":sign,
                   'User-Agent' : 'bot1'}
        cons.request("POST", "/v1/order_create", params, headers)
        response = cons.getresponse()
        a = json.load(response)
        count_tref = count_tref+1 # placed-order counter
        # NOTE(review): a['order_id'] is read before a['error'] is checked;
        # an error reply without order_id would raise KeyError here and be
        # caught below — confirm intended.
        print '| order_id =', a['order_id'], ' count', count_tref
        if a['error']!='':
            print 'Trade: ', a['error']
        aa=a['order_id']
        return aa
    except:
        # On any failure: report, back off, reconnect, return 0.
        print 'Fehler! trade!'
        time.sleep(2)
        reset_con()
        return 0
# Walk one side of the order book (asks or bids) and return the price at
# which the cumulative volume reaches am_lim; this skips thin "plug"
# orders at the top of the book. Also records the current best ask/bid
# into the global aAsks/aBids arrays.
def find_rate(depth, pair, typ, am_lim, pairs_nr):
    global aBids
    global aAsks
    book_side = depth[pair][typ]
    best = book_side[0][0]
    if typ=='asks':
        aAsks [pairs_nr] = best
    if typ=='bids':
        aBids[pairs_nr] = best
    rate = best
    cumulative = 0.0
    for entry in book_side:
        rate = entry[0]
        cumulative += entry[1]
        if cumulative >= am_lim:
            break
    return rate
# Compute the start price for a pair: roughly the middle of the order
# book, i.e. the average between the volume-adjusted ask and bid (the
# volume weighting shifts the prices slightly past plug orders).
def getStartPrice(pairs_url, pairs_nr):
    global vBids
    global vAsks
    depth=get_depth(pairs_url,pairs_nr)
    # Query both book sides with volume weighting (skip plug orders).
    vAsks[pairs_nr] = round (find_rate(depth, pairs_url, 'asks', am_lim, pairs_nr),4)
    vBids[pairs_nr] = round (find_rate(depth, pairs_url, 'bids', am_lim, pairs_nr),4)
    # NOTE: local float deliberately shadows the global startPreis list;
    # only the local value is returned here.
    startPreis = (vAsks[pairs_nr] +vBids[pairs_nr] )/2
    return startPreis
# Determine the active buy/sell zone (0-5) for pair i and refresh the
# xPrice boundary lines from the pair's 24h low/avg/high.
def getZone(i):
    global xPrice
    sPreis=getStartPrice(pairs[i], i)
    #          Zone 5                             #
    #xPrice[4]------------------------------high  #
    #          Zone 4                             #
    #xPrice[3]-----------------------------------#
    #          Zone 3                             #
    #xPrice[2]-------------------------------avg  #
    #          Zone 2                             #
    #xPrice[1]-----------------------------------#
    #          Zone 1                             #
    #xPrice[0]-------------------------------low  #
    #          Zone 0                             #
    xPrice[0]= low[ i]
    xPrice[1]= low[ i] +(avg[i]-low[i])*0.5
    xPrice[2]= avg[ i]
    xPrice[3]= high[i] - (avg[i]-low[i])*0.5
    xPrice[4]= high[i]
    z =0
    if (sPreis > xPrice[0]) and (sPreis <=xPrice[1]):
        z = 1
    elif (sPreis >xPrice[1] ) and (sPreis <=xPrice[2]):
        z = 2
    elif (sPreis > xPrice[2]) and (sPreis <=xPrice[3]):
        z = 3
    elif (sPreis > xPrice[3]) and (sPreis <=xPrice[4]):
        z = 4
    elif (sPreis > xPrice[4]):
        z =5
    return z
# Return [base, quote] currency codes of the pair at pairs_nr,
# e.g. 0 -> ['btc', 'usd'].
def getPairName(pairs_nr):
    return pairs[pairs_nr].split('_')
# Fill the 11-entry price array with trading levels derived from the
# xPrice boundary lines; the low/avg/high anchors are each flanked by
# +/- diff_SB.
def calPrice (price, diff_SB):
    max_line=len(xPrice)# number of boundary lines
    max_price =len(xPrice)+2# number of computed level prices
    #----------------------------------------------------------------------
    price[ 0]= xPrice[0] - diff_SB # minimum price minus ~5%
    #----------------------------------------------------------------------
    price[ 1]= xPrice[0] # minimum price
    price[ 2]= xPrice[0] + diff_SB # minimum price plus ~5%
    #----------------------------------------------------------------------
    price[ 3]= xPrice[1]
    price[ 4]= xPrice[2] - diff_SB # average price minus ~5%
    #----------------------------------------------------------------------
    price[ 5]= xPrice[2] # average price
    price[ 6]= xPrice[2] + diff_SB # average price plus ~5%
    #----------------------------------------------------------------------
    price[ 7]= xPrice[3]
    price[ 8]= xPrice[4] - diff_SB # maximum price minus ~5%
    #----------------------------------------------------------------------
    price[ 9]= xPrice[4] # maximum price
    price[10]= xPrice[4] + diff_SB # maximum price plus ~5%
    #----------------------------------------------------------------------
    return price
# If the free balance exceeds the order minimum but cannot be split into
# two minimal orders, use the whole balance as the order size.
# (The parameter name `min` shadows the builtin but is kept for callers.)
def checkFreeMin (currency_A_Free, min):
    if min < currency_A_Free < 2 * min:
        return currency_A_Free
    return min
# Map the active zone (1-5) to an index into the computed price array
# (see calPrice): each zone owns two consecutive indices; the upper one
# is used when the current price `akt` lies above the zone's lower
# boundary line xPrice[zoneCount-1]. Any other zone value yields 0.
def getPriceZone(zoneCount, akt):
    if not 1 <= zoneCount <= 5:
        return 0  # Zone 0 (or out of range)
    lower_index = 2 * zoneCount - 1  # Zone k -> xPrice[k-1]
    if akt > xPrice[zoneCount - 1]:
        return lower_index + 1
    return lower_index
# Print a summary of bid/ask/mid prices and the buy/sell corridor for
# the pair, plus the configured levels when corridor mode is on.
def printInfoSellBuy (aBid, aAsk, avg, diff_SB, m2, pairs_nr):
    print '|--------------------------------------------------------|'
    print '| aBid aAsk Avg from_price to_price |'
    print '|', round (aBid ,decimal_part), ' ', round (aAsk ,decimal_part), ' ',
    print round (avg,decimal_part), ' ', round(from_price[pairs_nr],decimal_part), ' ', round(to_price[pairs_nr],decimal_part)
    print '|--------------------------------------------------------|'
    print '| Diff_sell_buy :', round (diff_SB, decimal_part), m2, ' ->', round(100*diff_SB/avg,1), '%' # minimum absolute sell/buy spread relative to the start price
    print '|--------------------------------------------------------|'
    if (level_down>0):
        print '| level_down =', level_down, 'ETH [RUB]'
    if (level_up>0):
        print '| level_up =', level_up, 'ETH [RUB]'
    return 0
# Продажа
def setSell_Currency (pairs_nr):
global from_price
global to_price
global min_currency_B
global min_currency_A
price = [ ] # расчетные цена на продажу
for i in range(max_11):
price.append(0)
pair = pairs[pairs_nr] # валютная пара
avg = avg_AB[pairs_nr] # средняя величина стакана #TODO - можно брать среднюю цену за день
diff_SB = avg*min_diff # расчет разницы, абсолютная величина: средняя величина стакана на мин. разницу=5%
min_currency_B= avg_AB[pairs_nr] * min_currency_A # расчет минималького количества фиата для покупки минимального количество крипты по актуальной цене
aBid = aBids[pairs_nr] #
aAsk = aAsks[pairs_nr] #
mm=getPairName(pairs_nr)
m1=(mm[0]).upper()
m2=(mm[1]).upper()
printInfoSellBuy (aBid, aAsk, avg, diff_SB, m2, pairs_nr)
price_min = to_price[pairs_nr] # минимально возможная цена продажи равна последней цене продажи
zoneCount = getZone(pairs_nr)
price = calPrice(price, diff_SB) # Расчет цен для торговли по актуальеым максимальным/минимальных ценам за день
#Выставление ордеров на продажу только на актульной зоне zoneCount и выше
n=getPriceZone(zoneCount, avg)
for i in range(100):
n=n+1
if n>max_11:
# достигнут максимум зоны, выставление ордеров повторить с актуальной зоны
n=getPriceZone(zoneCount, avg)
# крипта закончилась, выход
if (currency_A_Free < min_currency_A):
break
print '| |'
print '| price[',n,']=', price[n], m2
if (price[n] < price_min ): # цена меньше допустимой
price[n] = price_min
print '| price => price_min =', round (price[n],decimal_part), m2
if (price[n] < aAsk): #Продажа дешевле чем актуальная цена
price[n] = aAsk+aAsk/1000 #Цена продажи в стакане + 0.1%
print '| price = aAsk+aAsk/1000 :', round (price[n], decimal_part), m2
#Продажа
min_currency_A=checkFreeMin(currency_A_Free, min_currency_A)
trade('sell', round (price[n],4), min_currency_A, pair)
print '|'
print '| Sell by rate', round (price[n],decimal_part) , m2,'/', m1, ' Quantity: ', round (min_currency_A, decimal_part), m1
print '|________________________________________________________|'
get_status(pairs_nr) #Обновление данный о свободных валютах
return
# Buy: place buy orders at computed price levels, walking downwards from
# the active zone until the free fiat runs out or the bottom is reached.
def setBuy_Currency (pairs_nr): # buy the pair's crypto with fiat
    global from_price
    global to_price
    price = [ ] # computed buy price levels
    for i in range(max_11):
        price.append(0)
    pair = pairs[pairs_nr]
    avg = avg_AB[pairs_nr] # book mid price
    diff_SB = avg*min_diff # absolute spread
    # NOTE(review): assigned without a `global` statement, so this
    # min_currency_B is local to the function — confirm intended.
    min_currency_B= avg_AB[pairs_nr] * min_currency_A
    aBid = aBids[pairs_nr]
    aAsk = aAsks[pairs_nr]
    mm=getPairName(pairs_nr)
    m1=(mm[0]).upper()
    m2=(mm[1]).upper()
    printInfoSellBuy (aBid, aAsk, avg, diff_SB, m2, pairs_nr)
    price_max = from_price[pairs_nr] # highest acceptable buy price
    zoneCount = getZone(pairs_nr)
    price = calPrice(price, diff_SB)
    print '| |'
    # Walk the price levels downwards from the active zone.
    n=getPriceZone(zoneCount, avg)
    for i in range(100):
        n=n-1
        if n<0:
            # bottom reached: stop
            n=getPriceZone(zoneCount, avg)
            break
        # out of fiat: stop
        if (currency_B_Free < min_currency_B):
            break
        print '| price[',n,']=', price[n], m2
        if (price[n] > price_max ): # price above the allowed maximum
            price[n] = price_max
            print '| price => price_max =', round (price[n],decimal_part), m2
        if (price[n] > aBid): # buying above the current bid
            price[n] = aBid-aBid/1000 # best bid - 0.1%
            print '| price = aBid-aBid/1000 :', round (price[n],decimal_part), m2
        # Buy; the 0.999 factor presumably leaves headroom for the
        # exchange commission — TODO confirm.
        min_currency_B=checkFreeMin(currency_B_Free, min_currency_B)
        trade('buy', round (price[n],4), 0.999*min_currency_B/price[n], pair)
        print '|'
        print '| Buy by rate', round (price[n],decimal_part) , m2,'/', m1, ' Quantity: ', round(0.999*min_currency_B/price[n], decimal_part), m1
        print '|________________________________________________________|'
        get_status(pairs_nr) # refresh free balances
    print
    return
def getStringPair(nr):
    """Return the display name of pair index nr (e.g. 0 -> 'BTC/USD').

    Unknown indices yield '' — same as the original if/elif chain, which
    is replaced by a dict lookup here (the chain also shadowed the
    builtin `str`).
    """
    names = {0: 'BTC/USD', 1: 'ETH/USD', 2: 'BTC/RUB',
             3: 'ETH/RUB', 4: 'ETH/BTC', 5: 'USD/RUB'}
    return names.get(nr, '')
# Switch the traded pair depending on the current BTC price.
def checkMinMaxBTC(pairs_nr):
    # Pairs are only switched when no corridor (levels) is configured.
    if (level_up>0) and (level_down>0):
        return pairs_nr
    if (pairs_nr == nETH_BTC and btcPrice > btcPriceMin):
        print '>> change' , getStringPair(pairs_nr), 'to', getStringPair(globalNr), 'cause btcPrice >', btcPriceMin, 'USD'
        pairs_nr =globalNr
    if (pairs_nr == nUSD_RUB and btcPrice < btcPriceMax):
        # NOTE(review): the message names nBTC_USD but globalNr is
        # assigned below — confirm which is intended.
        print '>> change' , getStringPair(pairs_nr), 'to', getStringPair(nBTC_USD), 'cause btcPrice <', btcPriceMax, 'USD'
        pairs_nr =globalNr
    if (pairs_nr < nETH_BTC and btcPrice > btcPriceMax):
        print '>> change' , getStringPair(pairs_nr), 'to', getStringPair(nUSD_RUB), 'cause btcPrice >', btcPriceMax, 'USD'
        pairs_nr =nUSD_RUB
    if (pairs_nr < nETH_BTC and btcPrice < btcPriceMin):
        print '>> change' , getStringPair(pairs_nr), 'to', getStringPair(nETH_BTC), 'cause btcPrice <', btcPriceMin, 'USD'
        pairs_nr =nETH_BTC
    return pairs_nr
# Compute and store the start price, spread, active zone and the
# buy/sell corridor for one pair.
def calStartValues (pairs_nr):
    global to_price
    global from_price
    global zone
    global diff_sell_buy
    global startPreis
    i=pairs_nr
    startPreis[i] = getStartPrice(pairs[i], i)
    diff_sell_buy[i] = min_diff*startPreis[i]
    zone [i]= getZone(i)
    from_price[i] = startPreis[i] - diff_sell_buy[i]
    to_price[i] = startPreis[i] + diff_sell_buy[i]
    return 0
# One trading pass: initialization, checks, and placing buy/sell orders.
# Returns the (possibly switched) pair index.
def run (pairs_nr):
    global to_price
    global from_price
    global zone
    global btcPrice
    global decimal_part
    global diff_sell_buy
    global startPreis
    print
    print '>> run', getStringPair(pairs_nr)
    # Refresh BTC/USD statistics and the current BTC price.
    i = 0
    get_statistics(pairs[i],i)
    btcPrice = getStartPrice(pairs[i], i)
    # print current date and time
    print '>>', (time.strftime("%d.%m.%Y %H:%M:%S"))
    print '>> 1 BTC =', btcPrice, 'USD'
    print '>> run: ', getStringPair(pairs_nr)
    # Switch pairs only when there is no free currency.
    if (checkFreeCurrency() < 1):
        pairs_nr = checkMinMaxBTC(pairs_nr)
    print '>> run: ', getStringPair(pairs_nr)
    mm=getPairName(pairs_nr)
    m1=mm[0]
    m2=mm[1]
    m1=m1.upper()
    m2=m2.upper()
    if (pairs_nr == nETH_BTC):
        # crypto/crypto prices are printed with more decimal places
        decimal_part =4
    i = pairs_nr
    get_statistics(pairs[i],i)
    calStartValues(i)
    print '|---------------------------------------------|'
    print '| fromPrice : 1',m1, ' =', round (from_price[i], decimal_part), m2, ' '
    print '| toPrice : 1',m1, ' =', round (to_price[i], decimal_part), m2, ' '
    print '| startPreis: 1',m1, ' =', round (startPreis[i], decimal_part), '+/-', round (diff_sell_buy[i], decimal_part) , m2
    if (level_up > 0) :
        print '| level_up :1',m1, ' =', round(level_up, decimal_part), m2
    if (level_down > 0):
        print '| level_down:1',m1, ' =', round(level_down, decimal_part), m2
    print '|---------------------------------------------|'
    get_status(pairs_nr)
    # Zone detection and price computation.
    analysis_Pair(pairs_nr)
    printMinFreeCurrency(pairs_nr)
    # Buy when there is enough fiat.
    if (currency_B_Free >= min_currency_B):
        # The current ETH price must lie inside the corridor.
        if (check_corridor(nETH_RUB)==1):
            print
            print '|--------------------------------------------------------|'
            print '| Start Buy', m1, '[', m2, '] |'
            setBuy_Currency(pairs_nr)
    # Sell when there is enough crypto.
    if (currency_A_Free >= min_currency_A):
        # The current ETH price must lie inside the corridor.
        if (check_corridor(nETH_RUB)==1):
            print
            print '|--------------------------------------------------------|'
            print '| Start Sell', m1, '[', m2, '] |'
            setSell_Currency(pairs_nr)
    return pairs_nr
#Печать всех зон и зоны актуальных продаж для выбранной пары
def analysis_Pair(pairs_nr):
global high
global avg
global low
z=zone[pairs_nr]
print
print getStringPair(pairs_nr), ': Zone ', z
if (saveZoneMax[pairs_nr] > high[pairs_nr]):
print 'Max DOWN: old', round(saveZoneMax[pairs_nr],decimal_part+2), '>', round(high[pairs_nr],decimal_part+2)
else:
print 'Max UP: old', round(saveZoneMax[pairs_nr],decimal_part+2), '<=', round(high[pairs_nr],decimal_part+2)
print ' _________________________'
if (z==5):
print ' |xxxxxxxxxx Zone 5 xxxxxxxxx|'
else:
print ' | Zone 5 |'
print 'high|-----------------------|-', round (high[pairs_nr],decimal_part)
if (z==4):
print ' |xxxxxxx Zone 4 xxxxxxxx|'
else:
print ' | Zone 4 |'
print ' |-----------------------|-', round(high[pairs_nr]-(avg[pairs_nr]-low[pairs_nr])*0.5, decimal_part)
if (z==3):
print ' |xxxxxx Zone 3 xxxxxxxxx|'
else:
print ' | Zone 3 |'
print 'avg-|-----------------------|-', round (avg[pairs_nr],decimal_part)
if (z==2):
print ' |xxxxxx Zone 2 xxxxxxxxx|'
else:
print ' | Zone 2 |'
print ' |-----------------------|-', round(low[pairs_nr]+(avg[pairs_nr]-low[pairs_nr])*0.5, decimal_part)
if (z==1):
print ' |xxxxxx Zone 1 xxxxxxxxx|'
else:
print ' | Zone 1 |'
print 'low-|-----------------------|-', round(low[pairs_nr],decimal_part)
if (z==0):
print ' |xxxxxxx Zone 0 xxxxxxxx|'
else:
print ' | Zone 0 |'
print ' |_______________________|'
print
if (saveZoneMin[pairs_nr] > low[pairs_nr]):
print 'Min DOWN: old', round(saveZoneMin[pairs_nr],decimal_part+2), '>', round(low[pairs_nr],decimal_part+2)
else:
print 'Min UP: old', round(saveZoneMin[pairs_nr],decimal_part+2), '>', round(low[pairs_nr],decimal_part+2)
print
return
# Print the free balance of every currency on one line.
def printAllFreeCurrency():
    print
    print 'btcFree =', round(btcFree,4),'BTC',
    print 'usdFree =', round (usdFree,2),'USD',
    print 'ethFree =', round(ethFree,4),'ETH',
    print 'rubFree =', round (rubFree,2),'RUB'
    return 0
# Return 1 (and print the balances) when at least one currency exceeds
# its trade minimum, otherwise 0.
def checkFreeCurrency():
    if (btcFree < am_min_BTC) and (ethFree<am_min_ETH) and (usdFree < am_min_USD) and (rubFree < am_min_RUB):
        result =0
    else:
        printAllFreeCurrency()
        result =1
    return result
# Remember the last high/low boundary of the current pair
# (informational only for now).
def save_min_max_Price(pairs_nr):
    global saveZoneMax
    global saveZoneMin
    saveZoneMin[pairs_nr] = xPrice[0] # low
    saveZoneMax[pairs_nr] = xPrice[4] # high
    return 0
# Switch the traded pair when BTC reaches its min/max thresholds:
# trade fiat/fiat when BTC is high (all crypto sold), crypto/crypto when
# BTC is low (crypto bought up).
def checkPairsNr(pairs_nr):
    # BUGFIX: declare btcPrice global — the original assigned a local,
    # so checkMinMaxBTC() kept comparing against the stale global price.
    global btcPrice
    i=nBTC_USD
    get_statistics(pairs[i],i)
    btcPrice = getStartPrice(pairs[i], i)
    nPairs_nr = checkMinMaxBTC(pairs_nr)
    return nPairs_nr
#Проверка, находится ли актуальная цена БТС в корридоре, если режим проверки корридора выбран ((level_up>0) или (level_down>0)
def check_corridor(i):
result =1# обычный режим продажи (без кооридора)
get_statistics(pairs[i],i)
ethPrice = getStartPrice(pairs[i], i)
if (level_up>0) and (level_down>0):
if (ethPrice>level_up) or (ethPrice<level_down):
print 'out of corridor ', time.sleep(10)
result =0
return result
# Print the total balances (free + reserved) of all currencies.
# NOTE: the typo in the name ('ptint') is kept because callers use it.
def ptintTotalCurrency():
    i=0
    # NOTE(review): get_statistics updates price stats, while the totals
    # printed below come from get_status — confirm this loop is needed.
    for p in pairs:
        get_statistics(p,i)
        i=i+1
    print 'Currancy Total:', round (btcTotal, 4), 'BTC ', round (usdTotal, 2), 'USD ', round (ethTotal, 4), 'ETH ', round(rubTotal,2), 'RUB'
    return 0
# Print minimum and free amounts for both currencies of the pair.
def printMinFreeCurrency(pairs_nr):
    mm=getPairName(pairs_nr)
    m1=mm[0]
    m2=mm[1]
    m1=m1.upper()
    m2=m2.upper()
    # NOTE(review): assigned without a `global` statement, so this only
    # sets a local min_currency_B for the printout — confirm intended.
    min_currency_B= avg_AB[pairs_nr] * min_currency_A
    print 'currency A: min=', round(min_currency_A, decimal_part), m1, 'free=', round(currency_A_Free, decimal_part), m1
    print 'currency B: min=', round(min_currency_B, decimal_part), m2, 'free=', round(currency_B_Free, decimal_part), m2
    return 0
# Advance the idle-loop counter; periodically snapshot the pair's
# high/low, print the totals, and rerun a full trading pass.
def inc_checkCount(countPrint, pairs_nr):
    countPrint = countPrint +1
    if (countPrint==5):
        save_min_max_Price(pairs_nr)
    if (countPrint==50):
        ptintTotalCurrency()
    if (countPrint==1000):
        countPrint=0
        run(pairs_nr)
    return countPrint
# Fetch everything needed for one loop iteration: order book,
# 24h statistics and account balances for the pair.
def read_data_API(pairs_nr):
    # order book depth
    depth=get_depth(pairs[pairs_nr],pairs_nr)
    # 24h statistics
    get_statistics(pairs[pairs_nr], pairs_nr)
    # account balances
    balance=get_status(pairs_nr)
    return 0
######################################################################################
#                                                                                    #
#                                Program entry point                                 #
#                                                                                    #
######################################################################################
# Main loop: connect once, run an initial trading pass, then poll the API
# forever, switching pairs as free funds and the BTC price dictate.
def bot():
    global pairs
    global startUp
    # Open the connection once at startup.
    if (startUp==1):
        startUp =2
        reset_con()
    countPrint =0
    print
    print 'Bot is ready...'
    print
    pairs_nr = globalNr
    pairs_nr=run(pairs_nr)
    print
    # Endless loop:
    while True:
        try:
            # Throttle so the API request limit is not exceeded.
            time.sleep(1.0)
            read_data_API (pairs_nr)
            # Fetch my open orders.
            my_orders=get_my_orders()
            mm=getPairName(pairs_nr)
            m1=mm[0]
            m2=mm[1]
            m1=m1.upper()
            m2=m2.upper()
            if (checkFreeCurrency() < 1):
                # No free funds: possibly switch pair depending on the BTC
                # price (does not happen in corridor mode, e.g.
                # btcPriceMin=9600, btcPriceMax=11999).
                pairs_nr=checkPairsNr(pairs_nr)
                time.sleep(5.0)
                # Bump the counter, refresh the on-screen info, remember
                # the pair's high/low.
                countPrint=inc_checkCount(countPrint, pairs_nr)
                print '.',
            else: # free currency detected
                print
                # Recompute the minimum order size in currency B.
                min_currency_B= avg_AB[pairs_nr] * min_currency_A
                # Trade the current pair if either side has enough funds.
                if (currency_B_Free >= min_currency_B) or (currency_A_Free >= min_currency_A):
                    print
                    run(pairs_nr)
                # Operation without configured levels (normal mode).
                elif (level_up < 0) and (level_down < 0):
                    print 'change pairs_nr:' , getStringPair(pairs_nr)
                    # Leave crypto/crypto because fiat is available;
                    # the "global" pair globalNr is selected.
                    if (pairs_nr == nETH_BTC) and (globalNr!=pairs_nr):
                        if (usdFree>am_min_USD):
                            print 'change' , getStringPair(pairs_nr), 'to', getStringPair(globalNr), 'cause usdFree >', am_min_USD , 'USD'
                            pairs_nr = nBTC_USD
                            run(pairs_nr)
                        if (rubFree > am_min_RUB):
                            print 'change' , getStringPair(pairs_nr), 'to', getStringPair(globalNr), 'cause rubFree >', am_min_RUB , 'RUB'
                            pairs_nr = nETH_RUB
                            run(pairs_nr)
                    # Switch to the complementary crypto/fiat pair because
                    # its currency is available.
                    if (globalNr ==nBTC_USD):
                        pairs_nr =nETH_RUB
                        print 'change' , getStringPair(globalNr), 'to', getStringPair(pairs_nr),
                        if (ethFree > am_min_ETH):
                            print 'cause ethFree >', am_min_ETH, 'ETH'
                        elif (rubFree > am_min_RUB):
                            print 'cause rubFree >', am_min_RUB , 'RUB'
                        run(pairs_nr)
                    # Switch to crypto/crypto because its currency is
                    # available.
                    if (globalNr ==nETH_RUB):
                        pairs_nr =nETH_BTC
                        print 'change' , getStringPair(globalNr), 'to', getStringPair(pairs_nr),
                        if(btcFree > am_min_BTC):
                            print 'cause btcFree >', am_min_BTC, 'BTC'
                        elif (usdFree > am_min_USD):
                            print 'cause usdFree >', am_min_USD , 'USD'
                        run(pairs_nr)
                    # Switch to the complementary crypto/fiat pair.
                    if (globalNr == nBTC_RUB):
                        pairs_nr = nETH_USD
                        print 'change' , getStringPair(globalNr), 'to', getStringPair(pairs_nr),
                        if (ethFree > am_min_ETH):
                            print 'cause ethFree >', am_min_ETH , 'ETH or usdFree >',am_min_USD , 'USD'
                        elif (usdFree > am_min_USD):
                            print 'cause usdFree >', am_min_USD , 'USD'
                        run(pairs_nr)
                    # Switch to the complementary crypto/fiat pair.
                    if (globalNr == nETH_USD):
                        pairs_nr =nBTC_RUB
                        print 'change' , getStringPair(globalNr), 'to', getStringPair(pairs_nr),
                        if (btcFree > am_min_BTC):
                            print 'cause btcFree >', am_min_BTC , 'BTC'
                        elif(rubFree > am_min_RUB):
                            print 'cause rubFree >', am_min_RUB , 'RUB'
                        run(pairs_nr)
            continue
        except:
            # Any failure: report, back off, reconnect and keep looping.
            print 'bot() Fehler ',
            time.sleep(2)
            reset_con()
|
# Simple pico2d animation: a character walks left-to-right across a grass strip.
from pico2d import *

open_canvas()
grass = load_image('grass.png')
character = load_image('character.png')

x = 0
while (x < 800):
    clear_canvas()
    grass.draw(400, 30)      # grass strip near the bottom of the canvas
    character.draw(x, 90)    # character drawn just above the grass
    update_canvas()
    x = x + 2                # 2 px per frame -> 400 frames total
    delay(0.01)              # ~100 fps cap
    # fill here
close_canvas()
|
from . import BaseArmor


class Cloak(BaseArmor):
    """A cloak armor piece.

    NOTE(review): ``__init__`` ignores ``owner`` and never calls
    ``BaseArmor.__init__`` -- presumably the base class is initialized
    elsewhere; confirm before relying on any inherited state.
    """

    def __init__(self, owner):
        # Intentionally empty: no Cloak-specific initialization yet.
        pass
|
def number(num):
    """Return the sum of the squares of the decimal digits of ``num``.

    Also prints the result, mirroring the original behaviour.
    Returns 0 for ``num <= 0`` (the digit loop never runs).
    """
    digits = []
    while num > 0:
        # divmod yields quotient and remainder in one step.  The original
        # used ``num / 10``, which only floors under Python 2; floor
        # division (via divmod) is correct on both Python 2 and 3.
        num, digit = divmod(num, 10)
        digits.append(digit)
    sq = 0
    for d in digits:
        sq += d * d
    print(sq)
    return sq
# Repeatedly replace ``su`` with its digit-square sum until it drops to 2 or
# below (the "happy number" iteration).
# NOTE(review): starting from 3 this sequence enters the cycle
# 4 -> 16 -> 37 -> 58 -> 89 -> 145 -> 42 -> 20 -> 4, every member of which is
# greater than 2, so this loop never terminates -- confirm intent.
su = 3
while(su>2):
    su = number(su)
# -*- coding: utf-8 -*-
from heapq import heappop, heappush
class MaxHeap:
    """Max-priority queue on top of heapq's min-heap.

    Elements are stored as ``(-value, value)`` pairs so that the smallest
    negated key corresponds to the largest original value.
    """

    def __init__(self):
        self.count = 0
        self.els = []

    def __len__(self):
        return self.count

    def _max(self):
        # Peek at the largest element without removing it.
        top = self.els[0]
        return top[1]

    def pop(self):
        """Remove and return the largest element."""
        self.count -= 1
        popped = heappop(self.els)
        return popped[1]

    def push(self, el):
        """Insert ``el``; negation makes the min-heap behave as a max-heap."""
        self.count += 1
        heappush(self.els, (-el, el))
class Solution:
    def lastStoneWeight(self, stones):
        """Smash the two heaviest stones until at most one remains.

        Returns the weight of the surviving stone, or 0 if none survive.
        Uses heapq on negated weights directly instead of the file's
        MaxHeap helper, removing the cross-class dependency.
        """
        heap = []
        for stone in stones:
            heappush(heap, -stone)  # negate: min-heap acts as max-heap
        while len(heap) > 1:
            heaviest = -heappop(heap)
            second = -heappop(heap)      # second <= heaviest
            if heaviest > second:
                # Equal stones annihilate; otherwise push the difference.
                heappush(heap, second - heaviest)
        return -heap[0] if heap else 0
if __name__ == "__main__":
solution = Solution()
assert 1 == solution.lastStoneWeight([2, 7, 4, 1, 8, 1])
assert 0 == solution.lastStoneWeight([2, 2])
|
class FenwickTree():
    """Fenwick (binary indexed) tree: point updates and range sums.

    The ``values`` list must be ONE-BASED: values[0] is ignored.
    Construction is O(n); queries and updates are O(log n).
    """

    def __init__(self, values):
        # ``is None`` instead of ``== None``: identity is the idiom and is
        # immune to __eq__ overloads.
        if values is None:
            raise ValueError("Values cannot be None")
        # Clone the input, since construction destroys its contents.
        # (The original also kept a shared class-level ``tree = []``
        # attribute that all instances aliased before __init__ ran; an
        # instance attribute alone is correct.)
        self.tree = list(values)
        self.n = len(self.tree)
        # Propagate each cell into its parent range, in place.
        for i in range(1, self.n):
            j = i + self.lsb(i)
            if j < self.n:
                self.tree[j] += self.tree[i]

    def lsb(self, i):
        """Return the value of the least significant set bit of ``i``.

        lsb(108) = lsb(0b1101100) = 0b100 = 4
        lsb(96)  = lsb(0b1100000) = 0b100000 = 32
        """
        return i & -i

    def prefix_sum(self, i):
        """Compute the prefix sum over [1, i], O(log n)."""
        sum_ = 0
        while i != 0:
            sum_ += self.tree[i]
            i &= ~self.lsb(i)  # equivalently: i -= self.lsb(i)
        return sum_

    def sum(self, i, j):
        """Return the sum of the closed interval [i, j], O(log n)."""
        if j < i:
            raise IndexError("Make sure j >= i")
        return self.prefix_sum(j) - self.prefix_sum(i - 1)

    def add(self, i, v):
        """Add ``v`` to index ``i``, O(log n)."""
        while i < self.n:
            self.tree[i] += v
            i += self.lsb(i)

    def set(self, i, v):
        """Set index ``i`` to exactly ``v``, O(log n)."""
        self.add(i, v - self.sum(i, i))

    def __str__(self):
        return str(self.tree)
|
'''数据处理,将低分辨率的图片转换成高分辨率的图片,
再将高分辨率的图片分成四份,并将box做对应处理'''
import numpy as np
# from PIL import Image
import os
import cv2
import matplotlib.pyplot as plt
def gen_new_data(lines, input_shape):
    """Letterbox each image to ``input_shape`` and split it into four
    overlapping quadrants, saved as <name>_0.jpg ... <name>_3.jpg.

    lines -- iterable of image file paths
    input_shape -- (width, height) of the padded canvas
    """
    for t in lines:
        image = cv2.imread(t)
        ih, iw, _ = image.shape
        w, h = input_shape
        # Scale so the whole image fits while preserving aspect ratio.
        resize_ratio = min(w / iw, h / ih)
        resize_w = int(iw * resize_ratio)
        resize_h = int(ih * resize_ratio)
        image_resize = cv2.resize(image, (resize_w, resize_h))
        # Grey (128) padding; explicit uint8 dtype so cv2.imwrite handles
        # the array correctly (np.full would otherwise default to int64).
        image_paded = np.full((h, w, 3), 128, dtype=np.uint8)
        dw = int((w - resize_w) / 2)
        dh = int((h - resize_h) / 2)
        image_paded[dh: resize_h + dh, dw: resize_w + dw, :] = image_resize
        # Four quadrants with a 52-pixel overlap band at the centre seams.
        new_image0 = image_paded[:int(h/2+52), :int(w/2+52), :]
        new_image1 = image_paded[:int(h/2+52), int(w/2-52):, :]
        new_image2 = image_paded[int(h/2-52):, :int(w/2+52), :]  # stray ':' step removed
        new_image3 = image_paded[int(h/2-52):, int(w/2-52):, :]
        new_image = (new_image0, new_image1, new_image2, new_image3)
        path = "E:\\kaggle\\nfl-impact-detection\\test_clip_images_992"
        if not os.path.exists(path):
            os.makedirs(path)
        for j in range(4):
            box_clip_filepath = os.path.join(path, t.split("\\")[-1].split('.')[0] + "_%d" % j + ".jpg")
            cv2.imwrite(box_clip_filepath, new_image[j])
if __name__ == '__main__':
    # Collect every .jpg in the test directory and run the split/pad pipeline.
    test_data_dir = r'E:\kaggle\nfl-impact-detection\test_picture'
    pictures = [os.path.join(test_data_dir, i) for i in os.listdir(test_data_dir) if i.endswith("jpg")]
    gen_new_data(pictures, (1880, 1880))
|
import socket as socket_lib
import time
# Base class for the client's custom exceptions.
class ClientError(Exception):
    """Raised on any client-side protocol or connection failure.

    Derives from ``Exception`` rather than ``BaseException``:
    ``BaseException`` is reserved for interpreter-level signals
    (SystemExit, KeyboardInterrupt) and should not root application errors.
    """
    pass
# Server reply: a plain value container for status plus payload lines.
class ServerResponse:
    """Holds one parsed server reply."""

    def __init__(self, status, data):
        self.status = status
        self.data = data

    @property
    def has_error(self):
        """True when the server reported the 'error' status."""
        return self.status == 'error'

    def __str__(self):
        return 'status: %s\ndata: %s' % (self.status, self.data)
# Parser turning the server's raw bytes into a ServerResponse.
class ServerResponseParser:
    # Index of the status line; everything after it is the message body.
    IDX_HEADER = 0
    # Minimum number of lines for a reply to be considered well-formed.
    MIN_LINES = 1

    @staticmethod
    def from_binary(data):
        """Decode ``data`` (UTF-8 bytes) into a ServerResponse.

        Raises ClientError when the reply has fewer than MIN_LINES lines.
        """
        # Turn the binary payload into text and split it into lines.
        text = data.decode('utf-8')
        lines = text.strip().split('\n')
        # Too short to be a valid reply.  (Stray debug ``print(len(lines))``
        # removed -- it leaked internals to stdout on every malformed reply.)
        if len(lines) < ServerResponseParser.MIN_LINES:
            raise ClientError('wrong answer format')
        # First line is the status; the rest is the payload.  More involved
        # parsing would live here.
        response = ServerResponse(lines[ServerResponseParser.IDX_HEADER],
                                  lines[ServerResponseParser.IDX_HEADER + 1:])
        return response
class Client:
    """Thin TCP client for the put/get metrics protocol."""

    def __init__(self, host, port, timeout=None, buff_size=4096):
        # Receive buffer size kept as an attribute to avoid hard-coding it.
        self.buff_size = buff_size
        # The underlying socket instance.
        self.connection = socket_lib.socket()
        # Optional socket timeout.
        if timeout is not None:
            self.connection.settimeout(timeout)
        # Connect immediately; wrap any failure into ClientError.
        try:
            self.connection.connect((host, port))
        except BaseException:
            raise ClientError('connection failed')
        pass

    # Wrapper to avoid repeating send error handling.
    def _send(self, message):
        try:
            self.connection.sendall(message.encode())
        except BaseException:
            raise ClientError('something went wrong')

    # Wrapper to avoid repeating receive error handling.
    def _recv(self):
        data = self.connection.recv(self.buff_size)
        if not data:
            raise ClientError('server doesn\'t answer')
        # Raw bytes received -- parse them into a structured response.
        response = ServerResponseParser.from_binary(data)
        return response

    def command(self, message):
        """Send one protocol line and return the parsed ServerResponse.

        Raises ClientError when the server answers with the error status.
        """
        # Send the message.
        print(f' >> {message.strip()}')
        self._send(message)
        # Read the reply.
        response = self._recv()
        print(f' << {response.status}')
        # An error status becomes an exception here.  Strictly speaking an
        # error status is part of normal protocol traffic, not an
        # exceptional condition, but for a teaching example this will do.
        if response.has_error:
            raise ClientError(f'some "get" errors: {"".join(response.data)}')
        return response
        pass

    def put(self, metric, value, timestamp=None):
        """Store one metric sample; timestamp defaults to 'now'."""
        if timestamp is None:
            timestamp = int(time.time())
        response = self.command(f'put {metric} {value} {timestamp}\n')
        return response
        pass

    def get(self, metric='*'):
        """Fetch metrics as {name: [(timestamp, value), ...]} sorted by time."""
        # Issue the command as usual...
        response = self.command(f'get {metric}\n')
        # ...then parse each payload line: "<key> <value> <timestamp>".
        idx_key, idx_value, idx_time = 0, 1, 2
        result = {}
        for line in response.data:
            values = line.split(sep=' ')
            if values[idx_key] not in result:
                result[values[idx_key]] = []
            try:
                result[values[idx_key]].append((int(values[idx_time]), float(values[idx_value])))
            except BaseException:
                raise ClientError("stored metrics format error")
        # Sort each metric's samples chronologically.
        for key in result.keys():
            result[key] = sorted(result[key], key=lambda x: x[0])
        return result
def do_stuff(name):
    """Demo run: store a few metrics, read them back, then trigger an error."""
    # Create the client.
    client = Client('127.0.0.1', 8888, timeout=15)
    client.put("palm.cpu", 0.5, timestamp=1150864247)
    client.put("palm.cpu", 2.0, timestamp=1150864248)
    client.put("palm.cpu", 0.5, timestamp=1150864248)
    client.put("eardrum.cpu", 3, timestamp=1150864250)
    client.put("eardrum.cpu", 4, timestamp=1150864251)
    client.put("eardrum.memory", 4200000)
    # Server-side storage is out of scope -- the task only asks for a client.
    stored = client.get("something")
    print('stored metrics:')
    print(stored)
    # Exercise the error path as well.
    client.command("zzzzzzz")
if __name__ == '__main__':
    # Entry point for a manual run against a local server.
    do_stuff('PyCharm')
|
from _typeshed import Incomplete
# Type stub: hash a whole graph via Weisfeiler-Lehman colour refinement
# (networkx.algorithms.graph_hashing).
def weisfeiler_lehman_graph_hash(
    G,
    edge_attr: Incomplete | None = None,
    node_attr: Incomplete | None = None,
    iterations: int = 3,
    digest_size: int = 16,
): ...
# Type stub: per-node Weisfeiler-Lehman hashes of rooted subgraphs
# (networkx.algorithms.graph_hashing).
def weisfeiler_lehman_subgraph_hashes(
    G,
    edge_attr: Incomplete | None = None,
    node_attr: Incomplete | None = None,
    iterations: int = 3,
    digest_size: int = 16,
): ...
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, exceptions
from datetime import datetime, timedelta
class HrDepartment(models.Model):
    """Adds birthday-reminder settings to the standard HR department."""
    _inherit = 'hr.department'

    # How many days before a birthday the reminder should fire.
    remind_before = fields.Integer()
    # Employees of this department who opted into the birthday reminder.
    birthday_reminder_list = fields.Many2many('hr.employee')
class HrEmployee(models.Model):
    """Extends hr.employee with birthday-reminder bookkeeping.

    NOTE(review): the compute methods below both assign the computed field
    AND return a value -- the search methods call them directly and use the
    return value, so the early returns are kept for compatibility even
    though they break multi-record recordsets.
    """
    _inherit = 'hr.employee'

    include_in_birthday_reminder_list = fields.Boolean()
    next_birthday = fields.Date(compute='_next_birthday', search='_search_next_birthday')
    birthday_remind_date = fields.Date(compute='_birthday_remind_date', search='_search_remind_date')

    def _search_next_birthday(self, operator, value):
        # Brute-force search: recompute for every employee and compare the
        # date part of the computed value against the searched string.
        employees = self.search([])
        if employees:
            matching_ids = []  # renamed from ``list`` -- don't shadow the builtin
            for employee in employees:
                if value == str(employee._next_birthday()).split(' ')[0]:
                    matching_ids.append(employee.id)
            return [('id', 'in', matching_ids)]

    def _search_remind_date(self, operator, value):
        # Same brute-force approach as _search_next_birthday.
        employees = self.search([])
        if employees:
            matching_ids = []
            for employee in employees:
                if value == str(employee._birthday_remind_date()).split(' ')[0]:
                    matching_ids.append(employee.id)
            return [('id', 'in', matching_ids)]

    @api.model
    def create(self, values):
        """Create the employee and register it on its department's reminder list."""
        rec = super(HrEmployee, self).create(values)
        try:
            if rec.include_in_birthday_reminder_list:
                if rec.department_id and rec.birthday:
                    reminders_list = [i.id for i in rec.department_id.birthday_reminder_list]
                    reminders_list.append(rec.id)
                    # BUGFIX: was ``self.department_id.write(...)`` -- inside an
                    # @api.model method ``self`` is the empty model recordset, so
                    # the write targeted nothing; the new record's department is
                    # ``rec.department_id``.
                    rec.department_id.write({'birthday_reminder_list': [[6, 0, reminders_list]]})
                else:
                    raise exceptions.ValidationError('Department and birthday are required')
        except Exception as e:
            raise exceptions.UserError(e)
        else:
            return rec

    def write(self, values):
        """Keep the department reminder list in sync with the opt-in flag."""
        rec = super(HrEmployee, self).write(values)
        if 'include_in_birthday_reminder_list' in values:
            if values['include_in_birthday_reminder_list']:
                # Opted in: add this employee to the department's list.
                if self.department_id and self.birthday:
                    reminders_list = [i.id for i in self.department_id.birthday_reminder_list]
                    reminders_list.append(self.id)
                    self.department_id.write({'birthday_reminder_list': [[6, 0, reminders_list]]})
            else:
                # Opted out: drop this employee from the department's list.
                if self.department_id:
                    reminders_list = [i.id for i in self.department_id.birthday_reminder_list
                                      if i.id != self.id]
                    self.department_id.write({'birthday_reminder_list': [[6, 0, reminders_list]]})
        # BUGFIX: the original returned None on the reminder-list branch and
        # called super().write(values) a SECOND time otherwise (writing the
        # same values twice); a single super call with a consistent return
        # value is the correct contract.
        return rec

    @api.depends('birthday')
    def _next_birthday(self):
        """Compute the next occurrence of the employee's birthday.

        A birthday falling on today's date counts as already passed and
        rolls over to next year (matches the original strict comparison).
        Collapses the original four copy-pasted date-building branches.
        """
        today = datetime.date(datetime.now())
        for rec in self:
            if rec.birthday:
                # (month, day) tuples order exactly the way the original
                # month/day if-cascade did.
                if (today.month, today.day) < (rec.birthday.month, rec.birthday.day):
                    year = today.year
                else:
                    year = today.year + 1
                date = datetime(year, rec.birthday.month, rec.birthday.day)
                rec.next_birthday = date
                return date
            else:
                # No birthday on file: clear the field and the opt-in flag.
                rec.next_birthday = False
                rec.include_in_birthday_reminder_list = False
                return False

    @api.depends('birthday', 'next_birthday')
    def _birthday_remind_date(self):
        """Reminder date = next birthday minus the department's lead time."""
        if self.birthday:
            remind_before = self.department_id.remind_before
            date = self.next_birthday - timedelta(days=remind_before)
            self.birthday_remind_date = date
            return date
        else:
            self.birthday_remind_date = False
            return False

    def send_birthday_reminder(self):
        """Cron entry point: e-mail every opted-in employee, per department."""
        # Template lookup hoisted out of the loops -- it is loop-invariant.
        template = self.env.ref('barameg_birthday_reminder.birthday_reminder')
        for department in self.env['hr.department'].search([]):
            for employee in department.birthday_reminder_list:
                self.env['mail.template'].browse(template.id).send_mail(employee.id)
|
#! /usr/bin/env python3
# Surface area of a rectangular box (Quader).
länge = 6
breite = 2
höhe = 3

# Each pair of opposite faces contributes twice its area.
vorder_und_rueckseite = breite * höhe   # front and back
ober_und_unterseite = länge * breite    # top and bottom
rechte_und_linke_seite = länge * höhe   # right and left

oberfläche = 2 * (vorder_und_rueckseite + ober_und_unterseite + rechte_und_linke_seite)
print(oberfläche)
|
# Read one line from stdin and print it without its final character.
palabra = str(input())
print(palabra[0:-1])
|
import pytest
from numbers_to_dec import list_to_decimal
# [0, 4, 2, 8] => 428
# [1, 2] => 12
# [3] => 3
# [6, 2, True] => raises TypeError
# [-3, 12] => raises ValueError
# [3.6, 4, 1] => raises TypeError
# ['4', 5, 3, 1] => raises TypeError
def test_success():
    # Table-driven version of the original four assertions.
    cases = [
        ([0, 4, 2, 8], 428),
        ([1, 2], 12),
        ([3], 3),
        ([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], 1234567890),
    ]
    for digits, expected in cases:
        assert list_to_decimal(digits) == expected
def test_negative():
    # Negative digits are rejected with ValueError.
    with pytest.raises(ValueError):
        list_to_decimal([-3, 12])
def test_empty():
    # An empty digit list has no decimal value.
    with pytest.raises(ValueError):
        list_to_decimal([])
def test_bool():
    # bool is a subclass of int, but the kata requires rejecting it.
    with pytest.raises(TypeError):
        list_to_decimal([False, True, 0, -1])
def test_str():
    # String "digits" must raise TypeError, not be coerced.
    with pytest.raises(TypeError):
        list_to_decimal(['4', 5, 3, 1])
def test_range():
    # Each digit must lie in 0..9.
    with pytest.raises(ValueError):
        list_to_decimal([11, 99])
def test_endpoint():
    """
    This test was added so that a mutation would fail the code.
    Pybites uses mut.py to test the tests.
    Check this link for more details:
    https://pybit.es/guest-mutpy-exploration.html
    """
    # 10 is just past the valid 0..9 range boundary.
    with pytest.raises(ValueError):
        list_to_decimal([10, 0])
@pytest.mark.xfail
def test_mutaion():
    """
    This did not help with testing mutation.
    (NOTE(review): the name keeps the original 'mutaion' typo so that any
    selection of tests by name keeps working.)
    """
    if(list_to_decimal([10]) == 10):
        pytest.xfail("Extra test case to pass mutation test")
|
from math import ceil
def generateTriangle(triangle_heigth):
    """Build a Pascal-style triangle with the given number of extra rows,
    fill in its values, and print it."""
    # Seed row; every following row is two cells wider than the last.
    rows = [[0, 1, 0]]
    for _ in range(triangle_heigth):
        rows.append([0] * (len(rows[-1]) + 2))
    fillTriangleRowValues(rows)
    displayTriangle(rows)
def displayTriangle(listValue):
    """Pretty-print the triangle: zeros become single spaces, real values
    are centred in a 5-character cell, each row indented 3 fewer spaces."""
    indent = len(listValue) * 5
    for row in listValue:
        print()
        print(" " * indent, end="")
        for value in row:
            if value == 0:
                print("", end=" ")
            else:
                print("{:^5d}".format(value), end="")
        indent -= 3
def fillTriangleRowValues(triangle):
    """Fill the right half of each row with Pascal sums, then mirror it left.

    Row 0 is the seed row and is left untouched.  (Removed the leftover
    debug ``print("true")`` on that branch and the unused ``is_even``
    local.)
    """
    for count, row in enumerate(triangle):
        if count == 0:
            continue
        length = len(row)
        # First populated cell: centre of the row, shifted for parity.
        starting_index = int((length - 2) / 2) + (count + 1) % 2
        for index in range(starting_index, length, 2):
            # Sum of the two parents in the previous row (0 off the edges).
            a = 0 if index - 2 < 0 else triangle[count - 1][index - 2]
            b = 0 if index >= len(triangle[count - 1]) else triangle[count - 1][index]
            triangle[count][index] = a + b
        # Mirror the filled right half onto the left half.
        half = ceil(length / 2)
        triangle[count][:half - 1] = triangle[count][half:][::-1]
generateTriangle(15)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import os
import unittest
from scipy import linalg
from .. import auxfnsc, auxfn
from ..model import nambu
from . import test_tools
currentdir = os.path.join(os.getcwd(), "mea/tests")
@unittest.skip("Skipping TestGFAuxSC because I need to recode it as self-anormal in d-wave is real!!.")
class TestGFAuxSC(unittest.TestCase):
    """ A class that implements tests for the Auxiliary green function class. If input is given
    as a sE file of a cluster, then it builds an auxiliary GF (it can do so
    for real or complex frequencies. However, imaginary frequencies is implicit).
    However, if an auxiliary green function file is given,
    it can extract the self energy (it is implicit then that the frequencies
    are on the real axis"""

    @classmethod
    def setUpClass(TestGFAuxSC):
        # NOTE(review): the conventional parameter name here is ``cls``.
        print("\nIn test_auxfnsc: \n")

    def __init__(self, methodName="runTest"):
        """ """
        super(TestGFAuxSC, self).__init__(methodName)

    def test_init(self):
        """Constructor should wire up file paths and default output names."""
        fin_sE_to = os.path.join(currentdir, "files/self_moy_sc_b60tp04n0495U6500.dat")
        gf_aux = auxfnsc.GFAuxSC(fin_sE_to=fin_sE_to)
        self.assertEqual(gf_aux.zn_col, 0)
        self.assertEqual(gf_aux.fin_sE_to, fin_sE_to)
        self.assertEqual(gf_aux.fout_sE_ctow, "self_nambu_ctow.dat")
        self.assertEqual(gf_aux.fout_gf_aux_to, "gf_aux_sb.dat")

    def test_build_gfvec_aux(self):
        """Rebuild the auxiliary GF by hand and compare with the class output."""
        fin_sE_to = os.path.join(currentdir, "files/self_moy_sc_b60tp04n0495U6500.dat")
        (zn_vec, sE_c) = nambu.read_nambu_c(fin_sE_to)
        gf_aux = auxfnsc.GFAuxSC(fin_sE_to=fin_sE_to)
        gf_aux.build_gfvec_aux()
        # Manual construction of the 2x2 superconducting block from the
        # irreducible representation of the cluster self-energy.
        sEvec_ir = nambu.c_to_ir(sE_c)
        sEvec_sb = np.zeros((sEvec_ir.shape[0], 2, 2) , dtype=complex)
        sEvec_sb[:, 0, 0] = sEvec_ir[:, 2, 2].copy() ; sEvec_sb[:, 0, 1] = sEvec_ir[:, -1, 2].copy()
        sEvec_sb[:, 1, 0] = np.conjugate(np.transpose(sEvec_sb[:, 0, 1].copy())) ; sEvec_sb[:, 1, 1] = -np.conjugate(sEvec_ir[:, 3, 3])
        # gf_aux(iwn) = [iwn*I - sE(iwn)]^-1, frequency by frequency.
        gfvec_aux_sb_test = np.zeros(sEvec_sb.shape, dtype=complex)
        for (i, sE) in enumerate(sEvec_sb):
            gfvec_aux_sb_test[i] = linalg.inv(1.0j*zn_vec[i]*np.eye(2, dtype=complex) - sE)
        try:
            np.testing.assert_allclose(sEvec_sb, gf_aux.sEvec_sb)
            np.testing.assert_allclose(gfvec_aux_sb_test, gf_aux.gfvec_aux_sb)
        except AssertionError:
            self.fail("Problem at test_build_gfvec_aux")

    def test_run_acon(self):
        """Run analytic continuation end-to-end (assertions currently disabled)."""
        fin_sE_to = os.path.join(currentdir, "files/self_moy_sc_b60tp04n0495U6500.dat")
        gf_aux = auxfnsc.GFAuxSC(fin_sE_to=fin_sE_to, rm_sE_ifty=False)
        gf_aux.build_gfvec_aux()
        gf_aux.run_acon(fin_OME_default=os.path.join(currentdir, "files/OME_default.dat"), \
                        fin_OME_other=os.path.join(currentdir, "files/OME_other.dat"), \
                        fin_OME_input=os.path.join(currentdir, "files/OME_input_test.dat")
                        )
        # gf_aux.get_sEvec_w_list() put this line in the next test
        #Aw_manual_small_truncation = np.loadtxt(os.path.join(currentdir,"files/Aw_manual_small_truncation.dat"))
        #w_n_manual = Aw_manual_small_truncation[:, 0]
        #Aw_manual = np.delete(Aw_manual_small_truncation,0, axis=1)
        #w_n =gf_aux.w_n_list[0]
        #Aw = gf_aux.Aw_t_list[0][:, 0][:, np.newaxis]
        try:
            # Comparisons disabled until the reference data is regenerated.
            pass
            #np.testing.assert_allclose(w_n.shape, w_n_manual.shape)
            #np.testing.assert_allclose(Aw.shape, Aw_manual.shape)
            #test_tools.compare_arrays(w_n, w_n_manual, rprecision=10**-2, n_diff_max=5, zero_equivalent=10**-5)
            #test_tools.compare_arrays(Aw, Aw_manual, rprecision=10**-2, n_diff_max=5, zero_equivalent=10**-5)
        except AssertionError:
            self.fail("ayaya np.allclose failed at test_build_gfvec_aux")

    def test_get_sEvec_w(self):
        """Placeholder: extraction of the real-frequency self-energy."""
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
from gim.core.models import Issue
from .managers import ActivityManager
def update_activity_for_fk_link(sender, instance, created, **kwargs):
    """post_save handler: push ``instance`` into the activity stream of its
    issue (or, failing that, its repository)."""
    # Only if the object can be saved in the activity stream.
    try:
        manager = ActivityManager.get_for_model_instance(instance)
    except IndexError:
        return
    if manager.for_issue:
        # Issue-scoped activity: require a linked, valid issue.
        if not instance.issue_id:
            return
        if not manager.is_obj_valid(instance.issue, instance):
            return
        instance.issue.activity.add_entry(instance)
        instance.issue.ask_for_activity_update()
    else:
        # Repository-scoped activity: resolve the repository best-effort,
        # first from the instance itself, then via its issue.
        repository = None
        # Broad catch is deliberate here: any failure to resolve a
        # repository just means "no activity entry", not an error.
        try:
            repository = getattr(instance, 'repository', None)
            if not repository:
                repository = instance.issue.repository
        except Exception:
            pass
        if not repository:
            return
        if not manager.is_obj_valid(repository, instance):
            return
        repository.activity.add_entry(instance)
def update_activity_for_commit_comment(sender, instance, created, **kwargs):
    """Attach a commit comment to the issue of its first related commit,
    then defer to the generic FK-link handler."""
    try:
        first_related = instance.commit.related_commits.all()[0]
    except IndexError:
        # The commit is not linked to any issue: nothing to record.
        return
    instance.issue = first_related.issue
    instance.issue_id = instance.issue.id
    update_activity_for_fk_link(sender, instance, created, **kwargs)
def update_activity_for_event_part(sender, instance, created, **kwargs):
    """post_save handler for event parts: store the parent Event in the
    activity stream, skipping fields the renderer ignores."""
    if not instance.event_id:
        return
    # first check for fields we want to ignore
    if instance.event.is_update and instance.field in instance.event.renderer_ignore_fields:
        return
    # only if the event can be saved in the activity stream
    try:
        manager = ActivityManager.get_for_model_instance(instance.event)
    except IndexError:
        return
    if manager.for_issue:
        # Issue-scoped: record on the event's issue and queue an update.
        if not manager.is_obj_valid(instance.event.issue, instance.event):
            return
        instance.event.issue.activity.add_entry(instance.event)
        instance.event.issue.ask_for_activity_update()
    else:
        # Repository-scoped fallback.
        if not manager.is_obj_valid(instance.event.repository, instance.event):
            return
        instance.event.repository.activity.add_entry(instance.event)
|
INCH = 2.54 #변수를 상수 취급하기 위해 대문자로 표현, 값을 변경 가능
def sum(n):
    """Return 1 + 2 + ... + n, or 0 when n < 1.

    Uses the closed form n*(n+1)/2 instead of the original O(n) loop;
    the ``n < 1`` guard preserves the loop's behaviour for non-positive n.
    NOTE: this name shadows the builtin ``sum``; kept for compatibility.
    """
    if n < 1:
        return 0
    return n * (n + 1) // 2
|
from rest_framework import serializers
from .models import Post, Tag
class PostListSerializer(serializers.ModelSerializer):
    """Compact Post representation for list endpoints (id + title only)."""
    class Meta:
        model = Post
        fields = ("id", "title")
class TagListSerializer(serializers.ModelSerializer):
    """Full Tag representation for list endpoints."""
    class Meta:
        model = Tag
        fields = "__all__"
class TagPostDetailsSerializer(serializers.ModelSerializer):
    """Minimal nested Tag (name only), embedded inside post details."""
    class Meta:
        model = Tag
        fields = ('name',)
class PostDetailSerializer(serializers.ModelSerializer):
    """Full Post with its tags nested as name-only objects."""
    tags = TagPostDetailsSerializer(many=True)

    class Meta:
        model = Post
        fields = '__all__'
class TagDetailSerializer(serializers.ModelSerializer):
    """Full Tag with all of its posts nested in detail form.

    NOTE(review): assumes Tag exposes a ``posts`` reverse relation --
    confirm the related_name on Post.tags.
    """
    posts = PostDetailSerializer(many=True)

    class Meta:
        model = Tag
        fields = '__all__'
class PostCreateSerializer(serializers.ModelSerializer):
    """Creates posts; id and tags are excluded from writable input.

    NOTE(review): this create() mirrors what ModelSerializer.create already
    does -- it could likely be deleted without behaviour change.
    """
    def create(self, validated_data):
        post = Post(**validated_data)
        post.save()
        return post

    class Meta:
        model = Post
        exclude = ('id', 'tags')
class TagCreateSerializer(serializers.ModelSerializer):
    """Creates tags from all model fields.

    NOTE(review): like PostCreateSerializer, this create() duplicates the
    ModelSerializer default.
    """
    def create(self, validated_data):
        tag = Tag(**validated_data)
        tag.save()
        return tag

    class Meta:
        model = Tag
        fields = "__all__"
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import abc
import copy
import numbers
import os
import struct
import textwrap
# Abstract Base Class of Protocols
# Pre-define useful arguments and methods of protocols
from .utilities import seekset, Info, ProtoChain
from .exceptions import BoolError, BytesError
from .validations import bool_check, int_check
ABCMeta = abc.ABCMeta
abstractmethod = abc.abstractmethod
abstractproperty = abc.abstractproperty
class Protocol(metaclass=ABCMeta):
    """Abstract base class for all protocol family.

    Properties:
        * name -- str, name of corresponding procotol
        * info -- Info, info dict of current instance
        * length -- int, header length of corresponding protocol
        * protocol -- str, name of next layer protocol
        * protochain -- ProtoChain, protocol chain of current instance

    Attributes:
        * _file -- BytesIO, bytes to be extracted
        * _info -- Info, info dict of current instance
        * _protos -- ProtoChain, protocol chain of current instance

    Utilities:
        * _read_protos -- read next layer protocol type
        * _read_fileng -- read file buffer
        * _read_unpack -- read bytes and unpack to integers
        * _read_binary -- read bytes and convert into binaries
        * _decode_next_layer -- decode next layer protocol type
        * _import_next_layer -- import next layer protocol extractor

    NOTE: the original declared ``__metaclass__ = ABCMeta``, which is the
    Python 2 spelling and has NO effect on Python 3 (this module uses
    f-strings, so it is Python 3 only); ``metaclass=`` in the class header
    actually enforces the abstract methods.
    """

    ##########################################################################
    # Properties.
    ##########################################################################

    # name of current protocol
    @abstractproperty
    def name(self):
        pass

    # info dict of current instance
    @property
    def info(self):
        return self._info

    # header length of current protocol
    @abstractproperty
    def length(self):
        pass

    # name of next layer protocol
    @property
    def protocol(self):
        try:
            return self._protos[1]
        except IndexError:
            return None

    # protocol chain of current instance
    @property
    def protochain(self):
        return self._protos

    ##########################################################################
    # Data models.
    ##########################################################################

    # Not hashable
    __hash__ = None

    def __new__(cls, *args, **kwargs):
        self = super().__new__(cls)
        return self

    def __repr__(self):
        name = self.__class__.__name__
        repr_ = f"<class 'protocol.{name}'>"
        return repr_

    @seekset
    def __str__(self):
        # Hex dump of the whole buffer, one space between bytes.
        str_ = ' '.join(textwrap.wrap(self._file.read().hex(), 2))
        return str_

    @seekset
    def __bytes__(self):
        bytes_ = self._file.read()
        return bytes_

    @abstractmethod
    def __len__(self):
        pass

    @abstractmethod
    def __length_hint__(self):
        pass

    def __iter__(self):
        # Iterate over a rewound copy so the instance's own position is kept.
        file_ = copy.deepcopy(self._file)
        file_.seek(os.SEEK_SET)
        return iter(file_)

    def __getitem__(self, key):
        return self._info[key]

    ##########################################################################
    # Utilities.
    ##########################################################################

    def _read_protos(self, size):
        """Read next layer protocol type.

        Keyword arguments:
            size -- int, buffer size
        """
        return None

    def _read_fileng(self, *args, **kwargs):
        """Read file buffer."""
        return self._file.read(*args, **kwargs)

    def _read_unpack(self, size=1, *, sign=False, lilendian=False):
        """Read bytes and unpack for integers.

        Keyword arguments:
            size -- int, buffer size (default is 1)
            sign -- bool, signed flag (default is False)
                    <keyword> True / False
            lilendian -- bool, little-endian flag (default is False)
                    <keyword> True / False
        """
        endian = '<' if lilendian else '>'
        if size == 8:   kind = 'q' if sign else 'Q'  # unpack to 8-byte integer (long long)
        elif size == 4: kind = 'i' if sign else 'I'  # unpack to 4-byte integer (int / long)
        elif size == 2: kind = 'h' if sign else 'H'  # unpack to 2-byte integer (short)
        elif size == 1: kind = 'b' if sign else 'B'  # unpack to 1-byte integer (char)
        else:           kind = None                  # do not unpack
        if kind is None:
            buf = self._file.read(size)
        else:
            try:
                fmt = f'{endian}{kind}'
                mem = self._file.read(size)
                buf = struct.unpack(fmt, mem)[0]
            except struct.error:
                # Short read / malformed buffer: signal with None.
                return None
        return buf

    def _read_binary(self, size=1):
        """Read bytes and convert into binaries.

        Keyword arguments:
            size -- int, buffer size (default is 1)
        """
        bin_ = ''
        for _ in range(size):
            byte = self._file.read(1)
            bin_ += bin(ord(byte))[2:].zfill(8)
        return bin_

    def _decode_next_layer(self, dict_, proto=None, length=None):
        """Decode next layer protocol.

        Keyword arguments:
            dict_ -- dict, info buffer
            proto -- str, next layer protocol name
            length -- int, valid (not padding) length
        """
        next_ = self._import_next_layer(proto, length)
        # make next layer protocol name
        name_ = str(proto or 'Raw').lower()
        # write info and protocol chain into dict
        dict_[name_] = next_[0]
        self._protos = ProtoChain(proto, next_[1])
        return dict_

    def _import_next_layer(self, proto, length=None):
        """Import next layer extractor.

        Keyword arguments:
            proto -- str, next layer protocol name
            length -- int, valid (not padding) length

        BUGFIX: the original read from an undefined name ``file_`` (a
        NameError at runtime); the instance buffer ``self._file`` is the
        intended source.  ``read(None)`` reads to EOF, matching the old
        ``read(*[length])`` spelling.
        """
        data = self._file.read(length) or None
        return data, None
|
# Generated by Django 3.2.7 on 2021-10-06 09:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``researchpaper`` table.

    Auto-generated by Django (3.2.7) -- do not hand-edit once applied.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='researchpaper',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300, null=True)),
                ('publication', models.CharField(max_length=200, null=True)),
                ('link', models.CharField(max_length=300, null=True)),
                ('authors', models.CharField(max_length=200, null=True)),
                ('year', models.DateField(verbose_name='date published')),
            ],
        ),
    ]
|
# Demonstrations of the different ways to loop over a dictionary.
user_0 = {
    'username': 'efermi',
    'first': 'enrico',
    'last': 'fermi',
}

# Iterate over key/value pairs.
for field, content in user_0.items():
    print(field)
    print(content)

# Iterate over the keys explicitly...
for field in user_0.keys():
    print(field)

# ...although iterating the dict itself yields keys as well,
# so this loop is equivalent to the previous one.
for field in user_0:
    print(field)

# Membership test against the keys.
fav_languages = {
    'jen': 'python',
    'sarah': 'c',
    'edward': 'ruby',
    'phil': 'python',
}
if 'erin' not in fav_languages.keys():
    print('please take the quiz erin')

# Keys in alphabetical order.
for name in sorted(fav_languages.keys()):
    print(name)

# Iterate over the values.
for languages in fav_languages.values():
    print(languages)
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input,Output
import plotly
import plotly.offline as pyo
import plotly.graph_objs as go
import pandas as pd
app = dash.Dash()

# Static page layout: a heading, the live graph, and a 1-second tick that
# drives the graph callback (n_intervals increments every `interval` ms).
app.layout = html.Div([
    html.H1('BLOOD PRESSURE'),
    dcc.Graph(id='graph'),
    dcc.Interval(id='interval_div', interval=1000, n_intervals=1)
])
@app.callback(Output('graph', 'figure'),
              [Input('interval_div', 'n_intervals')])
def update_graph(n):
    """Rebuild the scatter figure from the first ``n`` rows of gasdata.csv.

    Called once per Interval tick; ``n`` grows by one each second, so the
    plot appears to stream.
    """
    df = pd.read_csv('gasdata.csv')
    df2 = df.iloc[:n]
    # BUGFIX: marker colour/size previously used the FULL column
    # ``df['Concentration']`` while x/y used the ``df2`` slice, so the
    # marker arrays were longer than the data arrays; slice consistently.
    data = [go.Scatter(x=df2['Time'], y=df2['Concentration'],
                       mode='markers',
                       marker=dict(color=df2['Concentration'], size=df2['Concentration']))]
    layout = go.Layout(title='HEART RATE')
    fig = go.Figure(data=data, layout=layout)
    return fig
if __name__ == '__main__':
    # Serve the dashboard on port 8052 (Dash development server).
    app.run_server(port=8052)
|
from loggable import Loggable

# Module-wide logger shared by the Primer3Plus package.
logger = Loggable("Primer3Plus")
|
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from random import randint
from utils import *
from models import GCN, MLP, GCN_Hybrid
from scipy.sparse import csr_matrix
# Set random seed for reproducibility (NumPy and TF graph-level).
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)

# Settings (TF1 flag-based configuration).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', 'gcn_hybrid', 'Model string.')  # 'gcn', 'gcn_cheby', 'gcn_hybrid', 'dense'
flags.DEFINE_float('learning_rate', 0.010, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 3000, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 256, 'Number of units in hidden layer 1.')  # originally 64
flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 1000, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
flags.DEFINE_string('summaries_dir', "./logs", "Logs directory")
flags.DEFINE_string('run_name', "gcn_hybrid_denseFirst_1", "Run name")

# Load the connectome dataset: adjacency, features, one-hot labels and split masks.
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_connectome_data()
#import pdb; pdb.set_trace()
# Some preprocessing
features = preprocess_features(features)

# Choose the graph filters and model class according to the flag.
if FLAGS.model == 'gcn':
    support = [preprocess_adj(adj)]  # first-order approximation
    num_supports = 1
    model_func = GCN
elif FLAGS.model == 'gcn_cheby':
    support = chebyshev_polynomials(adj, FLAGS.max_degree)
    num_supports = 1 + FLAGS.max_degree
    model_func = GCN
elif FLAGS.model == 'gcn_hybrid':
    support = chebyshev_polynomials(adj, FLAGS.max_degree)
    num_supports = 1 + FLAGS.max_degree
    model_func = GCN_Hybrid
elif FLAGS.model == 'dense':
    support = [preprocess_adj(adj)]  # Not used
    num_supports = 1
    model_func = MLP
else:
    raise ValueError('Invalid argument for model: ' + str(FLAGS.model))

# Define placeholders
placeholders = {
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
    'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
}

# Create model
model = model_func(placeholders, input_dim=features[2][1], logging=True)

# Initialize session
sess = tf.Session()
# Define model evaluation function
def evaluate(features, support, labels, mask, placeholders):
t_test = time.time()
feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
outs_val = sess.run([model.loss, model.accuracy, model.acc_sum], feed_dict=feed_dict_val)
return outs_val[0], outs_val[1], (time.time() - t_test), outs_val[2]
# Init variables
sess.run(tf.global_variables_initializer())
cost_val = []
# Train model
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + "/" + FLAGS.run_name + "/train", graph=sess.graph)
valid_writer = tf.summary.FileWriter(FLAGS.summaries_dir + "/" + FLAGS.run_name + "/valid", graph=sess.graph)
for epoch in range(FLAGS.epochs):
t = time.time()
# Construct feed dictionary
feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Training step
outs = sess.run([model.opt_op, model.loss, model.accuracy, model.acc_sum], feed_dict=feed_dict)
train_writer.add_summary(outs[3], epoch)
# Validation
cost, acc, duration, val_sum = evaluate(features, support, y_val, val_mask, placeholders)
cost_val.append(cost)
valid_writer.add_summary(val_sum, epoch)
# Print results
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
"train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
"val_acc=", "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t))
# if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
# print("Early stopping...")
# break
print("Optimization Finished!")
# Testing
test_cost, test_acc, test_duration, test_sum = evaluate(features, support, y_test, test_mask, placeholders)
print("Test set results:", "cost=", "{:.5f}".format(test_cost),
"accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from revolver.core import run
from revolver.tool import ruby_build, ruby_rbenv
def install(version, _update=True):
    """Install the given ruby version via rbenv and make it the global default.

    When ``_update`` is false and the requested version is already the
    active global ruby, the installation steps are skipped entirely.
    """
    ruby_rbenv.ensure()
    ruby_build.ensure()
    # "; true" keeps the command from failing when the version is missing;
    # an empty status means the version is already installed and active.
    status = run("rbenv global %s; true" % version)
    already_active = status == ""
    if already_active and not _update:
        return
    run("rbenv install %s" % version)
    run("rbenv global %s" % version)
    run("rbenv rehash")
    run("gem install --no-ri --no-rdoc bundler")
def ensure(version):
    """Install *version* only if it is not already the active global ruby."""
    install(version, _update=False)
|
import re
from weather_api import WeatherApi
class Listener():
    """Minimal event-listener interface; subclasses override handle_event."""

    def __init__(self):
        # The base class holds no state.
        pass

    def handle_event(self, event):
        """Handle a single event; the base implementation is a no-op."""
        pass
class WeatherBot(Listener):
def say_weather(self, location):
WeatherApi.get_weather(location)
def handle_event(self, event):
print "WeatherBot is handling event {}".format(event)
msg_data = event.data.get("data")
pattern = "^(.*)\s(.*)\s(\d*)\s"
# functions = ["weather", "zombie", "lyft"]
# if msg_data.startswith("pytro"):
# for function in functions:
# if function in msg_data.contains(function):
# eval(function, )
if msg_data.startswith("pyro"):
if "weather" in msg_data:
pattern = "^(.*)\s(.*)\s(\d*)"
groups = re.findall(pattern, msg_data)
location = groups[0][2]
self.say_weather(location)
else:
return
return
|
"""
作者:Wanghao
日期:2020年11月18日
"""
import matlab.engine
eng = matlab.engine.start_matlab()
eng.plot1(nargout=0)
eng.quit()
#如果想改变nargout=0输出的个数,可以将脚本定义成一个函数,然后调用这个函数
# #This example shows how to call the MATLAB® sqrt function asynchronously from Python® and retrieve the square root later.
#
# The engine calls MATLAB functions synchronously by default. Control returns to Python only when the MATLAB function finishes. But the engine also can call functions asynchronously. Control immediately returns to Python while MATLAB is still executing the function. The engine stores the result in a Python variable that can be inspected after the function finishes.
#
# Use the background argument to call a MATLAB function asynchronously.
#
# import matlab.engine
# eng = matlab.engine.start_matlab()
# future = eng.sqrt(4.0,background=True)
# ret = future.result()
# print(ret)
# 2.0
# Use the done method to check if an asynchronous call finished.
#
# tf = future.done()
# print(tf)
# True
# To stop execution of the function before it finishes, call future.cancel().
# Call User Scripts and Functions from Python
# This example shows how to call a MATLAB® script to compute the area of a triangle from Python®.
#
# In your current folder, create a MATLAB script in a file named triarea.m.
#
# b = 5;
# h = 3;
# a = 0.5*(b.* h)
# After you save the file, start Python and call the script.
#
# import matlab.engine
# eng = matlab.engine.start_matlab()
# eng.triarea(nargout=0)
# a =
#
# 7.5000
#
# Specify nargout=0. Although the script prints output, it returns no output arguments to Python.
#
# Convert the script to a function and call the function from the engine. To edit the file, open the MATLAB editor.
#
# eng.edit('triarea',nargout=0)
# Delete the three statements. Then add a function declaration and save the file.
#
# function a = triarea(b,h)
# a = 0.5*(b.* h);
# Call the new triarea function from the engine.
#
# ret = eng.triarea(1.0,5.0)
# print(ret)
# 2.5
# The triarea function returns only one output argument, so there is no need to specify nargout.
#
# See Also |
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import unittest
import os
import sys
import platform
from time import gmtime, strftime
import distro
from musclex import __version__
from .test_utils import module_test, hdf_read_test, gpu_device_test, pyfai_gpu_integrate_test
class MuscleXTest(unittest.TestCase):
    """End-to-end tests for the MuscleX modules; results are appended to a
    plain-text log file together with system and package information."""

    @classmethod
    def setUpClass(cls):
        """Resolve test data paths, collect system info and open the log."""
        # When frozen by PyInstaller, test data lives next to the bundle.
        if getattr(sys, 'frozen', False):
            cls.currdir = os.path.join(os.path.dirname(sys._MEIPASS), "musclex")
        else:
            cls.currdir = os.path.dirname(__file__)
        cls.inpath = os.path.join(cls.currdir, "test_images")
        cls.dipath = os.path.join(cls.inpath, "di_test_data")
        cls.hdfpath = os.path.join(cls.dipath, "test.hdf")
        cls.hdfpickle = os.path.join(cls.inpath, "hdf_record", "hdfdata_record.p")
        cls.testversion = __version__ # change this to test against a different version
        system = platform.system()
        node = platform.node()
        proc = platform.processor()
        version = platform.version()
        machine = platform.machine()
        arch = platform.architecture()
        python_version = platform.python_version()
        # Platform-specific probes return empty values on other OSes.
        osx_ver, osx_info, _ = platform.mac_ver()
        win_rel, win_ver, win_csd, proc_type = platform.win32_ver()
        # NOTE(review): distro.linux_distribution() is deprecated and removed
        # in distro >= 1.6 -- confirm the pinned distro version.
        lin_name, lin_ver, lin_id = distro.linux_distribution()
        sysinfo = """\nSYSTEM INFO
System: {}
Node: {}
Processor: {}
Version: {}
Machine: {}
Architecture: {}
Python Version: {}
OSX Version: {}
OSX Info: {}
Windows Release: {}
Windows Version: {}
Windows CSD: {}
Windows OS Type: {}
Linux Distribution Name: {}
Linux Version: {}
Linux ID: {}\n
""".format(system, node, proc, version, machine, arch, python_version,
           osx_ver, osx_info, win_rel, win_ver, win_csd, proc_type,
           lin_name, lin_ver, lin_id)
        cls.logname = os.path.join(cls.currdir,"test_logs", "test.log")
        if not os.path.isdir(os.path.dirname(cls.logname)):
            os.mkdir(os.path.dirname(cls.logname))
        # Append to an existing log, otherwise start a new one.
        if os.path.exists(cls.logname):
            append_write = 'a'
        else:
            append_write = 'w'
        with open(cls.logname, append_write) as lf:
            lf.write("\n{}\n".format("-"*80))
            lf.write("Beginning test at {}\n".format(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
            lf.write("Testing MuscleX version: {}\n".format(__version__))
            lf.write(sysinfo)
            lf.write("Package Information\n")
            # Optional dependencies are imported lazily so a missing package
            # is logged rather than aborting the whole test run.
            try:
                import h5py
                lf.write("h5py Version: {}\n".format(h5py.__version__))
            except Exception:
                lf.write("Unable to import h5py\n")
            try:
                import lmfit
                lf.write("lmfit Version: {}\n".format(lmfit.__version__))
            except Exception:
                lf.write("Unable to import lmfit\n")
            lf.write("OpenCL Information\n")
            try:
                import pyopencl
                # Only the first OpenCL platform is inspected.
                p = pyopencl.get_platforms()[0]
                opencl_version = p.version
                cpus = p.get_devices(pyopencl.device_type.CPU)
                gpus = p.get_devices(pyopencl.device_type.GPU)
                lf.write("OpenCL Version: {}\n".format(opencl_version))
                lf.write("CPUs: {}\n".format(cpus))
                lf.write("GPUs: {}\n".format(gpus))
            except Exception:
                lf.write("OpenCL not available. Check pyopencl installation.\n")
            lf.write("\nSummary of Test Results\n")

    @classmethod
    def tearDownClass(cls):
        """Append the end-of-run timestamp and a separator to the log."""
        with open(cls.logname, 'a') as lf:
            lf.write("Ending test at {}\n".format(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
            lf.write("\n{}\n".format("-"*80))

    def testEquatorImage(self):
        """
        Runs a test of EquatorImage using the given settings.
        """
        settingsA = {
            "left_fix_sigmac" : 1.0, "right_fix_sigmac" : 1.0, "fix_k" : 0,
            "left_fix_sigmas" : 0.0001, "right_fix_sigmas" : 0.0001, "orientation_model" : 0,
            "nPeaks" : 2, "model" : "Gaussian", "isSkeletal" : True, "isExtraPeak": False,
            "mask_thres" : -1.0, "90rotation" : False, "blank_mask" : False,
            "no_cache" : True
        }
        # Alternative configuration kept for reference; currently unused.
        # settingsB = {
        #     "left_sigmac" : 1.0, "right_sigmac" : 1.0, "orientation_model" : 0,
        #     "nPeaks" : 5, "model" : "Voigt", "isSkeletal" : True,
        #     "mask_thres" : -1.0, "90rotation" : False, "blank_mask" : True,
        #     "no_cache" : True
        # }
        pass_test = module_test(mode="eq",
                                settings=settingsA,
                                pickledir=os.path.join(self.currdir, "eq", "tmp_verify_settingsA"),
                                inputpath=self.inpath,
                                compdir=os.path.join(self.currdir, "eq", "test_pickles_settingsA"),
                                testrecord=False,
                                testversion=self.testversion,
                                keeppickles=False)
        self.log_results(pass_test, "Equator Image")
        self.assertTrue(pass_test,"Equator Image Test for settings configuration A failed.")

    def testQuadrantFolder(self):
        """Runs a test of QuadrantFolder against recorded pickles."""
        settingsQF = {
            'bgsub' : 'None',
            'sigmoid' : 0.0,
            'no_cache' : True,
            'orientation_model' : 0
        }
        pass_test = module_test(
            mode="qf",
            settings=settingsQF,
            pickledir=os.path.join(self.currdir, "qf", "tmp_verify_settingsQF"),
            inputpath=self.inpath,
            compdir=os.path.join(self.currdir, "qf", "test_pickles_settingsQF"),
            testrecord=False,
            testversion=self.testversion,
            keeppickles=False)
        self.log_results(pass_test, "Quadrant Folder")
        self.assertTrue(pass_test,"Quadrant Folder Test for settings configuration QF failed.")

    def testDiffractionCentroids(self):
        """Runs a test of DiffractionCentroids against recorded pickles."""
        settingsDC = {
            'orientation_model' : 0,
            '90rotation' : False,
            'no_cache' : True
        }
        pass_test = module_test(
            mode="dc",
            settings=settingsDC,
            pickledir=os.path.join(self.currdir, "dc", "tmp_verify_settingsDC"),
            inputpath=self.inpath,
            compdir=os.path.join(self.currdir, "dc", "test_pickles_settingsDC"),
            testrecord=False,
            testversion=self.testversion,
            keeppickles=False)
        self.log_results(pass_test, "Diffraction Centroids")
        self.assertTrue(pass_test, "Diffraction Centroids Test for settings configuration DC failed.")

    def testProjectionTraces(self):
        """Runs a test of ProjectionTraces against recorded pickles."""
        settingsPT = {
            'boxes' : {'box1' : ((200, 800),(500, 600))},
            'bgsubs' : {'box1' : 0},
            'types' : {'box1' : 'h'},
            'peaks' : {'box1' : [100]},
            'bgsub' : 'None',
            'sigmoid' : 0.0,
            'no_cache' : True,
            'orientation_model' : 0
        }
        pass_test = module_test(
            mode="pt",
            settings=settingsPT,
            pickledir=os.path.join(self.currdir, "pt", "tmp_verify_settingsPT"),
            inputpath=self.inpath,
            compdir=os.path.join(self.currdir, "pt", "test_pickles_settingsPT"),
            testrecord=False,
            testversion=self.testversion,
            keeppickles=False)
        self.log_results(pass_test, "Projection Traces")
        self.assertTrue(pass_test, "Projection Traces Test for settings configuration PT has failed.")

    def testScanningDiffraction(self):
        """Runs a test of ScanningDiffraction with default settings."""
        settingsDI = {}
        pass_test = module_test(
            mode="di",
            settings=settingsDI,
            pickledir=os.path.join(self.currdir, "di", "tmp_verify_settingsDI"),
            inputpath=self.inpath,
            compdir=os.path.join(self.currdir, "di", "test_pickles_settingsDI"),
            testrecord=False,
            testversion=self.testversion,
            keeppickles=False)
        self.log_results(pass_test, "Scanning Diffraction")
        self.assertTrue(pass_test, "Scanning Diffraction Test for settings configuration DI has failed.")

    def testHDFRead(self):
        """Verifies HDF5 reading against a recorded data pickle."""
        pass_test = hdf_read_test(self.hdfpath, self.hdfpickle)
        self.log_results(pass_test, "HDF5 Read")
        self.assertTrue(pass_test, "HDF5 read test failed. Check the h5py module for updates.")

    def testGPUIntegratePyFAI(self):
        """Checks that pyFAI GPU-accelerated integration works."""
        pass_test = pyfai_gpu_integrate_test()
        self.log_results(pass_test, "pyFAI Integration")
        self.assertTrue(pass_test, "PyFAI GPU acceleration is unavailable on this machine.")

    def testOpenCLDevice(self):
        """Checks that an OpenCL GPU device is available."""
        pass_test = gpu_device_test()
        self.log_results(pass_test, "OpenCL GPU Device")
        self.assertTrue(pass_test, "No GPU devices found or pyopencl is not installed.")

    ############################ Non-Test Methods ##############################
    def log_results(self, pass_test, testname):
        """
        Save the result in the log file
        """
        if pass_test:
            result = 'pass'
        else:
            result = 'fail'
        with open(self.logname, 'a') as lf:
            lf.write("{tn} Test: {r}\n".format(tn=testname, r=result))
if __name__=="__main__":
unittest.main(verbosity=2)
|
import time
import numpy as np
import pandas as pd
import pickle as pkl
# Parameters. #
# Image resize bounds and scale-jitter range consumed by the downstream
# training pipeline (stored per record below).
min_side = 384
max_side = 384
l_jitter = 240
u_jitter = 384

# Load the VOC 2012 dataset. #
# NOTE(review): input/output paths are hard-coded to a local Windows machine.
print("Loading the data.")
start_tm = time.time()
tmp_path = "C:/Users/admin/Desktop/Data/VOCdevkit/VOC2012/"
tmp_pd_file = tmp_path + "voc_2012_objects.csv"
raw_voc_df = pd.read_csv(tmp_pd_file)
tmp_df_cols = ["filename", "width", "height",
               "xmin", "xmax", "ymin", "ymax", "label"]
raw_voc_df = pd.DataFrame(raw_voc_df, columns=tmp_df_cols)
# One row per annotated object; de-duplicate filenames to get the image list.
image_files = sorted(list(pd.unique(raw_voc_df["filename"])))
image_files = pd.DataFrame(image_files, columns=["filename"])

# The VOC data class names. #
# Label ids are assigned by sorted class-name order.
class_names = list(
    sorted(list(pd.unique(raw_voc_df["label"]))))
label_2_id = dict(
    [(class_names[x], x) for x in range(len(class_names))])
id_2_label = dict(
    [(x, class_names[x]) for x in range(len(class_names))])
elapsed_tm = (time.time() - start_tm) / 60.0
print("Total of", str(len(image_files)), "images in VOC dataset.")
print("Elapsed time:", str(elapsed_tm), "mins.")

# Format the data. #
# Build one record per image with normalized bounding boxes and label ids.
print("Formatting VOC data.")
start_tm = time.time()
voc_objects = []
for n_img in range(len(image_files)):
    img_file = image_files.iloc[n_img]["filename"]
    tmp_filter = raw_voc_df[
        raw_voc_df["filename"] == img_file]
    tmp_filter = tmp_filter[[
        "width", "height", "label",
        "xmin", "xmax", "ymin", "ymax"]]
    n_objects = len(tmp_filter)
    tmp_bboxes = []
    tmp_labels = []
    for n_obj in range(n_objects):
        tmp_object = tmp_filter.iloc[n_obj]
        img_width = tmp_object["width"]
        img_height = tmp_object["height"]
        # Normalize pixel coordinates to [0, 1] by the image dimensions.
        box_x_min = tmp_object["xmin"] / img_width
        box_x_max = tmp_object["xmax"] / img_width
        box_y_min = tmp_object["ymin"] / img_height
        box_y_max = tmp_object["ymax"] / img_height
        # Box layout: (x_min, y_min, x_max, y_max).
        tmp_bbox = np.array([
            box_x_min, box_y_min,
            box_x_max, box_y_max])
        tmp_label = np.array([
            label_2_id[tmp_object["label"]]])
        tmp_labels.append(np.expand_dims(tmp_label, axis=0))
        tmp_bboxes.append(np.expand_dims(tmp_bbox, axis=0))
    # Stack into (n_objects,) labels and (n_objects, 4) boxes.
    tmp_labels = np.concatenate(tmp_labels, axis=0)
    tmp_labels = tmp_labels.reshape((n_objects,))
    tmp_bboxes = np.concatenate(tmp_bboxes, axis=0)
    tmp_objects = {"bbox": tmp_bboxes,
                   "label": tmp_labels}
    voc_objects.append({
        "image": img_file,
        "min_side": min_side,
        "max_side": max_side,
        "l_jitter": l_jitter,
        "u_jitter": u_jitter,
        "objects": tmp_objects})
    # Progress report every 1000 images.
    if (n_img+1) % 1000 == 0:
        elapsed_tm = (time.time() - start_tm) / 60.0
        print(str(n_img+1), "images processed",
              "(" + str(elapsed_tm), "mins).")
elapsed_tm = (time.time() - start_tm) / 60.0
print("Total of", str(len(voc_objects)), "images.")
print("Elapsed Time:", str(round(elapsed_tm, 3)), "mins.")
print("Saving the file.")
# The pickle stream holds two objects in order: the id->label mapping,
# then the list of per-image records; readers must unpickle both.
save_pkl_file = tmp_path + "voc_data.pkl"
with open(save_pkl_file, "wb") as tmp_save:
    pkl.dump(id_2_label, tmp_save)
    pkl.dump(voc_objects, tmp_save)
print("VOC data processed.")
|
# -*- coding: utf-8 -*-
"""WMI Common Information Model (CIM) repository files."""
import glob
import hashlib
import logging
import os
from dtfabric import errors as dtfabric_errors
from dtfabric.runtime import data_maps as dtfabric_data_maps
from dtformats import data_format
from dtformats import errors
from dtformats import file_system
class ClassDefinitionProperty(object):
  """Property that is part of a CIM class definition.

  Attributes:
    index (int): index of the property.
    name (str): name of the property.
    qualifiers (dict[str, object]): qualifiers.
    value_data_offset (int): the property value data offset.
    value_data_type (int): the property value data type.
  """

  def __init__(self):
    """Initializes a class definition property with empty values."""
    super(ClassDefinitionProperty, self).__init__()
    self.qualifiers = {}
    self.index = None
    self.name = None
    self.value_data_offset = None
    self.value_data_type = None
class ClassValueDataMap(object):
  """Class value data map.

  Attributes:
    class_name (str): name of the class.
    derivation (list[str]): name of the classes this class is derived from.
    dynasty (str): name of the parent class of the parent class or None if not
        available.
    properties (dict[str, PropertyValueDataMap]): value data maps of
        the properties.
    properties_size (int): size of the properties in value data.
    super_class_name (str): name of the parent class or None if not available.
  """

  # Fixed value-data sizes (in bytes) per CIM property data type.  Types not
  # listed here fall back to 4 bytes in Build -- presumably stored as a
  # 4-byte offset/reference; TODO confirm against the format documentation.
  _PROPERTY_TYPE_VALUE_DATA_SIZE = {
      0x00000002: 2,
      0x00000003: 4,
      0x00000004: 4,
      0x00000005: 8,
      0x0000000b: 2,
      0x00000010: 1,
      0x00000011: 1,
      0x00000012: 2,
      0x00000013: 4,
      0x00000014: 8,
      0x00000015: 8}

  def __init__(self):
    """Initializes a class value data map."""
    super(ClassValueDataMap, self).__init__()
    self.class_name = None
    self.derivation = []
    self.dynasty = None
    self.properties = {}
    self.properties_size = 0
    self.super_class_name = None

  def Build(self, class_definitions):
    """Builds the class map from the class definitions.

    Args:
      class_definitions (list[ClassDefinition]): the class definition and its
          parent class definitions.

    Raises:
      ParseError: if the class map cannot be build.
    """
    # The class itself is last; its ancestors precede it.
    self.class_name = class_definitions[-1].name
    # Reverse so the immediate parent comes first in the derivation.
    self.derivation = [
        class_definition.name for class_definition in class_definitions[:-1]]
    self.derivation.reverse()
    derivation_length = len(self.derivation)
    if derivation_length >= 1:
      self.super_class_name = self.derivation[0]
    if derivation_length >= 2:
      self.dynasty = self.derivation[1]
    largest_offset = None
    largest_property_map = None
    for class_definition in class_definitions:
      for name, property_definition in class_definition.properties.items():
        type_qualifier = property_definition.qualifiers.get('type', None)
        if not type_qualifier:
          # A missing type qualifier is tolerated when a parent class already
          # contributed a value data map for this property.
          name_lower = property_definition.name.lower()
          if name_lower in self.properties:
            continue
          raise errors.ParseError((
              f'Missing type qualifier for property: '
              f'{property_definition.name:s} of class: '
              f'{class_definition.name:s}'))
        value_data_size = self._PROPERTY_TYPE_VALUE_DATA_SIZE.get(
            property_definition.value_data_type, 4)
        property_map = PropertyValueDataMap(
            property_definition.name, property_definition.value_data_type,
            property_definition.value_data_offset, value_data_size)
        # Strip a subtype suffix such as "object:SomeClass" down to "object".
        type_qualifier_lower = type_qualifier.lower()
        if ':' in type_qualifier_lower:
          type_qualifier_lower, _, _ = type_qualifier_lower.partition(':')
        property_map.type_qualifier = type_qualifier_lower
        # TODO: compare property_map against property map of parent classes.
        self.properties[name.lower()] = property_map
        # Track the property with the largest offset so the total value data
        # size can be derived below.
        if (largest_offset is None or
            largest_offset < property_definition.value_data_offset):
          largest_offset = property_definition.value_data_offset
          largest_property_map = property_map
    if largest_property_map:
      self.properties_size = (
          largest_property_map.offset + largest_property_map.size)
class IndexBinaryTreePage(object):
  """Single page of the index binary-tree (Index.btr) file.

  Attributes:
    keys (list[str]): index binary-tree keys.
    number_of_keys (int): number of keys.
    page_key_segments (list[bytes]): page key segments.
    page_type (int): page type.
    page_value_offsets (list[int]): page value offsets.
    page_values (list[bytes]): page values.
    root_page_number (int): root page number.
    sub_pages (list[int]): sub page numbers.
  """

  def __init__(self):
    """Initializes an empty index binary-tree page."""
    super(IndexBinaryTreePage, self).__init__()
    # Scalars parsed from the page header/body.
    self.number_of_keys = None
    self.page_type = None
    self.page_value_offsets = None
    self.root_page_number = None
    # Collections filled in while reading the page body.
    self.keys = []
    self.page_key_segments = []
    self.page_values = []
    self.sub_pages = []
class MappingTable(object):
  """Wrapper around a parsed mapping table structure."""

  def __init__(self, mapping_table):
    """Initializes a mapping table.

    Args:
      mapping_table (mapping_table): mapping table.
    """
    super(MappingTable, self).__init__()
    self._mapping_table = mapping_table

  def ResolveMappedPageNumber(self, mapped_page_number):
    """Resolves a mapped page number.

    Args:
      mapped_page_number (int): mapped page number.

    Returns:
      int: (physical) page number.
    """
    # The mapped page number is an index into the table entries.
    entry = self._mapping_table.entries[mapped_page_number]
    return entry.page_number
class ObjectsDataPage(object):
  """A single page of the objects data file.

  Attributes:
    page_offset (int): offset of the page relative to the start of the file.
  """

  def __init__(self, page_offset):
    """Initializes an objects data page.

    Args:
      page_offset (int): offset of the page relative to the start of the file.
    """
    super(ObjectsDataPage, self).__init__()
    self._object_descriptors = []
    self.page_offset = page_offset

  def AppendObjectDescriptor(self, object_descriptor):
    """Appends an object descriptor.

    Args:
      object_descriptor (cim_object_descriptor): object descriptor.
    """
    self._object_descriptors.append(object_descriptor)

  def GetObjectDescriptor(self, record_identifier, data_size):
    """Retrieves a specific object descriptor.

    Args:
      record_identifier (int): object record identifier.
      data_size (int): object record data size.

    Returns:
      cim_object_descriptor: an object descriptor or None.
    """
    # First descriptor whose identifier matches, or None.
    match = next(
        (descriptor for descriptor in self._object_descriptors
         if descriptor.identifier == record_identifier), None)
    if not match:
      logging.warning('Object record data not found.')
      return None

    # A differing size indicates a stale or inconsistent descriptor.
    if match.data_size != data_size:
      logging.warning('Object record data size mismatch.')
      return None

    return match
class ObjectRecord(object):
  """Record of a single object.

  Attributes:
    data (bytes): object record data.
    data_type (str): object record data type.
    offset (int): offset of the record data.
  """

  def __init__(self, data_type, data, offset=0):
    """Initializes an object record.

    Args:
      data_type (str): object record data type.
      data (bytes): object record data.
      offset (int): offset of the record data.
    """
    super(ObjectRecord, self).__init__()
    self.data_type = data_type
    self.data = data
    self.offset = offset
class PropertyValueDataMap(object):
  """Maps a single property into the value data of an object.

  Attributes:
    data_type (int): property value data type.
    name (str): name of the property.
    offset (int): offset of the property in value data.
    size (int): size of the property in value data.
    type_qualifier (str): type qualifier of the property.
  """

  def __init__(self, name, data_type, offset, size):
    """Initializes a property value data map.

    Args:
      name (str): name of the property.
      data_type (int): property value data type.
      offset (int): offset of the property in value data.
      size (int): size of the property in value data.
    """
    super(PropertyValueDataMap, self).__init__()
    self.name = name
    self.data_type = data_type
    self.offset = offset
    self.size = size
    # Filled in later by the caller once the qualifier is known.
    self.type_qualifier = None
class IndexBinaryTreeFile(data_format.BinaryDataFile):
  """Index binary-tree (Index.btr) file."""

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric and dtFormats definition files.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('wmi_repository.yaml')

  _DEBUG_INFORMATION = data_format.BinaryDataFile.ReadDebugInformationFile(
      'wmi_repository.debug.yaml', custom_format_callbacks={
          'page_type': '_FormatIntegerAsPageType'})

  # Pages have a fixed size; file offset = page number * _PAGE_SIZE.
  _PAGE_SIZE = 8192

  _STRING = _FABRIC.CreateDataTypeMap('string')

  _PAGE_TYPES = {
      0xaccc: 'Is active',
      0xaddd: 'Is administrative',
      0xbadd: 'Is deleted'}

  _KEY_SEGMENT_SEPARATOR = '\\'

  def __init__(self, debug=False, output_writer=None):
    """Initializes an index binary-tree file.

    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super(IndexBinaryTreeFile, self).__init__(
        debug=debug, output_writer=output_writer)
    self._unavailable_page_numbers = set([0, 0xffffffff])

  def _DebugPrintPageBody(self, page_body):
    """Prints page body debug information.

    Args:
      page_body (cim_page_body): page body.
    """
    self._DebugPrintDecimalValue('Number of keys', page_body.number_of_keys)
    for index, value in enumerate(page_body.unknown2):
      self._DebugPrintDecimalValue(f'Unknown2: {index:d}', value)
    for index, page_number in enumerate(page_body.sub_pages):
      # Bug fix: _FormatIntegerAsPageNumber returns a plain string (see its
      # definition below), so the previous tuple unpacking
      # (value_string, _ = ...) split the string into characters or raised.
      value_string = self._FormatIntegerAsPageNumber(page_number)
      self._DebugPrintValue(
          f'Sub page: {index:d} mapped page number', value_string)
    for index, key_offset in enumerate(page_body.key_offsets):
      # NOTE(review): treated like _FormatIntegerAsPageNumber, i.e. the
      # _FormatIntegerAs* helpers in data_format are assumed to return a
      # plain string as well -- confirm against data_format.
      value_string = self._FormatIntegerAsOffset(key_offset)
      self._DebugPrintValue(f'Key: {index:d} offset', value_string)
    # Sizes are stored in 16-bit units -- hence the *2 for the byte count.
    number_of_bytes = page_body.key_data_size * 2
    self._DebugPrintValue(
        'Key data size',
        f'{page_body.key_data_size:d} ({number_of_bytes:d} bytes)')
    self._DebugPrintData('Key data', page_body.key_data)
    self._DebugPrintDecimalValue(
        'Number of values', page_body.number_of_values)
    for index, offset in enumerate(page_body.value_offsets):
      value_string = self._FormatIntegerAsOffset(offset)
      self._DebugPrintValue(f'Value: {index:d} offset', value_string)
    number_of_bytes = page_body.value_data_size * 2
    self._DebugPrintValue(
        'Value data size',
        f'{page_body.value_data_size:d} ({number_of_bytes:d} bytes)')
    self._DebugPrintData('Value data', page_body.value_data)

  def _FormatIntegerAsPageNumber(self, integer):
    """Formats an integer as a page number.

    Args:
      integer (int): integer.

    Returns:
      str: integer formatted as a page number.
    """
    if integer in self._unavailable_page_numbers:
      return f'0x{integer:08x} (unavailable)'
    return f'{integer:d}'

  def _FormatIntegerAsPageType(self, integer):
    """Formats an integer as a page type.

    Args:
      integer (int): integer.

    Returns:
      str: integer formatted as a page type.
    """
    page_type_string = self._PAGE_TYPES.get(integer, 'Unknown')
    return f'0x{integer:04x} ({page_type_string:s})'

  def _ReadPage(self, file_object, file_offset):
    """Reads a page.

    Args:
      file_object (file): a file-like object.
      file_offset (int): offset of the page relative to the start of the file.

    Returns:
      IndexBinaryTreePage: an index binary-tree page.

    Raises:
      ParseError: if the page cannot be read.
    """
    if self._debug:
      page_number = file_offset // self._PAGE_SIZE
      self._DebugPrintText((
          f'Reading page: {page_number:d} at offset: {file_offset:d} '
          f'(0x{file_offset:08x}).\n'))
    page_data = self._ReadData(
        file_object, file_offset, self._PAGE_SIZE, 'index binary-tree page')
    # The first 16 bytes of the page are the page header.
    page_header = self._ReadPageHeader(file_offset, page_data[:16])
    file_offset += 16
    page_data_offset = 16
    index_binary_tree_page = IndexBinaryTreePage()
    index_binary_tree_page.page_type = page_header.page_type
    index_binary_tree_page.root_page_number = page_header.root_page_number
    # Only active (0xaccc) pages carry a page body.
    if page_header.page_type == 0xaccc:
      page_body_data = page_data[page_data_offset:]
      data_type_map = self._GetDataTypeMap('cim_page_body')
      context = dtfabric_data_maps.DataTypeMapContext()
      page_body = self._ReadStructureFromByteStream(
          page_body_data, file_offset, data_type_map, 'page body',
          context=context)
      page_data_offset += context.byte_size
      if self._debug:
        # NOTE(review): page_data_offset includes the 16-byte header here,
        # so this prints 16 bytes beyond the parsed body -- confirm intent.
        self._DebugPrintData(
            'Page body data', page_body_data[:page_data_offset])
        self._DebugPrintPageBody(page_body)
    if self._debug:
      trailing_data_size = self._PAGE_SIZE - page_data_offset
      if trailing_data_size > 0:
        self._DebugPrintData('Trailing data', page_data[page_data_offset:])
    if page_header.page_type == 0xaccc:
      index_binary_tree_page.number_of_keys = page_body.number_of_keys
      for page_number in page_body.sub_pages:
        # 0 and 0xffffffff mark unavailable sub pages.
        if page_number not in (0, 0xffffffff):
          index_binary_tree_page.sub_pages.append(page_number)
      index_binary_tree_page.page_value_offsets = page_body.value_offsets
      # TODO: return page_key_segments
      self._ReadPageKeyData(index_binary_tree_page, page_body)
      self._ReadPageValueData(index_binary_tree_page, page_body)
      # Resolve each key from its segment indexes into a backslash-separated
      # key path, e.g. "\segment1\segment2".
      index_binary_tree_page.keys = []
      for page_key_segments in index_binary_tree_page.page_key_segments:
        key_segments = []
        for segment_index in page_key_segments:
          page_value = index_binary_tree_page.page_values[segment_index]
          key_segments.append(page_value)
        key_path = ''.join([
            self._KEY_SEGMENT_SEPARATOR,
            self._KEY_SEGMENT_SEPARATOR.join(key_segments)])
        index_binary_tree_page.keys.append(key_path)
    return index_binary_tree_page

  def _ReadPageHeader(self, file_offset, page_header_data):
    """Reads a page header.

    Args:
      file_offset (int): offset of the page header relative to the start of
          the file.
      page_header_data (bytes): page header data.

    Returns:
      page_header: page header.

    Raises:
      ParseError: if the page header cannot be read.
    """
    if self._debug:
      self._DebugPrintData('Page header data', page_header_data)
    data_type_map = self._GetDataTypeMap('cim_page_header')
    page_header = self._ReadStructureFromByteStream(
        page_header_data, file_offset, data_type_map, 'page header')
    if self._debug:
      debug_info = self._DEBUG_INFORMATION.get('cim_page_header', None)
      self._DebugPrintStructureObject(page_header, debug_info)
    return page_header

  def _ReadPageKeyData(self, index_binary_tree_page, page_body):
    """Reads page key data.

    Args:
      index_binary_tree_page (IndexBinaryTreePage): index binary-tree page.
      page_body (cim_page_body): page body.

    Raises:
      ParseError: if the page key data cannot be read.
    """
    key_data = page_body.key_data
    data_type_map = self._GetDataTypeMap('cim_page_key')
    for page_key_index, key_offset in enumerate(page_body.key_offsets):
      # Key offsets are stored in 16-bit units; convert to a byte offset.
      page_key_offset = key_offset * 2
      if self._debug:
        # Bug fix: _FormatIntegerAsOffset is assumed to return a plain
        # string (same convention as _FormatIntegerAsPageNumber) -- the
        # previous tuple unpacking was incorrect.
        value_string = self._FormatIntegerAsOffset(page_key_offset)
        self._DebugPrintValue(
            f'Page key: {page_key_index:d} offset', value_string)
      context = dtfabric_data_maps.DataTypeMapContext()
      page_key = self._ReadStructureFromByteStream(
          key_data[page_key_offset:], page_key_offset, data_type_map,
          f'page key: {page_key_index:d}', context=context)
      if self._debug:
        page_key_end_offset = page_key_offset + context.byte_size
        self._DebugPrintData(
            f'Page key: {page_key_index:d} data:',
            key_data[page_key_offset:page_key_end_offset])
      index_binary_tree_page.page_key_segments.append(page_key.segments)
      if self._debug:
        self._DebugPrintDecimalValue(
            f'Page key: {page_key_index:d} number of segments',
            page_key.number_of_segments)
        value_string = ', '.join([
            f'{segment_index:d}' for segment_index in page_key.segments])
        self._DebugPrintValue(
            f'Page key: {page_key_index:d} segments', value_string)
        self._DebugPrintText('\n')

  def _ReadPageValueData(self, index_binary_tree_page, page_body):
    """Reads page value data.

    Args:
      index_binary_tree_page (IndexBinaryTreePage): index binary-tree page.
      page_body (cim_page_body): page body.

    Raises:
      ParseError: if the page value data cannot be read.
    """
    value_data = page_body.value_data
    for index, page_value_offset in enumerate(
        index_binary_tree_page.page_value_offsets):
      # TODO: determine size
      try:
        value_string = self._STRING.MapByteStream(
            value_data[page_value_offset:])
      except dtfabric_errors.MappingError as exception:
        raise errors.ParseError((
            f'Unable to parse page value: {index:d} string with error: '
            f'{exception!s}'))
      if self._debug:
        self._DebugPrintValue(f'Page value: {index:d} data', value_string)
      index_binary_tree_page.page_values.append(value_string)
    if self._debug and index_binary_tree_page.page_value_offsets:
      self._DebugPrintText('\n')

  def GetPage(self, page_number):
    """Retrieves a specific page.

    Args:
      page_number (int): page number.

    Returns:
      IndexBinaryTreePage: an index binary-tree page or None.
    """
    file_offset = page_number * self._PAGE_SIZE
    if file_offset >= self._file_size:
      return None
    # TODO: cache pages.
    return self._ReadPage(self._file_object, file_offset)

  def ReadFileObject(self, file_object):
    """Reads an index binary-tree file-like object.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the file cannot be read.
    """
    # Pages are otherwise read on demand via GetPage; in debug mode walk
    # every page up front so their contents are printed.
    if self._debug:
      file_offset = 0
      while file_offset < self._file_size:
        self._ReadPage(file_object, file_offset)
        file_offset += self._PAGE_SIZE
class MappingFile(data_format.BinaryDataFile):
  """Mappings (*.map) file.

  Attributes:
    format_version (int): format version of the mapping file.
    sequence_number (int): sequence number.
  """

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric and dtFormats definition files.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('wmi_repository.yaml')

  _DEBUG_INFORMATION = data_format.BinaryDataFile.ReadDebugInformationFile(
      'wmi_repository.debug.yaml', custom_format_callbacks={
          'page_number': '_FormatIntegerAsPageNumber'})

  def __init__(self, debug=False, output_writer=None):
    """Initializes a mappings file.

    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super(MappingFile, self).__init__(debug=debug, output_writer=output_writer)
    # First and optional second mapping tables read by ReadFileObject.
    self._mapping_table1 = None
    self._mapping_table2 = None
    # Page numbers that _FormatIntegerAsPageNumber reports as unavailable.
    self._unavailable_page_numbers = set([0xffffffff])
    self.format_version = None
    self.sequence_number = None

  def _DebugPrintMappingTable(self, mapping_table):
    """Prints a mapping table debug information.

    Args:
      mapping_table (mapping_table): mapping table.
    """
    self._DebugPrintText('Mapping table:\n')
    self._DebugPrintDecimalValue(
        ' Number of entries', mapping_table.number_of_entries)
    self._DebugPrintText('\n')
    for index, mapping_table_entry in enumerate(mapping_table.entries):
      debug_info = self._DEBUG_INFORMATION.get('cim_map_table_entry', None)
      self._DebugPrintText(f' Entry: {index:d}:\n')
      self._DebugPrintStructureObject(mapping_table_entry, debug_info)
    self._DebugPrintText('\n')

  def _DebugPrintUnknownTable(self, unknown_table):
    """Prints an unknown table debug information.

    Args:
      unknown_table (unknown_table): unknown table.
    """
    self._DebugPrintText('Unknown table:\n')
    self._DebugPrintDecimalValue(
        ' Number of entries', unknown_table.number_of_entries)
    for index, page_number in enumerate(unknown_table.entries):
      value_string, _ = self._FormatIntegerAsPageNumber(page_number)
      self._DebugPrintValue(f' Entry: {index:d} page number', value_string)
    self._DebugPrintText('\n')

  def _FormatIntegerAsPageNumber(self, integer):
    """Formats an integer as a page number.

    Args:
      integer (int): integer.

    Returns:
      str: integer formatted as a page number, marked "(unavailable)" for
          page numbers in the unavailable set (such as 0xffffffff).
    """
    if integer in self._unavailable_page_numbers:
      return f'0x{integer:08x} (unavailable)'
    return f'{integer:d}'

  def _ReadDetermineFormatVersion(self, file_object):
    """Reads the file header to determine the format version.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the file header cannot be read.
    """
    file_object.seek(0, os.SEEK_SET)
    # Read the start of the file as a format version 2 header; the relation
    # between the unknown1 and unknown2 values is used to distinguish
    # format version 2 from version 1.
    file_header = self._ReadFileHeader(file_object, format_version=2)
    if file_header.unknown1 == file_header.unknown2 + 1:
      self.format_version = 2
    else:
      self.format_version = 1

  def _ReadFileFooter(self, file_object):
    """Reads the file footer.

    Args:
      file_object (file): file-like object.

    Returns:
      cim_map_footer: file footer.

    Raises:
      ParseError: if the file footer cannot be read.
    """
    file_offset = file_object.tell()
    data_type_map = self._GetDataTypeMap('cim_map_footer')
    file_footer, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'file footer')
    if self._debug:
      debug_info = self._DEBUG_INFORMATION.get('cim_map_footer', None)
      self._DebugPrintStructureObject(file_footer, debug_info)
    return file_footer

  def _ReadFileHeader(self, file_object, format_version=None):
    """Reads the file header.

    Args:
      file_object (file): file-like object.
      format_version (Optional[int]): format version; the version 2 header
          layout is used unless 1 is specified.

    Returns:
      cim_map_header: file header.

    Raises:
      ParseError: if the file header cannot be read.
    """
    file_offset = file_object.tell()
    # Format versions 1 and 2 use different header layouts.
    if format_version == 1:
      data_type_map = self._GetDataTypeMap('cim_map_header_v1')
    else:
      data_type_map = self._GetDataTypeMap('cim_map_header_v2')
    file_header, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'file header')
    if self._debug:
      debug_info = self._DEBUG_INFORMATION.get('cim_map_header', None)
      self._DebugPrintStructureObject(file_header, debug_info)
    return file_header

  def _ReadMappingTable(self, file_object):
    """Reads the mapping tables.

    Args:
      file_object (file): file-like object.

    Returns:
      mapping_table: mapping table.

    Raises:
      ParseError: if the mappings cannot be read.
    """
    file_offset = file_object.tell()
    # Format versions 1 and 2 use different mapping table layouts.
    if self.format_version == 1:
      data_type_map = self._GetDataTypeMap('cim_map_mapping_table_v1')
    else:
      data_type_map = self._GetDataTypeMap('cim_map_mapping_table_v2')
    mapping_table, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'mapping table')
    if self._debug:
      self._DebugPrintMappingTable(mapping_table)
    return mapping_table

  def _ReadUnknownTable(self, file_object):
    """Reads the unknown tables.

    Args:
      file_object (file): file-like object.

    Returns:
      unknown_table: unknown table.

    Raises:
      ParseError: if the unknowns cannot be read.
    """
    file_offset = file_object.tell()
    data_type_map = self._GetDataTypeMap('cim_map_unknown_table')
    unknown_table, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'unknown table')
    if self._debug:
      self._DebugPrintUnknownTable(unknown_table)
    return unknown_table

  def GetIndexMappingTable(self):
    """Retrieves the index mapping table.

    Returns:
      MappingTable: index mapping table.
    """
    # Prefer the second mapping table when it was present in the file.
    return MappingTable(self._mapping_table2 or self._mapping_table1)

  def GetObjectsMappingTable(self):
    """Retrieves the objects mapping table.

    Returns:
      MappingTable: objects mapping table.
    """
    return MappingTable(self._mapping_table1)

  def ReadFileObject(self, file_object):
    """Reads a mappings file-like object.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the file cannot be read.
    """
    self._ReadDetermineFormatVersion(file_object)
    # Re-read the header from the start now the format version is known.
    file_object.seek(0, os.SEEK_SET)
    file_header = self._ReadFileHeader(
        file_object, format_version=self.format_version)
    self.sequence_number = file_header.sequence_number
    self._mapping_table1 = self._ReadMappingTable(file_object)
    self._ReadUnknownTable(file_object)
    self._ReadFileFooter(file_object)
    file_offset = file_object.tell()
    # A second header/table/footer sequence is mandatory in format version 2
    # and read opportunistically in version 1 when more data remains.
    if self.format_version == 2 or file_offset < self._file_size:
      try:
        file_header = self._ReadFileHeader(
            file_object, format_version=self.format_version)
      except errors.ParseError:
        file_header = None
      if not file_header and self.format_version == 2:
        raise errors.ParseError('Unable to read second file header.')
      # Seen trailing data in Windows XP objects.map file.
      if file_header:
        self._mapping_table2 = self._ReadMappingTable(file_object)
        self._ReadUnknownTable(file_object)
        self._ReadFileFooter(file_object)
class ObjectsDataFile(data_format.BinaryDataFile):
  """An objects data (Objects.data) file."""

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric and dtFormats definition files.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('wmi_repository.yaml')

  _DEBUG_INFORMATION = data_format.BinaryDataFile.ReadDebugInformationFile(
      'wmi_repository.debug.yaml', custom_format_callbacks={
          'offset': '_FormatIntegerAsOffset'})

  # An all 0-byte object descriptor marks the end of the descriptors on
  # a page (terminator).
  _EMPTY_OBJECT_DESCRIPTOR = b'\x00' * 16

  _PAGE_SIZE = 8192

  def _ReadObjectDescriptor(self, file_object):
    """Reads an object descriptor.

    Args:
      file_object (file): a file-like object.

    Returns:
      cim_object_descriptor: an object descriptor or None if the object
          descriptor is empty.

    Raises:
      ParseError: if the object descriptor cannot be read.
    """
    file_offset = file_object.tell()
    if self._debug:
      self._DebugPrintText((
          f'Reading object descriptor at offset: {file_offset:d} '
          f'(0x{file_offset:08x})\n'))
    object_descriptor_data = file_object.read(16)
    if self._debug:
      self._DebugPrintData('Object descriptor data', object_descriptor_data)
    # The last object descriptor (terminator) is filled with 0-byte values.
    if object_descriptor_data == self._EMPTY_OBJECT_DESCRIPTOR:
      return None
    data_type_map = self._GetDataTypeMap('cim_object_descriptor')
    object_descriptor = self._ReadStructureFromByteStream(
        object_descriptor_data, file_offset, data_type_map, 'object descriptor')
    # Attach the offset of the record data relative to the start of the file:
    # the descriptor's data_offset plus the descriptor's own file offset.
    setattr(object_descriptor, 'data_file_offset',
        file_offset + object_descriptor.data_offset)
    if self._debug:
      debug_info = self._DEBUG_INFORMATION.get('cim_object_descriptor', None)
      self._DebugPrintStructureObject(object_descriptor, debug_info)
    return object_descriptor

  def _ReadObjectDescriptors(self, file_object, objects_page):
    """Reads object descriptors.

    Descriptors are read until the all 0-byte terminator descriptor is
    encountered and appended to the objects data page.

    Args:
      file_object (file): a file-like object.
      objects_page (ObjectsDataPage): objects data page.

    Raises:
      ParseError: if the object descriptor cannot be read.
    """
    while True:
      object_descriptor = self._ReadObjectDescriptor(file_object)
      if not object_descriptor:
        break
      objects_page.AppendObjectDescriptor(object_descriptor)

  def _ReadPage(self, file_object, file_offset, is_data_page):
    """Reads a page.

    Args:
      file_object (file): a file-like object.
      file_offset (int): offset of the page relative to the start of the file.
      is_data_page (bool): True if the page is a data page.

    Raises:
      ParseError: if the page cannot be read.

    Returns:
      ObjectsDataPage: objects data page or None.
    """
    file_object.seek(file_offset, os.SEEK_SET)
    if self._debug:
      self._DebugPrintText((
          f'Reading objects data page at offset: {file_offset:d} '
          f'(0x{file_offset:08x})\n'))
    objects_page = ObjectsDataPage(file_offset)
    # Only non-data pages contain object descriptors; data pages hold the
    # record data itself.
    if not is_data_page:
      self._ReadObjectDescriptors(file_object, objects_page)
    return objects_page

  def GetPage(self, page_number, is_data_page):
    """Retrieves a specific page.

    Args:
      page_number (int): page number.
      is_data_page (bool): True if the page is a data page.

    Returns:
      ObjectsDataPage: objects data page or None when the page number falls
          outside the file.
    """
    file_offset = page_number * self._PAGE_SIZE
    if file_offset >= self._file_size:
      return None
    return self._ReadPage(self._file_object, file_offset, is_data_page)

  def ReadFileObject(self, file_object):
    """Reads an objects data file-like object.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the file cannot be read.
    """
    # Pages are read on demand via GetPage; only keep a reference to the
    # file-like object here.
    self._file_object = file_object

  def ReadObjectRecordDataSegment(self, objects_page, data_offset, data_size):
    """Reads a data segment of an object record.

    Args:
      objects_page (ObjectsDataPage): objects data page.
      data_offset (int): offset of the object record data relative to
          the start of the page.
      data_size (int): object record data size.

    Returns:
      bytes: object record data segment.

    Raises:
      ParseError: if the object record data segment cannot be read.
    """
    # Make the offset relative to the start of the file.
    file_offset = objects_page.page_offset + data_offset
    self._file_object.seek(file_offset, os.SEEK_SET)
    if self._debug:
      self._DebugPrintText((
          f'Reading object record data segment at offset: {file_offset:d} '
          f'(0x{file_offset:08x})\n'))
    # Clamp the read to the end of the page; a record larger than the
    # remainder of the page continues in another segment.
    available_page_size = self._PAGE_SIZE - data_offset
    if data_size > available_page_size:
      read_size = available_page_size
    else:
      read_size = data_size
    return self._file_object.read(read_size)
class RepositoryFile(data_format.BinaryDataFile):
  """Repository file."""

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric and dtFormats definition files.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('wmi_repository.yaml')

  _DEBUG_INFORMATION = data_format.BinaryDataFile.ReadDebugInformationFile(
      'wmi_repository.debug.yaml', custom_format_callbacks={
          'offset': '_FormatIntegerAsOffset'})

  # Debug information layout for an instance root node: tuples of
  # (attribute name, description, formatter method name).
  _DEBUG_INFO_INSTANCE_ROOT_NODE = [
      ('child_objects_root_node_offset', 'Child objects root node offset',
       '_FormatIntegerAsOffset'),
      ('name_node_offset', 'Name node offset', '_FormatIntegerAsOffset'),
      ('instance_branch_node_offset', 'Instance branch node offset',
       '_FormatIntegerAsOffset'),
      ('unknown1', 'Unknown1', '_FormatIntegerAsDecimal'),
      ('child_objects_list_node_offset', 'Child objects list node offset',
       '_FormatIntegerAsOffset'),
      ('unknown2', 'Unknown2', '_FormatIntegerAsOffset'),
      ('unknown_node5_offset', 'Unknown node 5 offset',
       '_FormatIntegerAsOffset'),
      ('footer', 'Footer', '_FormatDataInHexadecimal')]
def __init__(self, debug=False, output_writer=None):
"""Initializes a repository file.
Args:
debug (Optional[bool]): True if debug information should be written.
output_writer (Optional[OutputWriter]): output writer.
"""
super(RepositoryFile, self).__init__(
debug=debug, output_writer=output_writer)
self._root_namespace_node_offset = None
self._system_class_definition_root_node_offset = None
  def _ReadChildObjectsList(self, file_object, list_node_offset):
    """Reads a child objects list.

    Walks the singly-linked list of list element nodes and yields the value
    node offset of each element.

    Args:
      file_object (file): file-like object.
      list_node_offset (int): offset of the list node relative to the start
          of the file.

    Yields:
      int: element value offset.
    """
    # Node offsets reference the node data; the 4-byte node cell header
    # precedes it, hence the - 4.
    node_cell = self._ReadNodeCell(file_object, list_node_offset - 4)
    list_node = self._ReadChildObjectsListNode(node_cell.data, list_node_offset)
    # list_element is only used to number the debug output, starting at 1.
    list_element = 1
    next_list_element_node_offset = list_node.first_list_element_node_offset
    # NOTE(review): offsets of 40 or less appear to be used as "not set"
    # throughout this class — confirm against the format specification.
    while next_list_element_node_offset > 40:
      if self._debug:
        self._DebugPrintText(f'Reading list element: {list_element:d}\n')
      node_cell = self._ReadNodeCell(
          file_object, next_list_element_node_offset - 4)
      list_element_node = self._ReadChildObjectsListElementNode(
          node_cell.data, next_list_element_node_offset)
      # The name node is read for its (debug) side effects only; its value
      # is not used here.
      if list_element_node.name_node_offset > 40:
        node_cell = self._ReadNodeCell(
            file_object, list_element_node.name_node_offset - 4)
        self._ReadNameNode(node_cell.data, list_element_node.name_node_offset)
      yield list_element_node.value_node_offset
      list_element += 1
      next_list_element_node_offset = (
          list_element_node.next_list_element_node_offset)
def _ReadChildObjectsListNode(self, block_data, file_offset):
"""Reads a child objects list node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_child_objects_list_node: child objects list node.
Raises:
ParseError: if the child objects list node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading child objects list node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_child_objects_list_node')
list_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'child objects list node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_child_objects_list_node', None)
self._DebugPrintStructureObject(list_node, debug_info)
return list_node
def _ReadChildObjectsListElementNode(self, block_data, file_offset):
"""Reads a child objects list element node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_child_objects_list_element_node: child objects list element node.
Raises:
ParseError: if the list element node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading child objects list element node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap(
'cim_rep_child_objects_list_element_node')
list_element_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map,
'child objects list element node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_child_objects_list_element_node', None)
self._DebugPrintStructureObject(list_element_node, debug_info)
return list_element_node
  def _ReadChildObjectsTree(self, file_object, root_node_offset):
    """Reads a child objects tree.

    Args:
      file_object (file): file-like object.
      root_node_offset (int): offset of the root node relative to the start of
          the file.

    Yields:
      int: leaf value offset.
    """
    node_cell = self._ReadNodeCell(file_object, root_node_offset - 4)
    root_node = self._ReadChildObjectsTreeRootNode(
        node_cell.data, root_node_offset)
    if root_node.depth == 1:
      # NOTE(review): at depth 1 the branch node offset appears to reference
      # the value directly — confirm against the format specification.
      yield root_node.branch_node_offset
    elif root_node.depth == 2:
      # Offsets of 40 or less are treated as not set.
      if root_node.branch_node_offset > 40:
        node_cell = self._ReadNodeCell(
            file_object, root_node.branch_node_offset - 4)
        branch_node = self._ReadChildObjectsTreeBranchNode(
            node_cell.data, root_node.branch_node_offset)
        if branch_node.leaf_node_offset > 40:
          node_cell = self._ReadNodeCell(
              file_object, branch_node.leaf_node_offset - 4)
          leaf_node = self._ReadChildObjectsTreeLeafNode(
              node_cell.data, branch_node.leaf_node_offset)
          # A leaf node carries 10 value node offset members; unset entries
          # are filtered by the > 40 checks in the callers.
          for node_offset in (
              leaf_node.value_node_offset1,
              leaf_node.value_node_offset2,
              leaf_node.value_node_offset3,
              leaf_node.value_node_offset4,
              leaf_node.value_node_offset5,
              leaf_node.value_node_offset6,
              leaf_node.value_node_offset7,
              leaf_node.value_node_offset8,
              leaf_node.value_node_offset9,
              leaf_node.value_node_offset10):
            yield node_offset
# TODO: rename
def _ReadChildObjectsTreeBranchNode(self, block_data, file_offset):
"""Reads a child objects tree branch node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_child_objects_branch_node: child objects branch node.
Raises:
ParseError: if the child objects branch node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading child objects branch node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_child_objects_branch_node')
branch_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'child objects branch node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_child_objects_branch_node', None)
self._DebugPrintStructureObject(branch_node, debug_info)
return branch_node
def _ReadChildObjectsTreeLeafNode(self, block_data, file_offset):
"""Reads a child objects tree leaf node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_child_objects_leaf_node: child objects leaf node.
Raises:
ParseError: if the child objects leaf node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading child objects leaf node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_child_objects_leaf_node')
leaf_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'child objects leaf node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_child_objects_leaf_node', None)
self._DebugPrintStructureObject(leaf_node, debug_info)
return leaf_node
# TODO: rename
def _ReadChildObjectsTreeRootNode(self, block_data, file_offset):
"""Reads a child objects tree root node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_child_objects_root_node: child objects root node.
Raises:
ParseError: if the child objects root node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading child objects root node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_child_objects_root_node')
root_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'child objects root node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_child_objects_root_node', None)
self._DebugPrintStructureObject(root_node, debug_info)
return root_node
# TODO: rename
def _ReadClassDefinitionBranchNode(self, block_data, file_offset):
"""Reads a class definition branch node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_class_definition_branch_node: class definition branch node.
Raises:
ParseError: if the class definition branch node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading class definition branch node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_class_definition_branch_node')
class_definition_branch_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'class definition branch node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_class_definition_branch_node', None)
self._DebugPrintStructureObject(class_definition_branch_node, debug_info)
return class_definition_branch_node
def _ReadClassDefinitionLeafNode(self, block_data, file_offset):
"""Reads a class definition leaf node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_class_definition_leaf_node: class definition leaf node.
Raises:
ParseError: if the class definition leaf node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading class definition leaf node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_class_definition_leaf_node')
class_definition_leaf_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'class definition leaf node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_class_definition_leaf_node', None)
self._DebugPrintStructureObject(class_definition_leaf_node, debug_info)
return class_definition_leaf_node
def _ReadClassDefinitionRootNode(self, block_data, file_offset):
"""Reads a class definition root node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_class_definition_root_node: class definition root node.
Raises:
ParseError: if the class definition root node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading class definition root node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_class_definition_root_node')
class_definition_root_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'class definition root node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_class_definition_root_node', None)
self._DebugPrintStructureObject(class_definition_root_node, debug_info)
return class_definition_root_node
def _ReadFileHeader(self, file_object):
"""Reads a file header.
Args:
file_object (file): file-like object.
Returns:
cim_rep_file_header: file header.
Raises:
ParseError: if the file header cannot be read.
"""
data_type_map = self._GetDataTypeMap('cim_rep_file_header')
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, data_type_map, 'file header')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get('cim_rep_file_header', None)
self._DebugPrintStructureObject(file_header, debug_info)
return file_header
def _ReadInstanceBranchNode(self, block_data, file_offset):
"""Reads an instance branch node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_instance_branch_node: instance branch node.
Raises:
ParseError: if the instance branch node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading instance branch node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_instance_branch_node')
instance_branch_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'instance branch node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_instance_branch_node', None)
self._DebugPrintStructureObject(instance_branch_node, debug_info)
return instance_branch_node
def _ReadInstanceLeafNode(self, block_data, file_offset):
"""Reads an instance leaf node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_instance_leaf_node: instance leaf node.
Raises:
ParseError: if the instance leaf node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading instance leaf node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_instance_leaf_node')
instance_leaf_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'instance leaf node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_instance_leaf_node', None)
self._DebugPrintStructureObject(instance_leaf_node, debug_info)
return instance_leaf_node
def _ReadInstanceLeafValueNode(self, block_data, file_offset):
"""Reads an instance leaf value node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_instance_leaf_value_node: instance leaf value node.
Raises:
ParseError: if the instance leaf value node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading instance leaf value node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_instance_leaf_value_node')
instance_leaf_value_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'instance leaf value node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_instance_leaf_value_node', None)
self._DebugPrintStructureObject(instance_leaf_value_node, debug_info)
return instance_leaf_value_node
def _ReadInstanceRootNode(self, block_data, file_offset):
"""Reads an instance root node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_instance_root_node: instance root node.
Raises:
ParseError: if the instance root node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading instance root node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_instance_root_node')
instance_root_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'instance root node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get(
'cim_rep_instance_root_node', None)
self._DebugPrintStructureObject(instance_root_node, debug_info)
return instance_root_node
def _ReadNameNode(self, block_data, file_offset):
"""Reads a name node.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_name_node: name node.
Raises:
ParseError: if the name node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading name node at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_name_node')
name_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'name node')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get('cim_rep_name_node', None)
self._DebugPrintStructureObject(name_node, debug_info)
return name_node
def _ReadNodeBinHeader(self, file_object, file_offset):
"""Reads a node bin header.
Args:
file_object (file): file-like object.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_node_bin_header: node bin header.
Raises:
ParseError: if the node bin header cannot be read.
"""
data_type_map = self._GetDataTypeMap('cim_rep_node_bin_header')
node_bin_header, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map, 'node bin header')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get('cim_rep_node_bin_header', None)
self._DebugPrintStructureObject(node_bin_header, debug_info)
return node_bin_header
def _ReadNodeCell(self, file_object, file_offset, cell_number=None):
"""Reads a node cell.
Args:
file_object (file): file-like object.
file_offset (int): offset of the node cell relative to the start of
the file.
cell_number (Optional[int]): cell number.
Returns:
cim_rep_node_cell: node cell.
Raises:
ParseError: if the node cell cannot be read.
"""
data_type_map = self._GetDataTypeMap('cim_rep_node_cell')
node_cell, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map, 'node cell')
if self._debug:
value_string, _ = self._FormatIntegerAsOffset(file_offset)
self._DebugPrintValue('Node cell offset', value_string)
value_string, _ = self._FormatIntegerAsOffset(file_offset + 4)
self._DebugPrintValue('Data offset', value_string)
if cell_number is not None:
value_string, _ = self._FormatIntegerAsDecimal(cell_number)
self._DebugPrintValue('Node cell number', value_string)
debug_info = self._DEBUG_INFORMATION.get('cim_rep_node_cell', None)
self._DebugPrintStructureObject(node_cell, debug_info)
return node_cell
def _ReadUnknownNode5(self, block_data, file_offset):
"""Reads an unknown node 5.
Args:
block_data (bytes): block data.
file_offset (int): offset of the node cell relative to the start of
the file.
Returns:
cim_rep_unknown_node5: unknown node.
Raises:
ParseError: if the unknown node cannot be read.
"""
if self._debug:
self._DebugPrintText((
f'Reading unknown node 5 at offset: {file_offset:d} '
f'(0x{file_offset:08x})\n'))
data_type_map = self._GetDataTypeMap('cim_rep_unknown_node5')
unknown_node = self._ReadStructureFromByteStream(
block_data, file_offset, data_type_map, 'unknown node 5')
if self._debug:
debug_info = self._DEBUG_INFORMATION.get('cim_rep_unknown_node5', None)
self._DebugPrintStructureObject(unknown_node, debug_info)
return unknown_node
  def _ReadClassDefinition(self, file_object, branch_node_offset):
    """Reads a class definition.

    Args:
      file_object (file): file-like object.
      branch_node_offset (int): offset of the branch node relative to the start
          of the file.

    Returns:
      ClassDefinition: class definition or None if the branch node does not
          reference a class definition leaf node.
    """
    node_cell = self._ReadNodeCell(file_object, branch_node_offset - 4)
    branch_node = self._ReadClassDefinitionBranchNode(
        node_cell.data, branch_node_offset)
    leaf_node_offset = branch_node.class_definition_leaf_node_offset
    # Offsets of 40 or less are treated as not set.
    if leaf_node_offset <= 40:
      return None
    node_cell = self._ReadNodeCell(file_object, leaf_node_offset - 4)
    leaf_node = self._ReadClassDefinitionLeafNode(
        node_cell.data, leaf_node_offset)
    # The class definition block itself is parsed by the ClassDefinition
    # helper from the data carried in the leaf node.
    class_definition = ClassDefinition(
        debug=self._debug, output_writer=self._output_writer)
    class_definition.ReadClassDefinitionBlock(
        leaf_node.class_definition_block_data,
        record_data_offset=leaf_node_offset)
    return class_definition
  def _ReadClassDefinitionInstance(self, file_object, root_node_offset):
    """Reads an instance from a class definition.

    Args:
      file_object (file): file-like object.
      root_node_offset (int): offset of the root node relative to the start of
          the file.

    Returns:
      Instance: instance.
    """
    # NOTE(review): unimplemented stub — the body contains only this
    # docstring, so the method implicitly returns None. Confirm whether it
    # is dead code or pending implementation.
  def _ReadClassDefinitionHierarchy(self, file_object, root_node_offset):
    """Reads the class definition hierarchy.

    Args:
      file_object (file): file-like object.
      root_node_offset (int): offset of the root node relative to the start of
          the file.

    Yields:
      Instance: instance.
    """
    node_cell = self._ReadNodeCell(file_object, root_node_offset - 4)
    root_node = self._ReadClassDefinitionRootNode(
        node_cell.data, root_node_offset)
    # Offsets of 40 or less are treated as not set.
    branch_node_offset = root_node.class_definition_branch_node_offset
    if branch_node_offset > 40:
      node_cell = self._ReadNodeCell(file_object, branch_node_offset - 4)
      branch_node = self._ReadClassDefinitionBranchNode(
          node_cell.data, branch_node_offset)
      leaf_node_offset = branch_node.class_definition_leaf_node_offset
      if leaf_node_offset > 40:
        node_cell = self._ReadNodeCell(file_object, leaf_node_offset - 4)
        leaf_node = self._ReadClassDefinitionLeafNode(
            node_cell.data, leaf_node_offset)
        # The class definition itself is read (and optionally printed) but
        # not yielded; only instances are yielded by this generator.
        class_definition = ClassDefinition(
            debug=self._debug, output_writer=self._output_writer)
        class_definition.ReadClassDefinitionBlock(
            leaf_node.class_definition_block_data,
            record_data_offset=leaf_node_offset)
        if self._debug:
          class_definition.DebugPrint()
    # Offsets yielded by the child objects list are read as instances.
    if root_node.child_objects_list_node_offset > 40:
      for value_node_offset in self._ReadChildObjectsList(
          file_object, root_node.child_objects_list_node_offset):
        if value_node_offset > 40:
          instance = self._ReadInstance(file_object, value_node_offset)
          yield instance
    # NOTE(review): sub node types 9 and 10 appear to reference child class
    # definition root nodes, handled by recursing — confirm against the
    # format specification.
    if root_node.sub_node_offset > 40 and root_node.sub_node_type in (9, 10):
      for value_node_offset in self._ReadChildObjectsTree(
          file_object, root_node.sub_node_offset):
        if value_node_offset > 40:
          for instance in self._ReadClassDefinitionHierarchy(
              file_object, value_node_offset):
            yield instance
    if self._debug:
      # In debug mode the name nodes of the child objects are read for their
      # debug output; the values are not used.
      if root_node.child_objects_root_node_offset > 40:
        for value_node_offset in self._ReadChildObjectsTree(
            file_object, root_node.child_objects_root_node_offset):
          if value_node_offset > 40:
            node_cell = self._ReadNodeCell(file_object, value_node_offset - 4)
            self._ReadNameNode(node_cell.data, value_node_offset)
def _ReadInstance(self, file_object, branch_node_offset):
  """Reads an instance.

  Args:
    file_object (file): file-like object.
    branch_node_offset (int): offset of the branch node relative to the start
        of the file.

  Returns:
    Instance: instance, or None if the branch node does not reference a
        supported instance.
  """
  node_cell = self._ReadNodeCell(file_object, branch_node_offset - 4)
  branch_node = self._ReadInstanceBranchNode(
      node_cell.data, branch_node_offset)

  class_root_offset = branch_node.class_definition_root_node_offset
  instance_leaf_offset = branch_node.instance_leaf_node_offset

  # Guard clauses: bail out when the referenced nodes are missing or the
  # branch node is not of the expected kind.
  if (class_root_offset <= 40 or instance_leaf_offset <= 40 or
      branch_node.unknown1 != 2):
    return None

  node_cell = self._ReadNodeCell(file_object, class_root_offset - 4)
  root_node = self._ReadClassDefinitionRootNode(
      node_cell.data, class_root_offset)

  if root_node.class_definition_branch_node_offset <= 40:
    return None

  class_definition = self._ReadClassDefinition(
      file_object, root_node.class_definition_branch_node_offset)

  node_cell = self._ReadNodeCell(file_object, instance_leaf_offset - 4)
  leaf_node = self._ReadInstanceLeafNode(
      node_cell.data, instance_leaf_offset)

  # TODO: read class definition hierarcy
  class_value_data_map = ClassValueDataMap()
  class_value_data_map.Build([class_definition])

  instance = Instance(
      debug=self._debug, output_writer=self._output_writer)
  instance.ReadInstanceBlockData(
      class_value_data_map, leaf_node.instance_block_data,
      record_data_offset=instance_leaf_offset)

  # pylint: disable=attribute-defined-outside-init
  instance.class_name = class_value_data_map.class_name
  instance.derivation = class_value_data_map.derivation
  instance.dynasty = class_value_data_map.dynasty
  instance.super_class_name = class_value_data_map.super_class_name

  if self._debug:
    class_definition.DebugPrint()
    instance.DebugPrint()

  return instance
def _ReadInstanceHierarchy(self, file_object, root_node_offset):
  """Reads an instance hierarchy.

  Args:
    file_object (file): file-like object.
    root_node_offset (int): offset of the root node relative to the start of
        the file.

  Yields:
    Instance: instance.
  """
  # The node cell starts 4 bytes before the node data.
  node_cell = self._ReadNodeCell(file_object, root_node_offset - 4)
  root_node = self._ReadInstanceRootNode(node_cell.data, root_node_offset)

  if self._debug:
    # Debug only: resolve and print the name node of this instance root.
    if root_node.name_node_offset > 40:
      node_cell = self._ReadNodeCell(
          file_object, root_node.name_node_offset - 4)
      self._ReadNameNode(node_cell.data, root_node.name_node_offset)

  if root_node.instance_branch_node_offset > 40:
    instance = self._ReadInstance(
        file_object, root_node.instance_branch_node_offset)
    # NOTE(review): _ReadInstance can return None for unsupported branch
    # nodes; confirm whether None should be yielded to callers here.
    yield instance

  if self._debug:
    # Debug only: read the unknown node 5 structure for analysis.
    if root_node.unknown_node5_offset > 40 and root_node.unknown2 == 0:
      node_cell = self._ReadNodeCell(
          file_object, root_node.unknown_node5_offset - 4)
      unknown_node5 = self._ReadUnknownNode5(
          node_cell.data, root_node.unknown_node5_offset)
      # TODO: clean up after debugging
      _ = unknown_node5

  # 0xffffffff marks an unset child objects root node offset.
  if (root_node.child_objects_root_node_offset > 40 and
      root_node.child_objects_root_node_offset != 0xffffffff):
    for value_node_offset in self._ReadChildObjectsTree(
        file_object, root_node.child_objects_root_node_offset):
      if value_node_offset > 40:
        node_cell = self._ReadNodeCell(file_object, value_node_offset - 4)
        instance_leaf_value_node = self._ReadInstanceLeafValueNode(
            node_cell.data, value_node_offset)

        if self._debug:
          if instance_leaf_value_node.name_node_offset > 40:
            node_cell = self._ReadNodeCell(
                file_object, instance_leaf_value_node.name_node_offset - 4)
            self._ReadNameNode(
                node_cell.data, instance_leaf_value_node.name_node_offset)

        # Recurse into the instance hierarchy of each child object.
        if instance_leaf_value_node.instance_root_node_offset > 40:
          for instance in self._ReadInstanceHierarchy(
              file_object,
              instance_leaf_value_node.instance_root_node_offset):
            yield instance

  if self._debug:
    if root_node.child_objects_list_node_offset > 40:
      for value_node_offset in self._ReadChildObjectsList(
          file_object, root_node.child_objects_list_node_offset):
        if value_node_offset > 40:
          # NOTE(review): this re-reads the cell at root_node_offset (not at
          # value_node_offset) and rebinds root_node using the class
          # definition root node parser, inside an instance hierarchy walk.
          # Looks suspicious — confirm this is intentional debug behavior.
          node_cell = self._ReadNodeCell(file_object, root_node_offset - 4)
          root_node = self._ReadClassDefinitionRootNode(
              node_cell.data, root_node_offset)

          if root_node.class_definition_branch_node_offset > 40:
            self._ReadClassDefinition(
                file_object, root_node.class_definition_branch_node_offset)
def _ReadNamespaceInstanceHierarchy(
    self, file_object, root_node_offset, parent_namespace_segments):
  """Reads a namespace instance hierarchy.

  Args:
    file_object (file): file-like object.
    root_node_offset (int): offset of the root node relative to the start of
        the file.
    parent_namespace_segments (list[str]): segments of the parent namespace.

  Yields:
    Instance: instance.
  """
  node_cell = self._ReadNodeCell(file_object, root_node_offset - 4)
  root_node = self._ReadInstanceRootNode(node_cell.data, root_node_offset)

  # Start from a copy of the parent segments so the recursion below always
  # has a defined value. Previously namespace_segments was only bound inside
  # the branch below, which raised NameError for a root node without an
  # instance branch node.
  namespace_segments = list(parent_namespace_segments)

  if root_node.instance_branch_node_offset > 40:
    instance = self._ReadInstance(
        file_object, root_node.instance_branch_node_offset)
    # _ReadInstance returns None for unsupported branch nodes; previously
    # that caused an AttributeError on instance.properties.
    if instance is not None:
      name_property = instance.properties.get('Name', None)
      namespace_segments.append(name_property)

      instance.namespace = '\\'.join(namespace_segments)

      yield instance

  # 0xffffffff marks an unset child objects root node offset.
  if (root_node.child_objects_root_node_offset > 40 and
      root_node.child_objects_root_node_offset != 0xffffffff):
    for value_node_offset in self._ReadChildObjectsTree(
        file_object, root_node.child_objects_root_node_offset):
      if value_node_offset > 40:
        node_cell = self._ReadNodeCell(file_object, value_node_offset - 4)
        instance_leaf_value_node = self._ReadInstanceLeafValueNode(
            node_cell.data, value_node_offset)

        if instance_leaf_value_node.instance_root_node_offset > 40:
          yield from self._ReadNamespaceInstanceHierarchy(
              file_object,
              instance_leaf_value_node.instance_root_node_offset,
              namespace_segments)
def ReadClassDefinitions(self):
  """Reads class definitions.

  Yields:
    Instance: instance.
  """
  # The offset is None when ReadFileObject did not find the system class
  # cell; guard against comparing None with an int (TypeError in Python 3).
  node_offset = self._system_class_definition_root_node_offset
  if node_offset is not None and node_offset > 40:
    yield from self._ReadClassDefinitionHierarchy(
        self._file_object, node_offset)
def ReadInstances(self):
  """Reads instances.

  Yields:
    Instance: instance.
  """
  # The offset is None when ReadFileObject did not find the root namespace
  # cell; guard against comparing None with an int (TypeError in Python 3).
  node_offset = self._root_namespace_node_offset
  if node_offset is not None and node_offset > 40:
    yield from self._ReadInstanceHierarchy(self._file_object, node_offset)
def ReadNamespaces(self):
  """Reads namespace instances.

  Yields:
    Instance: instance.
  """
  # The offset is None when ReadFileObject did not find the root namespace
  # cell; guard against comparing None with an int (TypeError in Python 3).
  node_offset = self._root_namespace_node_offset
  if node_offset is not None and node_offset > 40:
    yield from self._ReadNamespaceInstanceHierarchy(
        self._file_object, node_offset, [])
def ReadFileObject(self, file_object):
  """Reads a mappings file-like object.

  Args:
    file_object (file): file-like object.

  Raises:
    ParseError: if the file cannot be read.
  """
  file_object.seek(0, os.SEEK_END)
  file_size = file_object.tell()
  file_object.seek(0, os.SEEK_SET)

  file_header = self._ReadFileHeader(file_object)

  file_offset = 40
  cell_number = 0
  next_node_bin_offset = file_header.node_bin_size

  self._root_namespace_node_offset = None
  self._system_class_definition_root_node_offset = None

  while file_offset < file_size:
    if file_offset == next_node_bin_offset:
      node_bin_header = self._ReadNodeBinHeader(file_object, file_offset)
      file_offset += 4
      next_node_bin_offset += node_bin_header.node_bin_size

    node_cell = self._ReadNodeCell(
        file_object, file_offset, cell_number=cell_number)
    if node_cell.size == 0:
      break

    # The node data starts 4 bytes into the cell, after the cell size.
    if file_header.root_namespace_cell_number == cell_number:
      self._root_namespace_node_offset = file_offset + 4
    elif file_header.system_class_cell_number == cell_number:
      self._system_class_definition_root_node_offset = file_offset + 4

    # Mask off the most significant bit of the cell size. Was 0x7ffffff
    # (27 bits), which silently dropped bits 27-30 of the size; 0x7fffffff
    # is the mask used for size values elsewhere in this parser.
    file_offset += node_cell.size & 0x7fffffff
    cell_number += 1

    # Outside of debug mode stop scanning once both offsets are known.
    if (not self._debug and self._root_namespace_node_offset is not None and
        self._system_class_definition_root_node_offset is not None):
      break
class CIMObject(data_format.BinaryDataFormat):
  """CIM object."""

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric definition file.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('wmi_repository.yaml')

  _CIM_DATA_TYPES = _FABRIC.CreateDataTypeMap('cim_data_types')

  def _DebugPrintCIMString(self, cim_string, description):
    """Prints CIM string information.

    Args:
      cim_string (cim_string): CIM string.
      description (str): description of the structure.
    """
    # Capitalize the first character of the description for display.
    capitalized = description[0].upper() + description[1:]

    self._DebugPrintValue(
        f'{capitalized:s} string flags', f'0x{cim_string.string_flags:02x}')
    self._DebugPrintValue(f'{capitalized:s} string', cim_string.string)

  def _FormatIntegerAsDataType(self, integer):
    """Formats an integer as a data type.

    Args:
      integer (int): integer.

    Returns:
      str: integer formatted as a data type.
    """
    # The lower 14 bits contain the CIM data type.
    name = self._CIM_DATA_TYPES.GetName(integer & 0x3fff)
    data_type_string = name or 'UNKNOWN'
    # TODO: format flag 0x4000
    return f'0x{integer:08x} ({data_type_string:s})'

  def _ReadCIMString(
      self, string_offset, record_data, record_data_offset, description):
    """Reads a CIM string.

    Args:
      string_offset (int): string offset.
      record_data (bytes): record data.
      record_data_offset (int): offset of the string data relative to
          the start of the record data.
      description (str): description of the structure.

    Returns:
      str: string.

    Raises:
      ParseError: if the qualifier value cannot be read.
    """
    string_map = self._GetDataTypeMap('cim_string')

    cim_string = self._ReadStructureFromByteStream(
        record_data[string_offset:], record_data_offset + string_offset,
        string_map, description)

    if self._debug:
      self._DebugPrintCIMString(cim_string, description)

    return cim_string.string
class ClassDefinition(CIMObject):
  """Class definition.

  Attributes:
    name (str): name of the class.
    properties (dict[str, ClassDefinitionProperty]): properties.
    qualifiers (dict[str, object]): qualifiers.
    super_class_name (str): name of the parent class.
  """

  _DEBUG_INFO_CLASS_DEFINITION_BLOCK = [
      ('unknown1', 'Unknown1', '_FormatIntegerAsDecimal'),
      ('name_offset', 'Name offset', '_FormatIntegerAsOffset'),
      ('default_value_size', 'Default value size', '_FormatIntegerAsDecimal'),
      ('super_class_name_block_size', 'Super class name block size',
       '_FormatIntegerAsDecimal'),
      ('super_class_name_block_data', 'Super class name block data',
       '_FormatDataInHexadecimal'),
      ('qualifiers_block_size', 'Qualifiers block size',
       '_FormatIntegerAsDecimal'),
      ('qualifiers_block_data', 'Qualifiers block data',
       '_FormatDataInHexadecimal'),
      ('number_of_property_descriptors', 'Number of property descriptors',
       '_FormatIntegerAsDecimal'),
      ('property_descriptors', 'Property descriptors',
       '_FormatArrayOfPropertyDescriptors'),
      ('default_value_data', 'Default value data', '_FormatDataInHexadecimal'),
      ('values_data_size', 'Values data size',
       '_FormatIntegerAsPropertiesBlockSize'),
      ('values_data', 'Values data', '_FormatDataInHexadecimal')]

  _DEBUG_INFO_QUALIFIER_DESCRIPTOR = [
      ('name_offset', 'Name offset', '_FormatIntegerAsOffset'),
      ('unknown1', 'Unknown1', '_FormatIntegerAsHexadecimal2'),
      ('value_data_type', 'Value data type', '_FormatIntegerAsDataType'),
      ('value_boolean', 'Value', '_FormatIntegerAsDecimal'),
      ('value_floating_point', 'Value', '_FormatFloatingPoint'),
      ('value_integer', 'Value', '_FormatIntegerAsDecimal'),
      ('value_offset', 'Value offset', '_FormatIntegerAsOffset')]

  _DEBUG_INFO_PROPERTY_DEFINITION = [
      ('value_data_type', ' Value data type', '_FormatIntegerAsDataType'),
      ('index', ' Index', '_FormatIntegerAsDecimal'),
      ('value_data_offset', ' Value data offset', '_FormatIntegerAsOffset'),
      ('level', ' Level', '_FormatIntegerAsDecimal'),
      ('qualifiers_block_size', ' Qualifiers block size',
       '_FormatIntegerAsDecimal'),
      ('qualifiers_block_data', ' Qualifiers block data',
       '_FormatDataInHexadecimal'),
      ('value_boolean', ' Value', '_FormatIntegerAsDecimal'),
      ('value_floating_point', ' Value', '_FormatFloatingPoint'),
      ('value_integer', ' Value', '_FormatIntegerAsDecimal'),
      ('value_offset', ' Value offset', '_FormatIntegerAsOffset')]

  # Names with the most significant bit of the name offset set refer to
  # these predefined names instead of a string in the values data.
  _PREDEFINED_NAMES = {
      1: 'key',
      3: 'read',
      4: 'write',
      6: 'provider',
      7: 'dynamic',
      10: 'type'}

  def __init__(self, debug=False, output_writer=None):
    """Initializes a class definition.

    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super(ClassDefinition, self).__init__(
        debug=debug, output_writer=output_writer)
    self.name = None
    self.super_class_name = None
    self.properties = {}
    self.qualifiers = {}

  def _FormatArrayOfPropertyDescriptors(self, array_of_property_descriptors):
    """Formats an array of property descriptors.

    Args:
      array_of_property_descriptors (list[property_descriptor]): array of
          property descriptors.

    Returns:
      str: formatted array of property descriptors.
    """
    lines = []
    for index, property_descriptor in enumerate(array_of_property_descriptors):
      value_string, _ = self._FormatIntegerAsOffset(
          property_descriptor.name_offset)
      line, _ = self._FormatValue(
          f' Property descriptor: {index:d} name offset', value_string)
      lines.append(line)

      value_string, _ = self._FormatIntegerAsOffset(
          property_descriptor.definition_offset)
      line, _ = self._FormatValue(
          f' Property descriptor: {index:d} definition offset', value_string)
      lines.append(line)

    return ''.join(lines)

  def _FormatIntegerAsPropertiesBlockSize(self, integer):
    """Formats an integer as a properties block size.

    Args:
      integer (int): integer.

    Returns:
      str: integer formatted as a properties block size.
    """
    # The most significant bit of the size value is a flag.
    size_value = integer & 0x7fffffff
    return f'{size_value:d} (0x{integer:08x})'

  def _ReadClassDefinitionMethods(self, class_definition_data):
    """Reads a class definition methods.

    Args:
      class_definition_data (bytes): class definition data.

    Raises:
      ParseError: if the class definition cannot be read.
    """
    # TODO: set record_data_offset
    record_data_offset = 0

    if self._debug:
      self._DebugPrintText((
          f'Reading class definition methods at offset: {record_data_offset:d} '
          f'(0x{record_data_offset:08x}).\n'))

    data_type_map = self._GetDataTypeMap('class_definition_methods')

    class_definition_methods = self._ReadStructureFromByteStream(
        class_definition_data, record_data_offset, data_type_map,
        'class definition methods')

    methods_block_size = class_definition_methods.methods_block_size

    if self._debug:
      size_value = methods_block_size & 0x7fffffff
      self._DebugPrintValue(
          'Methods block size', f'{size_value:d} (0x{methods_block_size:08x})')

      self._DebugPrintData(
          'Methods block data', class_definition_methods.methods_block_data)

  def _ReadClassDefinitionPropertyDefinition(
      self, property_index, definition_offset, values_data, values_data_offset):
    """Reads a class definition property definition.

    Args:
      property_index (int): property index.
      definition_offset (int): definition offset.
      values_data (bytes): values data.
      values_data_offset (int): offset of the values data relative to the start
          of the record data.

    Returns:
      property_definition: property definition.

    Raises:
      ParseError: if the property name cannot be read.
    """
    if self._debug:
      self._DebugPrintText(f'Property: {property_index:d} definition:\n')

    record_data_offset = values_data_offset + definition_offset
    data_type_map = self._GetDataTypeMap('property_definition')

    property_definition = self._ReadStructureFromByteStream(
        values_data[definition_offset:], record_data_offset, data_type_map,
        f'property: {property_index:d} definition')

    if self._debug:
      self._DebugPrintStructureObject(
          property_definition, self._DEBUG_INFO_PROPERTY_DEFINITION)

    return property_definition

  def _ReadClassDefinitionPropertyName(
      self, property_index, name_offset, values_data, values_data_offset):
    """Reads a class definition property name.

    Args:
      property_index (int): property index.
      name_offset (int): name offset.
      values_data (bytes): values data.
      values_data_offset (int): offset of the values data relative to the start
          of the record data.

    Returns:
      str: property name.

    Raises:
      ParseError: if the property name cannot be read.
    """
    # The most significant bit flags a predefined name index instead of a
    # string offset.
    if name_offset & 0x80000000:
      name_index = name_offset & 0x7fffffff
      property_name = self._PREDEFINED_NAMES.get(
          name_index, f'UNKNOWN_{name_index:d}')

      if self._debug:
        self._DebugPrintValue(
            f'Property: {property_index:d} name index', f'{name_index:d}')
        self._DebugPrintValue(
            f'Property: {property_index:d} name', property_name)

    else:
      property_name = self._ReadCIMString(
          name_offset, values_data, values_data_offset,
          f'property: {property_index:d} name')

    return property_name

  def _ReadClassDefinitionProperties(
      self, property_descriptors, values_data, values_data_offset):
    """Reads class definition properties.

    Args:
      property_descriptors (list[property_descriptor]): property descriptors.
      values_data (bytes): properties data.
      values_data_offset (int): offset of the values data relative to the start
          of the record data.

    Returns:
      dict[str, ClassDefinitionProperty]: properties.

    Raises:
      ParseError: if the properties cannot be read.
    """
    if self._debug:
      self._DebugPrintText('Reading class definition properties.\n')

    properties = {}
    for property_index, property_descriptor in enumerate(property_descriptors):
      property_name = self._ReadClassDefinitionPropertyName(
          property_index, property_descriptor.name_offset, values_data,
          values_data_offset)

      property_definition = self._ReadClassDefinitionPropertyDefinition(
          property_index, property_descriptor.definition_offset, values_data,
          values_data_offset)

      # 18 == size of the property definition fields that precede the
      # qualifiers block data — confirm against wmi_repository.yaml.
      qualifiers_block_offset = property_descriptor.definition_offset + 18

      property_qualifiers = self._ReadQualifiers(
          property_definition.qualifiers_block_data, qualifiers_block_offset,
          values_data, values_data_offset)

      class_definition_property = ClassDefinitionProperty()
      class_definition_property.name = property_name
      class_definition_property.index = property_definition.index
      class_definition_property.value_data_offset = (
          property_definition.value_data_offset)
      class_definition_property.value_data_type = (
          property_definition.value_data_type)
      class_definition_property.qualifiers = property_qualifiers

      properties[property_name] = class_definition_property

    return properties

  def _ReadQualifierName(
      self, qualifier_index, name_offset, values_data, values_data_offset):
    """Reads a qualifier name.

    Args:
      qualifier_index (int): qualifier index.
      name_offset (int): name offset.
      values_data (bytes): values data.
      values_data_offset (int): offset of the values data relative to the start
          of the record data.

    Returns:
      str: qualifier name.

    Raises:
      ParseError: if the qualifier name cannot be read.
    """
    # The most significant bit flags a predefined name index instead of a
    # string offset.
    if name_offset & 0x80000000:
      name_index = name_offset & 0x7fffffff
      qualifier_name = self._PREDEFINED_NAMES.get(
          name_index, f'UNKNOWN_{name_index:d}')

      if self._debug:
        self._DebugPrintValue(
            f'Qualifier: {qualifier_index:d} name index', f'{name_index:d}')
        self._DebugPrintValue(
            f'Qualifier: {qualifier_index:d} name', qualifier_name)

    else:
      qualifier_name = self._ReadCIMString(
          name_offset, values_data, values_data_offset,
          f'qualifier: {qualifier_index:d} name')

    return qualifier_name

  def _ReadQualifiers(
      self, qualifiers_data, qualifiers_data_offset, values_data,
      values_data_offset):
    """Reads qualifiers.

    Args:
      qualifiers_data (bytes): qualifiers data.
      qualifiers_data_offset (int): offset of the qualifiers data relative
          to the start of the record data.
      values_data (bytes): values data.
      values_data_offset (int): offset of the values data relative to the start
          of the record data.

    Returns:
      dict[str, object]: qualifier names and values.

    Raises:
      ParseError: if the qualifiers cannot be read.
    """
    if self._debug:
      self._DebugPrintText((
          f'Reading qualifiers at offset: {qualifiers_data_offset:d} '
          f'(0x{qualifiers_data_offset:08x}).\n'))

    qualifiers = {}

    # Walk the qualifiers data with a separate offset so that
    # qualifiers_data_offset keeps the record-relative base offset.
    # Previously the parameter was overwritten with 0 and added to itself,
    # which doubled the loop offset and lost the base offset.
    data_offset = 0
    qualifier_index = 0
    while data_offset < len(qualifiers_data):
      record_data_offset = qualifiers_data_offset + data_offset

      data_type_map = self._GetDataTypeMap('qualifier_descriptor')
      context = dtfabric_data_maps.DataTypeMapContext()

      qualifier_descriptor = self._ReadStructureFromByteStream(
          qualifiers_data[data_offset:], record_data_offset,
          data_type_map, 'qualifier descriptor', context=context)

      if self._debug:
        self._DebugPrintStructureObject(
            qualifier_descriptor, self._DEBUG_INFO_QUALIFIER_DESCRIPTOR)

      qualifier_name = self._ReadQualifierName(
          qualifier_index, qualifier_descriptor.name_offset, values_data,
          values_data_offset)

      cim_data_type = self._CIM_DATA_TYPES.GetName(
          qualifier_descriptor.value_data_type)
      if cim_data_type == 'CIM-TYPE-BOOLEAN':
        qualifier_value = qualifier_descriptor.value_boolean

      # Was a duplicate 'CIM-TYPE-UINT16' where 'CIM-TYPE-UINT32' belongs,
      # which made UINT32 qualifier values fall through to None.
      elif cim_data_type in (
          'CIM-TYPE-SINT16', 'CIM-TYPE-SINT32', 'CIM-TYPE-SINT8',
          'CIM-TYPE-UINT8', 'CIM-TYPE-UINT16', 'CIM-TYPE-UINT32',
          'CIM-TYPE-SINT64', 'CIM-TYPE-UINT64'):
        qualifier_value = qualifier_descriptor.value_integer

      elif cim_data_type in ('CIM-TYPE-REAL32', 'CIM-TYPE-REAL64'):
        qualifier_value = qualifier_descriptor.value_floating_point

      elif cim_data_type == 'CIM-TYPE-STRING':
        qualifier_value = self._ReadCIMString(
            qualifier_descriptor.value_offset, values_data, values_data_offset,
            f'qualifier: {qualifier_index:d} value')

      elif cim_data_type == 'CIM-TYPE-DATETIME':
        # TODO: implement
        qualifier_value = None

      elif cim_data_type == 'CIM-TYPE-REFERENCE':
        # TODO: implement
        qualifier_value = None

      elif cim_data_type == 'CIM-TYPE-CHAR16':
        # TODO: implement
        qualifier_value = None

      else:
        qualifier_value = None

      if self._debug:
        self._DebugPrintText('\n')

      # TODO: preserve case of qualifier names?
      qualifier_name = qualifier_name.lower()
      qualifiers[qualifier_name] = qualifier_value

      data_offset += context.byte_size
      qualifier_index += 1

    return qualifiers

  def DebugPrint(self):
    """Prints class definition information."""
    self._DebugPrintText('Class definition:\n')
    self._DebugPrintValue(' Name', self.name)

    if self.super_class_name:
      self._DebugPrintValue(' Super class name', self.super_class_name)

    for qualifier_name, qualifier_value in self.qualifiers.items():
      self._DebugPrintValue(
          f' Qualifier: {qualifier_name:s}', f'{qualifier_value!s}')

    for property_name, class_definition_property in self.properties.items():
      self._DebugPrintText(f' Property: {property_name:s}\n')

      value_string, _ = self._FormatIntegerAsDecimal(
          class_definition_property.index)
      self._DebugPrintValue(' Index', value_string)

      value_string, _ = self._FormatIntegerAsOffset(
          class_definition_property.value_data_offset)
      self._DebugPrintValue(' Value data offset', value_string)

      for qualifier_name, qualifier_value in (
          class_definition_property.qualifiers.items()):
        self._DebugPrintValue(
            f' Qualifier: {qualifier_name:s}', f'{qualifier_value!s}')

    self._DebugPrintText('\n')

  def IsAbstract(self):
    """Determines if the class is abstract.

    Returns:
      bool: True if abstract, False otherwise.
    """
    return self.qualifiers.get('abstract', False)

  def ReadClassDefinitionBlock(
      self, class_definition_data, record_data_offset=0):
    """Reads a class definition block.

    Args:
      class_definition_data (bytes): class definition data.
      record_data_offset (Optional[int]): offset of the class definition data
          relative to the start of the record data.

    Raises:
      ParseError: if the class definition cannot be read.
    """
    if self._debug:
      self._DebugPrintText((
          f'Reading class definition block at offset: {record_data_offset:d} '
          f'(0x{record_data_offset:08x}).\n'))

    data_type_map = self._GetDataTypeMap('class_definition_block')

    class_definition_block = self._ReadStructureFromByteStream(
        class_definition_data, record_data_offset, data_type_map,
        'class definition block')

    if self._debug:
      self._DebugPrintStructureObject(
          class_definition_block, self._DEBUG_INFO_CLASS_DEFINITION_BLOCK)

    # 13 == size of the class definition block fields that precede the super
    # class name block — confirm against wmi_repository.yaml.
    super_class_name_block_offset = record_data_offset + 13

    qualifiers_block_offset = (
        super_class_name_block_offset +
        class_definition_block.super_class_name_block_size)

    # Each property descriptor is 8 bytes (name and definition offsets).
    value_data_offset = (
        qualifiers_block_offset +
        class_definition_block.qualifiers_block_size + (
            class_definition_block.number_of_property_descriptors * 8) +
        class_definition_block.default_value_size + 4)

    class_name = self._ReadCIMString(
        class_definition_block.name_offset, class_definition_block.values_data,
        value_data_offset, 'class name')

    super_class_name = None
    if class_definition_block.super_class_name_block_size > 4:
      super_class_name = self._ReadCIMString(
          0, class_definition_block.super_class_name_block_data,
          super_class_name_block_offset, 'super class name')

    class_qualifiers = {}
    if class_definition_block.qualifiers_block_size > 4:
      class_qualifiers = self._ReadQualifiers(
          class_definition_block.qualifiers_block_data, qualifiers_block_offset,
          class_definition_block.values_data, value_data_offset)

    class_properties = self._ReadClassDefinitionProperties(
        class_definition_block.property_descriptors,
        class_definition_block.values_data, value_data_offset)

    self.name = class_name
    self.properties = class_properties
    self.qualifiers = class_qualifiers
    self.super_class_name = super_class_name

    # TODO: complete handling methods
    # data_offset = (
    #     12 + (class_definition_object_record.super_class_name_size * 2) +
    #     class_definition_object_record.class_definition_block_size)

    # if data_offset < len(object_record_data):
    #   if self._debug:
    #     self._DebugPrintData('Methods data', object_record_data[data_offset:])

    #   self._ReadClassDefinitionMethods(object_record_data[data_offset:])
class ClassDefinitionReference(CIMObject):
  """Class definition reference.

  Attributes:
    data (bytes): class definition block data.
    offset (int): offset of the class definition block data relative to
        the start of the object record.
    super_class_name (str): name of the parent class.
  """

  _DEBUG_INFO_CLASS_DEFINITION_OBJECT_RECORD = [
      ('super_class_name_size', 'Super class name size',
       '_FormatIntegerAsDecimal'),
      ('super_class_name', 'Super class name', '_FormatString'),
      ('date_time', 'Unknown date and time', '_FormatIntegerAsFiletime'),
      ('class_definition_block_size', 'Class definition block size',
       '_FormatIntegerAsDecimal'),
      ('class_definition_block_data', 'Class definition block data',
       '_FormatDataInHexadecimal')]

  def __init__(self, debug=False, output_writer=None):
    """Initializes a class definition reference.

    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    # Pass debug and output_writer through to the base class; previously
    # super().__init__() was called without arguments, which silently
    # disabled debug output for this object.
    super(ClassDefinitionReference, self).__init__(
        debug=debug, output_writer=output_writer)
    self.data = None
    self.offset = None
    self.super_class_name = None

  def ReadObjectRecord(self, object_record_data):
    """Reads a class definition reference from object record data.

    Args:
      object_record_data (bytes): object record data.

    Raises:
      ParseError: if the class definition reference cannot be read.
    """
    if self._debug:
      self._DebugPrintText('Reading class definition object record.\n')
      self._DebugPrintData('Object record data', object_record_data)

    data_type_map = self._GetDataTypeMap('class_definition_object_record')

    context = dtfabric_data_maps.DataTypeMapContext()

    class_definition_object_record = self._ReadStructureFromByteStream(
        object_record_data, 0, data_type_map,
        'class definition object record', context=context)

    if self._debug:
      self._DebugPrintStructureObject(
          class_definition_object_record,
          self._DEBUG_INFO_CLASS_DEFINITION_OBJECT_RECORD)

    self.data = class_definition_object_record.class_definition_block_data
    # context.byte_size is the number of bytes consumed by the mapped
    # structure, hence the offset of the block data in the record.
    self.offset = context.byte_size
    self.super_class_name = class_definition_object_record.super_class_name
class InstanceReference(CIMObject):
  """Instance reference.

  Attributes:
    class_name (str): class name.
    class_name_hash (str): hash of the class name.
    data (bytes): instance block data.
    offset (int): offset of the instance block data.
  """

  _DEBUG_INFO_INSTANCE_OBJECT_RECORD = [
      ('class_name_hash', 'Class name hash', '_FormatString'),
      ('date_time1', 'Unknown date and time1', '_FormatIntegerAsFiletime'),
      ('date_time2', 'Unknown date and time2', '_FormatIntegerAsFiletime'),
      ('instance_block_size', 'Instance block size', '_FormatIntegerAsDecimal'),
      ('instance_block_data', 'Instance block data',
       '_FormatDataInHexadecimal')]

  def __init__(self, format_version, debug=False, output_writer=None):
    """Initializes an instance reference.

    Args:
      format_version (str): format version.
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    # Pass debug and output_writer through to the base class; previously
    # super().__init__() was called without arguments, which silently
    # disabled debug output for this object.
    super(InstanceReference, self).__init__(
        debug=debug, output_writer=output_writer)
    self._format_version = format_version
    self.class_name = None
    self.class_name_hash = None
    self.data = None
    self.offset = None

  def ReadObjectRecord(self, object_record_data):
    """Reads an instance reference from object record data.

    Args:
      object_record_data (bytes): object record data.

    Raises:
      ParseError: if the instance reference cannot be read.
    """
    if self._debug:
      self._DebugPrintText('Reading instance object record.\n')
      self._DebugPrintData('Object record data', object_record_data)

    if self._format_version == '2.1':
      data_type_map = self._GetDataTypeMap('instance_object_record_v1')
    else:
      data_type_map = self._GetDataTypeMap('instance_object_record_v2')

    instance_object_record = self._ReadStructureFromByteStream(
        object_record_data, 0, data_type_map, 'instance object record')

    if self._debug:
      self._DebugPrintStructureObject(
          instance_object_record, self._DEBUG_INFO_INSTANCE_OBJECT_RECORD)

    # Offset of the instance block data within the object record; depends on
    # the format version.
    if self._format_version == '2.1':
      self.offset = 84
    else:
      self.offset = 144

    self.class_name_hash = instance_object_record.class_name_hash
    self.data = instance_object_record.instance_block_data
class Instance(CIMObject):
"""Instance.
Attributes:
class_name (str): class name.
class_name_hash (str): hash of the class name.
namespace (str): namespace.
properties (dict[str, object]): instance property names and values.
"""
_DEBUG_INFO_INSTANCE_BLOCK = [
('class_name_offset', 'Class name offset', '_FormatIntegerAsOffset'),
('unknown1', 'Unknown1', '_FormatIntegerAsHexadecimal2'),
('property_state_bits', 'Property state bits',
'_FormatDataInHexadecimal'),
('property_values_data', 'Property values data',
'_FormatDataInHexadecimal'),
('qualifiers_block_size', 'Qualifiers block size',
'_FormatIntegerAsDecimal'),
('qualifiers_block_data', 'Qualifiers block data',
'_FormatDataInHexadecimal'),
('dynamic_block_type', 'Dynamic block type', '_FormatIntegerAsDecimal'),
('dynamic_block_value1', 'Dynamic block value1',
'_FormatIntegerAsHexadecimal8')]
_DEBUG_INFO_DYNAMIC_TYPE2_HEADER = [
('number_of_entries', 'Number of entries', '_FormatIntegerAsDecimal')]
_DEBUG_INFO_DYNAMIC_TYPE2_ENTRY = [
('data_size', 'Data size', '_FormatIntegerAsDecimal'),
('data', 'Data', '_FormatDataInHexadecimal')]
_FIXED_SIZE_VALUE_DATA_TYPES = frozenset([
0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x0000000b, 0x00000010,
0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000015])
_STRING_VALUE_DATA_TYPES = frozenset([0x00000008, 0x00000065, 0x00000066])
def __init__(self, debug=False, output_writer=None):
  """Initializes an instance.

  Args:
    debug (Optional[bool]): True if debug information should be written.
    output_writer (Optional[OutputWriter]): output writer.
  """
  super().__init__(debug=debug, output_writer=output_writer)
  # Set by the caller after reading the instance block data.
  self.class_name = None
  self.class_name_hash = None
  self.namespace = None
  self.properties = {}
def DebugPrint(self):
  """Prints instance information."""
  self._DebugPrintText('Instance:\n')

  if self.namespace:
    self._DebugPrintValue(' Namespace', self.namespace)

  self._DebugPrintValue(' Class name', self.class_name)

  if self.class_name_hash:
    self._DebugPrintValue(' Class name hash', self.class_name_hash)

  for name, value in self.properties.items():
    self._DebugPrintValue(f' Property: {name:s}', f'{value!s}')

  self._DebugPrintText('\n')
  def ReadInstanceBlockData(
      self, class_value_data_map, instance_data, record_data_offset=0):
    """Reads the instance block data.

    Parses the instance block, an optional dynamic block and the property
    values, and fills in class_name and properties.

    Args:
      class_value_data_map (ClassValueDataMap): the class value data map.
      instance_data (bytes): instance data.
      record_data_offset (Optional[int]): offset of the class definition data
          relative to the start of the record data.

    Raises:
      ParseError: if the instance block data cannot be read.
    """
    data_type_map = self._GetDataTypeMap('instance_block')
    # 2 state bits per property, stored byte aligned.
    number_of_properties = len(class_value_data_map.properties)
    property_state_bits_size, remainder = divmod(number_of_properties, 4)
    if remainder > 0:
      property_state_bits_size += 1
    if self._debug:
      value_string, _ = self._FormatIntegerAsDecimal(property_state_bits_size)
      self._DebugPrintValue('Property state bits size', value_string)
      value_string, _ = self._FormatIntegerAsDecimal(
          class_value_data_map.properties_size)
      self._DebugPrintValue('Property values data size', value_string)
      self._DebugPrintText('\n')
    # The context passes the sizes of the variable-length members to the
    # dtFabric data type map.
    context = dtfabric_data_maps.DataTypeMapContext(values={
        'property_state_bits_size': property_state_bits_size,
        'property_values_data_size': class_value_data_map.properties_size})
    instance_block = self._ReadStructureFromByteStream(
        instance_data, record_data_offset, data_type_map,
        'instance block', context=context)
    if self._debug:
      self._DebugPrintStructureObject(
          instance_block, self._DEBUG_INFO_INSTANCE_BLOCK)
    # context.byte_size is the number of bytes consumed by the instance block.
    data_offset = context.byte_size
    # A dynamic block of type 2 contains a number of variable-size entries
    # followed by a 32-bit offset.
    if instance_block.dynamic_block_type == 2:
      data_type_map = self._GetDataTypeMap(
          'instance_block_dynamic_type2_header')
      dynamic_type2_header = self._ReadStructureFromByteStream(
          instance_data[data_offset:], record_data_offset + data_offset,
          data_type_map, 'dynamic block type 2 header')
      if self._debug:
        self._DebugPrintText('Dynamic type 2 header\n')
        self._DebugPrintStructureObject(
            dynamic_type2_header, self._DEBUG_INFO_DYNAMIC_TYPE2_HEADER)
      # 4 is the size of the 32-bit number of entries in the header.
      data_offset += 4
      data_type_map = self._GetDataTypeMap('instance_block_dynamic_type2_entry')
      for index in range(dynamic_type2_header.number_of_entries):
        context = dtfabric_data_maps.DataTypeMapContext()
        dynamic_type2_entry = self._ReadStructureFromByteStream(
            instance_data[data_offset:], record_data_offset + data_offset,
            data_type_map, 'dynamic block type 2 entry', context=context)
        if self._debug:
          self._DebugPrintText(f'Dynamic type 2 entry: {index:d}\n')
          self._DebugPrintStructureObject(
              dynamic_type2_entry, self._DEBUG_INFO_DYNAMIC_TYPE2_ENTRY)
        data_offset += context.byte_size
      data_type_map = self._GetDataTypeMap('uint32le')
      # Purpose of this trailing 32-bit offset is not known.
      unknown_offset = self._ReadStructureFromByteStream(
          instance_data[data_offset:], record_data_offset + data_offset,
          data_type_map, 'unknown offset')
      if self._debug:
        value_string, _ = self._FormatIntegerAsOffset(unknown_offset)
        self._DebugPrintValue('Unknown offset', value_string)
      data_offset += 4
    # String offsets below are relative to the start of the values data.
    values_data = instance_data[data_offset:]
    if self._debug:
      self._DebugPrintData('Values data', values_data)
    self.class_name = self._ReadCIMString(
        instance_block.class_name_offset, values_data, data_offset,
        'class name')
    property_values_data = instance_block.property_values_data
    # 5 presumably accounts for the class name offset (4 bytes) plus
    # unknown1 (1 byte) preceding the property state bits — TODO confirm.
    property_values_data_offset = 5 + len(instance_block.property_state_bits)
    for property_value_data_map in class_value_data_map.properties.values():
      # Offset of the property value within the property values data.
      property_map_offset = property_value_data_map.offset
      description = (
          f'property: {property_value_data_map.name:s} '
          f'value: {property_value_data_map.type_qualifier:s}')
      property_value = None
      if property_value_data_map.data_type in self._FIXED_SIZE_VALUE_DATA_TYPES:
        # Fixed-size values are stored inline in the property values data.
        data_type_map_name = (
            f'property_value_{property_value_data_map.type_qualifier:s}')
        data_type_map = self._GetDataTypeMap(data_type_map_name)
        property_value = self._ReadStructureFromByteStream(
            property_values_data[property_map_offset:],
            property_values_data_offset + property_map_offset, data_type_map,
            description)
        if self._debug:
          description = (
              f'Property: {property_value_data_map.name:s} value: '
              f'{property_value_data_map.type_qualifier:s}')
          self._DebugPrintValue(description, property_value)
      elif property_value_data_map.data_type in self._STRING_VALUE_DATA_TYPES:
        # String values are stored as an offset into the values data.
        description = (
            f'Property: {property_value_data_map.name:s} value: string offset')
        data_type_map = self._GetDataTypeMap('property_value_offset')
        string_offset = self._ReadStructureFromByteStream(
            property_values_data[property_map_offset:],
            property_values_data_offset + property_map_offset, data_type_map,
            description)
        if self._debug:
          self._DebugPrintValue(description, string_offset)
        # A string offset of 0 appears to indicate not set.
        if string_offset > 0:
          description = f'property: {property_value_data_map.name:s} value'
          property_value = self._ReadCIMString(
              string_offset, values_data, data_offset, description)
      elif property_value_data_map.data_type == 0x00002008:
        # A string array is stored as an offset to an array of string offsets.
        description = (
            f'Property: {property_value_data_map.name:s} value: string '
            f'array offset')
        data_type_map = self._GetDataTypeMap('property_value_offset')
        string_array_offset = self._ReadStructureFromByteStream(
            property_values_data[property_map_offset:],
            property_values_data_offset + property_map_offset, data_type_map,
            description)
        if self._debug:
          self._DebugPrintValue(description, string_array_offset)
        # A string array offset of 0 appears to indicate not set.
        if string_array_offset > 0:
          description = (
              f'Property: {property_value_data_map.name:s} value: string '
              f'array')
          data_type_map = self._GetDataTypeMap('cim_string_array')
          string_array = self._ReadStructureFromByteStream(
              values_data[string_array_offset:],
              data_offset + string_array_offset, data_type_map, description)
          property_value = []
          for string_index, string_offset in enumerate(
              string_array.string_offsets):
            description = (
                f'property: {property_value_data_map.name:s} value entry: '
                f'{string_index:d}')
            string_value = self._ReadCIMString(
                string_offset, values_data, data_offset, description)
            property_value.append(string_value)
      else:
        # Other (array) data types: only the offset is read; the value itself
        # is not resolved here and the property remains None.
        description = (
            f'Property: {property_value_data_map.name:s} value: array offset')
        data_type_map = self._GetDataTypeMap('property_value_offset')
        array_offset = self._ReadStructureFromByteStream(
            property_values_data[property_map_offset:],
            property_values_data_offset + property_map_offset, data_type_map,
            description)
        if self._debug:
          self._DebugPrintValue(description, array_offset)
      self.properties[property_value_data_map.name] = property_value
    if self._debug:
      self._DebugPrintText('\n')
class Registration(CIMObject):
  """Registration.

  Attributes:
    name (str): name of the registration.
  """

  # Debug formatting rules for the registration object record structure.
  _DEBUG_INFO_REGISTRATION_OBJECT_RECORD = [
      ('name_space_string_size', 'Name space string size',
       '_FormatIntegerAsDecimal'),
      ('name_space_string', 'Name space string', '_FormatString'),
      ('class_name_string_size', 'Class name string size',
       '_FormatIntegerAsDecimal'),
      ('class_name_string', 'Class name string', '_FormatString'),
      ('instance_name_string_size', 'Instance name string size',
       '_FormatIntegerAsDecimal'),
      ('instance_name_string', 'Instance name string', '_FormatString'),
      ('index_key_string_size', 'Index key string size',
       '_FormatIntegerAsDecimal'),
      ('index_key_string', 'Index key string', '_FormatString')]

  def __init__(self, debug=False, output_writer=None):
    """Initializes a registration.

    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super().__init__(debug=debug, output_writer=output_writer)
    self.name = None

  def ReadObjectRecord(self, object_record_data):
    """Reads a registration from object record data.

    Args:
      object_record_data (bytes): object record data.

    Raises:
      ParseError: if the registration cannot be read.
    """
    if self._debug:
      self._DebugPrintText('Reading registration object record.\n')
      self._DebugPrintData('Object record data', object_record_data)

    data_type_map = self._GetDataTypeMap('registration_object_record')

    record_structure = self._ReadStructureFromByteStream(
        object_record_data, 0, data_type_map, 'registration object record')

    if self._debug:
      self._DebugPrintStructureObject(
          record_structure, self._DEBUG_INFO_REGISTRATION_OBJECT_RECORD)

    # NOTE(review): the name attribute is never populated here — confirm
    # whether it should be derived from one of the record strings.
class CIMRepository(data_format.BinaryDataFormat):
  """A CIM repository.

  Attributes:
    format_version (str): format version.
  """

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric definition file.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('wmi_repository.yaml')

  # Separator between the segments of an index key.
  _KEY_SEGMENT_SEPARATOR = '\\'
  # Separator between the values within a single key segment.
  _KEY_VALUE_SEPARATOR = '.'

  # Positions of the values within a split key segment.
  _KEY_VALUE_PAGE_NUMBER_INDEX = 1
  _KEY_VALUE_RECORD_IDENTIFIER_INDEX = 2
  _KEY_VALUE_DATA_SIZE_INDEX = 3

  # Well-known namespaces used to map namespace name hashes back to their
  # names, since the repository index only stores the hashes.
  _COMMON_NAMESPACES = [
      '__SystemClass',
      'ROOT',
      'ROOT\\Appv',
      'ROOT\\CIMV2',
      'ROOT\\CIMV2\\Applications',
      'ROOT\\CIMV2\\Applications\\MicrosoftIE',
      'ROOT\\CIMV2\\mdm',
      'ROOT\\CIMV2\\mdm\\dmmap',
      'ROOT\\CIMV2\\power',
      'ROOT\\CIMV2\\Security',
      'ROOT\\CIMV2\\Security\\MicrosoftTpm',
      'ROOT\\CIMV2\\Security\\MicrosoftVolumeEncryption',
      'ROOT\\CIMV2\\TerminalServices',
      'ROOT\\Cli',
      'ROOT\\dcim',
      'ROOT\\dcim\\sysman',
      'ROOT\\dcim\\sysman\\biosattributes',
      'ROOT\\dcim\\sysman\\wmisecurity',
      'ROOT\\DEFAULT',
      'ROOT\\directory',
      'ROOT\\directory\\LDAP',
      'ROOT\\Hardware',
      'ROOT\\Intel_ME',
      'ROOT\\Interop',
      'ROOT\\Microsoft',
      'ROOT\\Microsoft\\HomeNet',
      'ROOT\\Microsoft\\protectionManagement',
      'ROOT\\Microsoft\\SecurityClient',
      'ROOT\\Microsoft\\Uev',
      'ROOT\\Microsoft\\Windows',
      'ROOT\\Microsoft\\Windows\\AppBackgroundTask',
      'ROOT\\Microsoft\\Windows\\CI',
      'ROOT\\Microsoft\\Windows\\Defender',
      'ROOT\\Microsoft\\Windows\\DeliveryOptimization',
      'ROOT\\Microsoft\\Windows\\DesiredStateConfiguration',
      'ROOT\\Microsoft\\Windows\\DesiredStateConfigurationProxy',
      'ROOT\\Microsoft\\Windows\\DeviceGuard',
      'ROOT\\Microsoft\\Windows\\dfsn',
      'ROOT\\Microsoft\\Windows\\DHCP',
      'ROOT\\Microsoft\\Windows\\Dns',
      'ROOT\\Microsoft\\Windows\\EventTracingManagement',
      'ROOT\\Microsoft\\Windows\\HardwareManagement',
      'ROOT\\Microsoft\\Windows\\Hgs',
      'ROOT\\Microsoft\\Windows\\Powershellv3',
      'ROOT\\Microsoft\\Windows\\PS_MMAgent',
      'ROOT\\Microsoft\\Windows\\RemoteAccess',
      'ROOT\\Microsoft\\Windows\\RemoteAccess\\Client',
      'ROOT\\Microsoft\\Windows\\SMB',
      'ROOT\\Microsoft\\Windows\\SmbWitness',
      'ROOT\\Microsoft\\Windows\\Storage',
      'ROOT\\Microsoft\\Windows\\Storage\\Providers_v2',
      'ROOT\\Microsoft\\Windows\\Storage\\PT',
      'ROOT\\Microsoft\\Windows\\Storage\\PT\\Alt',
      'ROOT\\Microsoft\\Windows\\StorageReplica',
      'ROOT\\Microsoft\\Windows\\TaskScheduler',
      'ROOT\\Microsoft\\Windows\\Wdac',
      'ROOT\\Microsoft\\Windows\\WindowsUpdate',
      'ROOT\\Microsoft\\Windows\\winrm',
      'ROOT\\MSAPPS10',
      'ROOT\\msdtc',
      'ROOT\\MSPS',
      'ROOT\\nap',
      'ROOT\\NetFrameworkv1',
      'ROOT\\PEH',
      'ROOT\\Policy',
      'ROOT\\RSOP',
      'ROOT\\RSOP\\Computer',
      'ROOT\\RSOP\\User',
      'ROOT\\SECURITY',
      'ROOT\\SecurityCenter',
      'ROOT\\SecurityCenter2',
      'ROOT\\ServiceModel',
      'ROOT\\StandardCimv2',
      'ROOT\\StandardCimv2\\embedded',
      'ROOT\\subscription',
      'ROOT\\WMI']
def __init__(self, debug=False, file_system_helper=None, output_writer=None):
"""Initializes a CIM repository.
Args:
debug (Optional[bool]): True if debug information should be written.
file_system_helper (Optional[FileSystemHelper]): file system helper.
output_writer (Optional[OutputWriter]): output writer.
"""
if not file_system_helper:
file_system_helper = file_system.NativeFileSystemHelper()
super(CIMRepository, self).__init__()
self._debug = debug
self._class_definitions_by_hash = {}
self._class_value_data_map_by_hash = {}
self._file_system_helper = file_system_helper
self._index_binary_tree_file = None
self._index_mapping_table = None
self._index_root_page = None
self._namespace_instances = []
self._objects_data_file = None
self._objects_mapping_table = None
self._output_writer = output_writer
self._repository_file = None
self.format_version = None
def _DebugPrintText(self, text):
"""Prints text for debugging.
Args:
text (str): text.
"""
if self._output_writer:
self._output_writer.WriteText(text)
def _FormatFilenameAsGlob(self, filename):
"""Formats the filename as a case-insensitive glob.
Args:
filename (str): name of the file.
Returns:
str: case-insensitive glob of representation the filename.
"""
glob_parts = []
for character in filename:
if character.isalpha():
character_upper = character.upper()
character_lower = character.lower()
glob_part = f'[{character_upper:s}{character_lower:s}]'
else:
glob_part = character
glob_parts.append(glob_part)
return ''.join(glob_parts)
def _GetActiveMappingFile(self, path):
"""Retrieves the active mapping file.
Args:
path (str): path to the CIM repository.
Returns:
MappingFile: mapping file or None if not available.
Raises:
ParseError: if the mapping version file cannot be read.
"""
mapping_ver_file_number = None
file_object = self._OpenMappingVersionFile(path)
if file_object:
data_type_map = self._GetDataTypeMap('uint32le')
try:
mapping_ver_file_number, _ = self._ReadStructureFromFileObject(
file_object, 0, data_type_map, 'Mapping.ver')
finally:
file_object.close()
if self._debug:
self._DebugPrintText(
f'Mapping.ver file number: {mapping_ver_file_number:d}\n')
active_mapping_file = None
active_mapping_file_number = None
# Unsure how reliable this method is since multiple index[1-3].map files
# can have the same sequence number but contain different mappings.
for mapping_file_number in range(1, 4):
filename_as_glob, _ = self._FormatFilenameAsGlob(
f'mapping{mapping_file_number:d}.map')
path_with_glob = self._file_system_helper.JoinPath([
path, filename_as_glob])
mapping_file_glob = glob.glob(path_with_glob)
if not mapping_file_glob:
continue
if self._debug:
self._DebugPrintText(f'Reading: {mapping_file_glob[0]:s}\n')
mapping_file = MappingFile(
debug=self._debug, output_writer=self._output_writer)
# TODO: change to only read limited information.
mapping_file.Open(mapping_file_glob[0])
if not active_mapping_file:
active_mapping_file = mapping_file
active_mapping_file_number = mapping_file_number
elif mapping_file.sequence_number > active_mapping_file.sequence_number:
active_mapping_file.Close()
active_mapping_file = mapping_file
active_mapping_file_number = mapping_file_number
if (mapping_ver_file_number is not None and
mapping_ver_file_number != active_mapping_file_number):
logging.warning('Mismatch in active mapping file number.')
if self._debug:
self._DebugPrintText(
f'Active mapping file: mapping{active_mapping_file_number:d}.map\n')
return active_mapping_file
def _GetClassDefinitionByName(self, class_name):
"""Retrieves a class definition by name.
Args:
class_name (str): name of the class definition.
Returns:
ClassDefinition: class definitions or None.
"""
class_name_hash = self._GetHashFromString(class_name)
return self._GetClassDefinitionByHash(class_name_hash)
def _GetClassDefinitionByHash(self, class_name_hash):
"""Retrieves a class definition by hash of the name.
Args:
class_name_hash (str): hash of the class name.
Returns:
ClassDefinition: class definitions or None.
"""
# TODO: change to resolve on demand and cache the resulting class
# definition.
class_definition = self._class_definitions_by_hash.get(
class_name_hash.lower(), None)
return class_definition
def _GetClassValueMapByHash(self, class_name_hash):
"""Retrieves a class value map by hash of the name.
Args:
class_name_hash (str): hash of the class name.
Returns:
ClassValueMap: class value map or None.
Raises:
RuntimeError: if a class definition cannot be found.
"""
lookup_key = class_name_hash.lower()
class_value_data_map = self._class_value_data_map_by_hash.get(
lookup_key, None)
if not class_value_data_map:
class_definition = self._GetClassDefinitionByHash(class_name_hash)
if not class_definition:
raise RuntimeError((
f'Unable to retrieve definition of class with hash: '
f'{class_name_hash:s}'))
class_definitions = [class_definition]
while class_definition.super_class_name:
class_definition = self._GetClassDefinitionByName(
class_definition.super_class_name)
if not class_definition:
raise RuntimeError((
f'Unable to retrieve definition of class with name: '
f'{class_definition.super_class_name:s}'))
class_definitions.append(class_definition)
# The ClassValueDataMap.Build functions want the class definitions
# starting the with the base class first.
class_definitions.reverse()
if self._debug:
for class_definition in class_definitions:
class_definition.DebugPrint()
class_value_data_map = ClassValueDataMap()
class_value_data_map.Build(class_definitions)
self._class_value_data_map_by_hash[lookup_key] = class_value_data_map
return class_value_data_map
def _GetHashFromString(self, string):
"""Retrieves the hash of a string.
Args:
string (str): string to hash.
Returns:
str: hash of the string.
"""
string_data = string.upper().encode('utf-16-le')
if self.format_version in ('2.0', '2.1'):
string_hash = hashlib.md5(string_data)
else:
string_hash = hashlib.sha256(string_data)
return string_hash.hexdigest()
def _GetIndexPageByMappedPageNumber(self, mapped_page_number):
"""Retrieves a specific index page by mapped page number.
Args:
mapped_page_number (int): mapped page number.
Returns:
IndexBinaryTreePage: an index binary-tree page or None.
"""
page_number = self._index_mapping_table.ResolveMappedPageNumber(
mapped_page_number)
index_page = self._index_binary_tree_file.GetPage(page_number)
if not index_page:
logging.warning(
f'Unable to read index binary-tree page: {page_number:d}.')
return None
return index_page
def _GetIndexFirstMappedPage(self):
"""Retrieves the index first mapped page.
Returns:
IndexBinaryTreePage: an index binary-tree page or None.
Raises:
RuntimeError: if the index first mapped page could not be determined.
"""
page_number = self._index_mapping_table.ResolveMappedPageNumber(0)
index_page = self._index_binary_tree_file.GetPage(page_number)
if not index_page:
raise RuntimeError((
f'Unable to determine first mapped index binary-tree page: '
f'{page_number:d}.'))
if index_page.page_type != 0xaddd:
raise RuntimeError((
f'Unsupported first mapped index binary-tree page type: '
f'0x{index_page.page_type:04x}'))
return index_page
def _GetIndexRootPage(self):
"""Retrieves the index root page.
Returns:
IndexBinaryTreePage: an index binary-tree page or None.
"""
if not self._index_root_page:
if self.format_version == '2.1':
first_mapped_page = self._GetIndexFirstMappedPage()
root_page_number = first_mapped_page.root_page_number
else:
root_page_number = 1
page_number = self._index_mapping_table.ResolveMappedPageNumber(
root_page_number)
index_page = self._index_binary_tree_file.GetPage(page_number)
if not index_page:
logging.warning(
f'Unable to read index binary-tree root page: {page_number:d}.')
return None
self._index_root_page = index_page
return self._index_root_page
def _GetKeysFromIndexPage(self, index_page):
"""Retrieves the keys from an index page.
Yields:
str: a CIM key.
"""
if index_page:
for key in index_page.keys:
yield key
for mapped_page_number in index_page.sub_pages:
sub_index_page = self._GetIndexPageByMappedPageNumber(
mapped_page_number)
for key in self._GetKeysFromIndexPage(sub_index_page):
yield key
def _GetObjectsPageByMappedPageNumber(self, mapped_page_number, is_data_page):
"""Retrieves a specific objects page by mapped page number.
Args:
mapped_page_number (int): mapped page number.
is_data_page (bool): True if the page is a data page.
Returns:
ObjectsDataPage: objects data page or None.
"""
page_number = self._objects_mapping_table.ResolveMappedPageNumber(
mapped_page_number)
objects_page = self._objects_data_file.GetPage(page_number, is_data_page)
if not objects_page:
logging.warning(f'Unable to read objects data page: {page_number:d}.')
return None
return objects_page
  def _GetObjectRecord(
      self, data_type, mapped_page_number, record_identifier, data_size):
    """Retrieves a specific object record.

    An object record can be fragmented into multiple data segments stored on
    consecutive mapped pages; the segments are read and concatenated.

    Args:
      data_type (str): key data type.
      mapped_page_number (int): mapped page number.
      record_identifier (int): record identifier.
      data_size (int): data size.

    Returns:
      ObjectRecord: an object record or None.

    Raises:
      ParseError: if the objects records could not be parsed.
      RuntimeError: if the objects data file was not opened.
    """
    if not self._objects_data_file:
      raise RuntimeError('Objects.data file was not opened.')
    data_segments = []
    is_data_page = False
    data_segment_index = 0
    while data_size > 0:
      object_page = self._GetObjectsPageByMappedPageNumber(
          mapped_page_number, is_data_page)
      if not object_page:
        raise errors.ParseError((
            f'Unable to read objects record: {record_identifier:d} data '
            f'segment: {data_segment_index:d}.'))
      if not is_data_page:
        # The first page contains the object descriptor, which determines the
        # offset of the record data; subsequent pages are data pages that are
        # read from offset 0.
        object_descriptor = object_page.GetObjectDescriptor(
            record_identifier, data_size)
        data_offset = object_descriptor.data_offset
        is_data_page = True
      else:
        data_offset = 0
      data_segment = self._objects_data_file.ReadObjectRecordDataSegment(
          object_page, data_offset, data_size)
      if not data_segment:
        raise errors.ParseError((
            f'Unable to read objects record: {record_identifier:d} data '
            f'segment: {data_segment_index:d}.'))
      data_segments.append(data_segment)
      # Remaining size determines whether another segment must be read from
      # the next mapped page.
      data_size -= len(data_segment)
      data_segment_index += 1
      mapped_page_number += 1
    object_record_data = b''.join(data_segments)
    return ObjectRecord(data_type, object_record_data)
def _GetObjectRecordValuesFromKey(self, key_segment):
"""Retrieves the object record values from a key segment.
Args:
key_segment (str): an index key segment.
Returns:
tuple[str, str, int, int, int]: data type, name hash, page number, record
identifier and record data size of an object record or None.
"""
if self._KEY_VALUE_SEPARATOR not in key_segment:
return None, None, None, None, None
key_values = key_segment.split(self._KEY_VALUE_SEPARATOR)
if not len(key_values) == 4:
logging.warning('Unsupported number of key values.')
return None, None, None, None, None
data_type, _, name_hash = key_values[0].partition('_')
name_hash = name_hash.lower()
try:
page_number = int(key_values[self._KEY_VALUE_PAGE_NUMBER_INDEX], 10)
except ValueError:
logging.warning('Unsupported key value page number.')
return None, None, None, None, None
try:
record_identifier = int(
key_values[self._KEY_VALUE_RECORD_IDENTIFIER_INDEX], 10)
except ValueError:
logging.warning('Unsupported key value record identifier.')
return None, None, None, None, None
try:
data_size = int(key_values[self._KEY_VALUE_DATA_SIZE_INDEX], 10)
except ValueError:
logging.warning('Unsupported key value data size.')
return None, None, None, None, None
return data_type, name_hash, page_number, record_identifier, data_size
def _OpenIndexBinaryTreeFile(self, path):
"""Opens an index binary tree.
Args:
path (str): path to the CIM repository.
Returns:
IndexBinaryTreeFile: index binary tree file or None if not available.
"""
filename_as_glob, _ = self._FormatFilenameAsGlob('index.btr')
index_binary_tree_file_glob = self._file_system_helper.JoinPath([
path, filename_as_glob])
index_binary_tree_file_path = glob.glob(index_binary_tree_file_glob)
if not index_binary_tree_file_path:
return None
if self._debug:
self._DebugPrintText(f'Reading: {index_binary_tree_file_path[0]:s}\n')
index_binary_tree_file = IndexBinaryTreeFile(
debug=self._debug, output_writer=self._output_writer)
index_binary_tree_file.Open(index_binary_tree_file_path[0])
return index_binary_tree_file
def _OpenMappingFile(self, path, filename):
"""Opens a mapping file.
Args:
path (str): path to the CIM repository.
filename (str): mapping file name.
Returns:
MappingFile: mapping file or None if not available.
"""
filename_as_glob, _ = self._FormatFilenameAsGlob(filename)
mapping_file_glob = self._file_system_helper.JoinPath([
path, filename_as_glob])
mapping_file_path = glob.glob(mapping_file_glob)
if not mapping_file_path:
return None
if self._debug:
self._DebugPrintText(f'Reading: {mapping_file_path[0]:s}\n')
mapping_file = MappingFile(
debug=self._debug, output_writer=self._output_writer)
mapping_file.Open(mapping_file_path[0])
return mapping_file
def _OpenMappingVersionFile(self, path):
"""Opens a mapping version file.
Args:
path (str): path to the CIM repository.
Returns:
file: file-like object or None if not available.
"""
filename_as_glob, _ = self._FormatFilenameAsGlob('mapping.ver')
mapping_version_file_glob = self._file_system_helper.JoinPath([
path, filename_as_glob])
mapping_version_file_path = glob.glob(mapping_version_file_glob)
if not mapping_version_file_path:
return None
return open(mapping_version_file_path[0], 'rb') # pylint: disable=consider-using-with
def _OpenObjectsDataFile(self, path):
"""Opens an objects data file.
Args:
path (str): path to the CIM repository.
Returns:
ObjectsDataFile: objects data file or None if not available.
"""
filename_as_glob, _ = self._FormatFilenameAsGlob('objects.data')
objects_data_file_glob = self._file_system_helper.JoinPath([
path, filename_as_glob])
objects_data_file_path = glob.glob(objects_data_file_glob)
if not objects_data_file_path:
return None
if self._debug:
self._DebugPrintText(f'Reading: {objects_data_file_path[0]:s}\n')
objects_data_file = ObjectsDataFile(
debug=self._debug, output_writer=self._output_writer)
objects_data_file.Open(objects_data_file_path[0])
return objects_data_file
def _OpenRepositoryFile(self, path):
"""Opens a repository file.
Args:
path (str): path to the CIM repository.
Returns:
RepositoryFile: repository file or None if not available.
"""
filename_as_glob, _ = self._FormatFilenameAsGlob('cim.rep')
repository_file_glob = self._file_system_helper.JoinPath([
path, filename_as_glob])
repository_file_path = glob.glob(repository_file_glob)
if not repository_file_path:
return None
if self._debug:
self._DebugPrintText(f'Reading: {repository_file_path[0]:s}\n')
repository_file = RepositoryFile(
debug=self._debug, output_writer=self._output_writer)
repository_file.Open(repository_file_path[0])
return repository_file
def _ReadClassDefinitionObjectRecords(self):
"""Reads class definition object records.
Yields:
tuple[str, ObjectRecord]: name hash and class definition object record.
"""
index_page = self._GetIndexRootPage()
for key in self._GetKeysFromIndexPage(index_page):
key_segments = key.split(self._KEY_SEGMENT_SEPARATOR)
data_type, name_hash, mapped_page_number, record_identifier, data_size = (
self._GetObjectRecordValuesFromKey(key_segments[-1]))
if data_type != 'CD':
continue
object_record = self._GetObjectRecord(
data_type, mapped_page_number, record_identifier, data_size)
yield name_hash, object_record
def _ReadClassDefinitionsFromObjectRecords(self):
"""Reads the class definitions from object records."""
for name_hash, object_record in self._ReadClassDefinitionObjectRecords():
class_definition_reference = ClassDefinitionReference(
debug=self._debug, output_writer=self._output_writer)
class_definition_reference.ReadObjectRecord(object_record.data)
class_definition = ClassDefinition(
debug=self._debug, output_writer=self._output_writer)
class_definition.ReadClassDefinitionBlock(
class_definition_reference.data,
record_data_offset=class_definition_reference.offset)
self._class_definitions_by_hash[name_hash] = class_definition
if self._debug:
self._DebugPrintText('Class definitions:\n')
for class_definition in self._class_definitions_by_hash.values():
class_definition.DebugPrint()
def _ReadInstance(self, instance_reference):
"""Reads an instance.
Args:
instance_reference (InstanceReference): instance reference.
Returns:
Instance: instance.
"""
class_name_hash = instance_reference.class_name_hash
if not class_name_hash:
class_name_hash = self._GetHashFromString(instance_reference.class_name)
class_value_data_map = self._GetClassValueMapByHash(class_name_hash)
instance = Instance(debug=self._debug, output_writer=self._output_writer)
instance.ReadInstanceBlockData(
class_value_data_map, instance_reference.data,
record_data_offset=instance_reference.offset)
# pylint: disable=attribute-defined-outside-init
instance.class_name = class_value_data_map.class_name
instance.derivation = class_value_data_map.derivation
instance.dynasty = class_value_data_map.dynasty
instance.super_class_name = class_value_data_map.super_class_name
if self._debug:
instance.DebugPrint()
return instance
# TODO: remove after refactor
def _ReadInstanceFromObjectRecord(self, object_record):
"""Reads an instance.
Args:
object_record (ObjectRecord): object record.
Returns:
Instance: instance or None.
"""
instance_reference = InstanceReference(
self.format_version, debug=self._debug,
output_writer=self._output_writer)
instance_reference.ReadObjectRecord(object_record.data)
instance = Instance(
debug=self._debug, output_writer=self._output_writer)
class_value_data_map = self._GetClassValueMapByHash(
instance_reference.class_name_hash)
instance.ReadInstanceBlockData(
class_value_data_map, instance_reference.data,
record_data_offset=instance_reference.offset)
if self._debug:
instance.DebugPrint()
# pylint: disable=attribute-defined-outside-init
instance.class_name = class_value_data_map.class_name
instance.derivation = class_value_data_map.derivation
instance.dynasty = class_value_data_map.dynasty
instance.super_class_name = class_value_data_map.super_class_name
return instance
def _ReadInstanceObjectRecords(self):
"""Reads instance object records.
Yields:
tuple[str, ObjectRecord]: name hash and instance object record.
"""
index_page = self._GetIndexRootPage()
for key in self._GetKeysFromIndexPage(index_page):
key_segments = key.split(self._KEY_SEGMENT_SEPARATOR)
data_type, name_hash, mapped_page_number, record_identifier, data_size = (
self._GetObjectRecordValuesFromKey(key_segments[-1]))
if data_type not in ('I', 'IL'):
continue
object_record = self._GetObjectRecord(
data_type, mapped_page_number, record_identifier, data_size)
yield name_hash, object_record
  def _ReadNamespacesFromObjectRecords(self):
    """Reads namespaces from object records.

    Collects __NAMESPACE instances per parent namespace hash and resolves the
    hashes to namespace names, seeded by the common namespace names, filling
    self._namespace_instances.
    """
    class_name_hash = self._GetHashFromString('__NAMESPACE')
    object_record_values = set()
    instances_per_namespace = {}
    parent_namespaces = set()
    index_page = self._GetIndexRootPage()
    for key in self._GetKeysFromIndexPage(index_page):
      key_segments = key.split(self._KEY_SEGMENT_SEPARATOR)
      # Second key segment is presumably "NS_<parent namespace hash>" —
      # TODO confirm against the index key layout.
      key_segment = key_segments[1]
      if not key_segment.startswith('NS_'):
        continue
      namespace_hash = key_segment[3:].lower()
      parent_namespaces.add(namespace_hash)
      # Only keys whose class hash matches __NAMESPACE are of interest.
      _, _, key_segment = key_segments[2].partition('_')
      if key_segment.lower() != class_name_hash:
        continue
      data_type, _, mapped_page_number, record_identifier, data_size = (
          self._GetObjectRecordValuesFromKey(key_segments[-1]))
      if data_type not in ('I', 'IL'):
        continue
      # Skip object records that were already read.
      if (mapped_page_number, record_identifier) in object_record_values:
        continue
      object_record = self._GetObjectRecord(
          data_type, mapped_page_number, record_identifier, data_size)
      object_record_values.add((mapped_page_number, record_identifier))
      instance = self._ReadInstanceFromObjectRecord(object_record)
      if namespace_hash not in instances_per_namespace:
        instances_per_namespace[namespace_hash] = []
      instances_per_namespace[namespace_hash].append(instance)
    # Seed the hash-to-name mapping with the well-known namespaces.
    namespaces_by_hash = {}
    for namespace in self._COMMON_NAMESPACES:
      namespace_hash = self._GetHashFromString(namespace)
      namespaces_by_hash[namespace_hash] = namespace
    # Resolve child namespaces iteratively; 5 passes bounds the namespace
    # nesting depth that can be resolved beyond the seeded names.
    for _ in range(5):
      unresolved_namespaces = set()
      for parent_namespace_hash in parent_namespaces:
        parent_namespace = namespaces_by_hash.get(parent_namespace_hash, None)
        if not parent_namespace:
          # Retry in a later pass once the parent name has been resolved.
          unresolved_namespaces.add(parent_namespace_hash)
          continue
        instances = instances_per_namespace.get(parent_namespace_hash, None)
        if not instances:
          continue
        for instance in instances:
          # The Name property of a __NAMESPACE instance is the child
          # namespace name relative to its parent.
          name_property = instance.properties.get('Name', None)
          namespace = '\\'.join([parent_namespace, name_property])
          namespace_hash = self._GetHashFromString(namespace)
          namespaces_by_hash[namespace_hash] = namespace
          instance.namespace = namespace
          self._namespace_instances.append(instance)
      parent_namespaces = unresolved_namespaces
def Close(self):
"""Closes the CIM repository."""
self._class_definitions_by_hash = {}
self._class_value_data_map_by_hash = {}
self._namespace_instances = []
self._index_mapping_table = None
self._index_root_page = None
self._objects_mapping_table = None
if self._objects_data_file:
self._objects_data_file.Close()
self._objects_data_file = None
if self._index_binary_tree_file:
self._index_binary_tree_file.Close()
self._index_binary_tree_file = None
def GetInstances(self):
"""Retrieves instances.
Yields:
Instance: an instance.
"""
if self._repository_file:
for instance in self._repository_file.ReadInstances():
yield instance
else:
for _, object_record in self._ReadInstanceObjectRecords():
instance_reference = InstanceReference(
self.format_version, debug=self._debug,
output_writer=self._output_writer)
instance_reference.ReadObjectRecord(object_record.data)
yield self._ReadInstance(instance_reference)
def GetNamespaces(self):
"""Retrieves namespaces.
Yields:
Instance: an instance.
"""
if self._repository_file:
for instance in self._repository_file.ReadNamespaces():
yield instance
else:
if not self._namespace_instances:
self._ReadNamespacesFromObjectRecords()
for instance in self._namespace_instances:
yield instance
def GetIndexKeys(self):
"""Retrieves the index keys.
Yields:
str: an index key path.
"""
if self._index_binary_tree_file:
index_page = self._GetIndexRootPage()
for key in self._GetKeysFromIndexPage(index_page):
yield key
# TODO: remove after debugging.
def GetObjectRecordByKey(self, key):
"""Retrieves a specific object record.
Args:
key (str): a CIM key.
Returns:
ObjectRecord: an object record or None.
"""
key_segments = key.split(self._KEY_SEGMENT_SEPARATOR)
data_type, _, mapped_page_number, record_identifier, data_size = (
self._GetObjectRecordValuesFromKey(key_segments[-1]))
return self._GetObjectRecord(
data_type, mapped_page_number, record_identifier, data_size)
  def Open(self, path):
    """Opens the CIM repository.

    Depending on the basename of path this opens either a single mapping
    file, the single-file repository (CIM format 2.0), only the index
    binary tree, or the full repository (index, objects and mappings).

    Args:
      path (str): path to the CIM repository or an individual file.
    """
    basename = os.path.basename(path).lower()
    # A lone mapping file: open it in isolation and stop.
    if basename in ('index.map', 'mapping1.map', 'mapping2.map', 'mapping3.map',
                    'objects.map'):
      path = os.path.dirname(path)
      self._OpenMappingFile(path, basename)
      return
    # For known repository files operate on the containing directory.
    if basename in ('cim.rep', 'index.btr'):
      path = os.path.dirname(path)
    active_mapping_file = None
    if basename == 'cim.rep':
      # CIM repository format 2.0 stores everything in a single file.
      self.format_version = '2.0'
      self._repository_file = self._OpenRepositoryFile(path)
    else:
      index_mapping_file = self._OpenMappingFile(path, 'index.map')
      if not index_mapping_file:
        # No dedicated index.map; fall back to the active mapping file.
        active_mapping_file = self._GetActiveMappingFile(path)
        index_mapping_file = active_mapping_file
      self._index_mapping_table = index_mapping_file.GetIndexMappingTable()
      # Mapping format version 1 corresponds to CIM repository 2.1,
      # later mapping format versions to 2.2.
      if index_mapping_file.format_version == 1:
        self.format_version = '2.1'
      else:
        self.format_version = '2.2'
      # Keep a shared active mapping file open for the objects mapping below.
      if basename == 'index.btr' or not active_mapping_file:
        index_mapping_file.Close()
      self._index_binary_tree_file = self._OpenIndexBinaryTreeFile(path)
      if basename == 'index.btr':
        # Only the index was requested; skip the object data files.
        return
      objects_mapping_file = self._OpenMappingFile(path, 'objects.map')
      if not objects_mapping_file:
        if not active_mapping_file:
          active_mapping_file = self._GetActiveMappingFile(path)
        objects_mapping_file = active_mapping_file
      self._objects_mapping_table = (
          objects_mapping_file.GetObjectsMappingTable())
      objects_mapping_file.Close()
      self._objects_data_file = self._OpenObjectsDataFile(path)
      # Cache class definitions so instances can be resolved later.
      self._ReadClassDefinitionsFromObjectRecords()
|
# Generated by Django 2.2.11 on 2020-03-14 07:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefines Detail.manager as a CharField with default 'None'."""

    dependencies = [
        ('orgchart', '0008_auto_20200314_1107'),
    ]

    operations = [
        # Existing rows keep their stored value; new rows default to the
        # literal string 'None' (note: a string, not SQL NULL).
        migrations.AlterField(
            model_name='detail',
            name='manager',
            field=models.CharField(default='None', max_length=40),
        ),
    ]
|
"""
Multilayer Perceptrons (MLP) with tensorflow as the only dependency.
The module contains MLP which serves as the base of all networks.
It aims to replace existing implementation of MLP class
(garage.tf.core.network), which is under development.
"""
import tensorflow as tf
from tensorflow.python.ops.gen_array_ops import broadcast_to
def mlp(input_var,
        output_dim,
        hidden_sizes,
        name,
        hidden_nonlinearity=tf.nn.relu,
        hidden_w_init=tf.contrib.layers.xavier_initializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_nonlinearity=None,
        output_w_init=tf.contrib.layers.xavier_initializer(),
        output_b_init=tf.zeros_initializer(),
        layer_normalization=False):
    """Build a multilayer perceptron and return its output tensor.

    Args:
        input_var: Input tf.Tensor to the MLP.
        output_dim: Dimension of the network output.
        hidden_sizes: Output dimension of dense layer(s).
        name: Variable scope of the MLP.
        hidden_nonlinearity: Activation function for intermediate dense
            layer(s).
        hidden_w_init: Initializer function for the weight of intermediate
            dense layer(s).
        hidden_b_init: Initializer function for the bias of intermediate
            dense layer(s).
        output_nonlinearity: Activation function for the output dense layer.
        output_w_init: Initializer function for the weight of the output
            dense layer.
        output_b_init: Initializer function for the bias of the output
            dense layer.
        layer_normalization: Whether to apply layer normalization after
            each hidden layer.

    Return:
        The output tf.Tensor of the MLP.
    """
    with tf.variable_scope(name):
        # Stack the hidden layers, threading the activation through.
        layer = input_var
        for index, size in enumerate(hidden_sizes):
            layer = tf.layers.dense(
                inputs=layer,
                units=size,
                activation=hidden_nonlinearity,
                kernel_initializer=hidden_w_init,
                bias_initializer=hidden_b_init,
                name="hidden_{}".format(index))
            if layer_normalization:
                layer = tf.contrib.layers.layer_norm(layer)
        # Final projection to the requested output dimension.
        return tf.layers.dense(
            inputs=layer,
            units=output_dim,
            activation=output_nonlinearity,
            kernel_initializer=output_w_init,
            bias_initializer=output_b_init,
            name="output")
def parameter(input_var,
              length,
              initializer=tf.zeros_initializer(),
              dtype=tf.float32,
              trainable=True,
              name="parameter"):
    """Create a variable vector broadcast to match input_var's leading dims.

    Args:
        input_var: Input tf.Tensor.
        length: Integer dimension of the variables.
        initializer: Initializer of the variables.
        dtype: Data type of the variables.
        trainable: Whether these variables are trainable.
        name: Variable scope of the variables.

    Return:
        A tensor of the broadcast variables.
    """
    with tf.variable_scope(name):
        var = tf.get_variable(
            "parameter",
            shape=(length, ),
            dtype=dtype,
            initializer=initializer,
            trainable=trainable)
        # Broadcast shape: input_var's shape minus its last axis, plus
        # the parameter length as the new last axis.
        input_rank = input_var.get_shape().ndims
        target_shape = tf.concat(
            axis=0, values=[tf.shape(input_var)[:input_rank - 1], [length]])
        return broadcast_to(var, shape=target_shape)
|
import cv2
import matplotlib.pyplot as plt

# Load the puzzle image as grayscale (flag 0) and smooth it to suppress
# noise before edge detection.
img = cv2.imread(r'F:\CS Projects\Coding\Python\Input\sukoku.jpg', 0)
img = cv2.GaussianBlur(img, (5, 5), 0)

# BUG FIX: the fifth positional argument of cv2.Sobel is `dst` (the output
# buffer), not the kernel size, so the original calls passed 3 as `dst`.
# The aperture size must be given via the `ksize` keyword.
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
sobelxy = cv2.Sobel(img, cv2.CV_64F, 1, 1, ksize=3)

# BUG FIX: cv2.Scharr requires exactly one derivative direction
# (dx + dy == 1), so Scharr(img, ..., 1, 1) raises an error. A combined
# "XY" response is approximated by blending the per-axis responses.
Scharrx = cv2.Scharr(img, cv2.CV_64F, 1, 0)
Scharry = cv2.Scharr(img, cv2.CV_64F, 0, 1)
Scharrxy = cv2.addWeighted(Scharrx, 0.5, Scharry, 0.5, 0)

cv2.imwrite(r'C:\Users\HP\Downloads\sukoku2.jpg', sobelxy)
cv2.imwrite(r'C:\Users\HP\Downloads\sukoku3.jpg', Scharrxy)

# BUG FIX: the bottom row previously re-plotted the Sobel images under the
# Scharr titles; show the actual Scharr results instead.
plt.subplot(231), plt.imshow(sobelx, cmap='gray'), plt.axis('off'), plt.title('SobelX')
plt.subplot(232), plt.imshow(sobely, cmap='gray'), plt.axis('off'), plt.title('SobelY')
plt.subplot(233), plt.imshow(sobelxy, cmap='gray'), plt.axis('off'), plt.title('SobelXY')
plt.subplot(234), plt.imshow(Scharrx, cmap='gray'), plt.axis('off'), plt.title('ScharX')
plt.subplot(235), plt.imshow(Scharry, cmap='gray'), plt.axis('off'), plt.title('ScharY')
plt.subplot(236), plt.imshow(Scharrxy, cmap='gray'), plt.axis('off'), plt.title('ScharXY')
plt.show()

cv2.waitKey()
cv2.destroyAllWindows()
#myre.py
class __compiledre(object):
    """A compiled regular-expression engine driven by a DFA."""

    def __init__(self, DFA):
        """Initialize the engine from a DFA.

        A DFA is a tuple of:
            transtable: a list of dicts, each mapping input char -> state
            START_STATE: an int
            STOP_STATES: a collection of accepting state ints
        """
        self.transtable, self.START_STATE, self.STOP_STATES = DFA

    def search(self, string, start=0, end=None):
        """Find all non-overlapping longest matches in string[start:end).

        Returns a list of (begin, stop) index pairs.
        """
        if end is None:
            end = len(string)
        matches = []
        pos = start
        while pos < end:
            # Greedily extend a match starting at `pos`.
            state = self.START_STATE
            probe = pos
            best_stop = pos
            while True:
                if state in self.STOP_STATES:
                    # Remember the longest accepting position seen so far.
                    best_stop = probe
                if probe == end:
                    break
                state = self.transtable[state].get(string[probe], None)
                if state is None:
                    break
                probe += 1
            if best_stop != pos:
                matches.append((pos, best_stop))
                pos = best_stop
            else:
                # No match here; resume scanning at the next character.
                pos += 1
        return matches
def _pattern2NFA(pattern):
    """Compile pattern into an NFA ``(transtable, start_state, stop_state)``.

    pattern: a regular expression supporting ``\\`` (escape, only valid
        before ``*``), ``|`` (alternation), ``*`` (Kleene star) and
        ``( )`` grouping.
    transtable: a list of dicts mapping char (or None for an epsilon move)
        to a set of destination states.

    Returns:
        The NFA tuple, or None when the pattern is malformed.
    """
    s, t = 0, len(pattern)
    transtable = []
    error = 0

    def __add_trans(source, index, dest):
        # Grow the table on demand, then record source --index--> dest.
        while source >= len(transtable):
            transtable.append({})
        destinations = transtable[source].get(index, None)
        if destinations is None:
            transtable[source][index] = set([dest])
        else:
            destinations.add(dest)

    def __pattern_change(pt, _s, _t, start):
        # Translate pt[_s:_t] into NFA states beginning at `start` and
        # return the resulting stop state.
        #
        # BUG FIX: the assignments `error = 1` below previously created a
        # function-local variable, so the enclosing `error` flag was never
        # set and malformed patterns were silently accepted.
        nonlocal error
        if _s == _t:
            return start
        # Top-level alternation: split on the first '|' outside brackets.
        bracket_num = 0
        for i in range(_s, _t):
            if pt[i] == '(':
                bracket_num += 1
            elif pt[i] == ')':
                if bracket_num > 0:
                    bracket_num -= 1
                else:
                    error = 1
                    return start
            elif pt[i] == '|' and bracket_num == 0:
                t_stop_state1 = __pattern_change(pt, _s, i, start + 1)
                t_stop_state2 = __pattern_change(pt, i + 1, _t, t_stop_state1 + 1)
                # Epsilon moves branching into, and merging out of, both arms.
                __add_trans(start, None, start + 1)
                __add_trans(start, None, t_stop_state1 + 1)
                __add_trans(t_stop_state1, None, t_stop_state2 + 1)
                __add_trans(t_stop_state2, None, t_stop_state2 + 1)
                return t_stop_state2 + 1
        # Parenthesized group, optionally followed by '*'.
        if pt[_s] == '(':
            bracket_num = 0
            for i in range(_s, _t):
                if pt[i] == '(':
                    bracket_num += 1
                if pt[i] == ')':
                    if bracket_num > 1:
                        bracket_num -= 1
                    elif bracket_num == 0:
                        error = 1
                        return start
                    elif bracket_num == 1:
                        if i + 1 < _t and pt[i + 1] == '*':
                            # Starred group: epsilon skip plus loop-back.
                            t_stop_state = __pattern_change(pt, _s + 1, i, start + 1)
                            __add_trans(start, None, start + 1)
                            __add_trans(start, None, t_stop_state + 1)
                            __add_trans(t_stop_state, None, start + 1)
                            __add_trans(t_stop_state, None, t_stop_state + 1)
                            return __pattern_change(pt, i + 2, _t, t_stop_state + 1)
                        else:
                            t_stop_state = __pattern_change(pt, _s + 1, i, start)
                            return __pattern_change(pt, i + 1, _t, t_stop_state)
        # Escaped or literal character, optionally followed by '*'.
        if pt[_s] == '\\':
            if _s + 1 < _t and pt[_s + 1] == '*':
                # '\*' matches a literal '*' character.
                __add_trans(start, '*', start + 1)
                return __pattern_change(pt, _s + 2, _t, start + 1)
            else:
                error = 1
                return start
        else:
            if _s + 1 < _t and pt[_s + 1] == '*':
                # 'c*': epsilon skip or loop through the character state.
                __add_trans(start, None, start + 1)
                __add_trans(start, None, start + 3)
                __add_trans(start + 1, pt[_s], start + 2)
                __add_trans(start + 2, None, start + 3)
                __add_trans(start + 2, None, start + 1)
                return __pattern_change(pt, _s + 2, _t, start + 3)
            else:
                __add_trans(start, pt[_s], start + 1)
                return __pattern_change(pt, _s + 1, _t, start + 1)

    stop_state = __pattern_change(pattern, s, t, 0)
    # Ensure the stop state has a (possibly empty) row in the table.
    transtable.append({})
    if error:
        return None
    else:
        return (transtable, 0, stop_state)
from collections import deque
def _NFA2DFA(NFA):
    """Convert an NFA into a DFA via subset construction.

    NFA: ``(transtable, start_state, stop_state)`` as produced by
        ``_pattern2NFA``; transition values are sets of states and a key
        of None denotes an epsilon move.

    Returns:
        ``(transtable, start_state, stop_states)`` where each DFA state is
        a list index, transtable maps char -> next state per DFA state and
        stop_states is the set of accepting state indices.
    """
    NFA_transtable, NFA_startstate, NFA_stopstate = NFA
    transtable = []
    states = []  # each DFA state is a subset (a set) of NFA states
    stopstates = set()
    # Alphabet: every non-epsilon transition label in the NFA.
    ids = set([x for dic in NFA_transtable for x in dic if x])

    def __find_dest(s, index):
        # Union of NFA states reachable from any state in s on `index`.
        dest = set()
        for x in s:
            dest.update(NFA_transtable[x].get(index, set()))
        return dest

    def __closure(s):
        # Epsilon-closure: follow None transitions until a fixpoint.
        l = len(s)
        s = set(s)
        x = s
        while True:
            x = __find_dest(x, None)
            s.update(x)
            if l == len(s):
                break
            l = len(s)
        return s

    def __check_state(s):
        # Register s as a DFA state; mark it accepting if it contains the
        # NFA stop state.
        if s not in states:
            states.append(s)
        if NFA_stopstate in s:
            # BUG FIX: the previous guard `s not in stopstates` hashed the
            # (unhashable) set `s` against a set of int indices, raising
            # TypeError for every accepting state. set.add is idempotent,
            # so no guard is needed.
            stopstates.add(states.index(s))

    def __add_trans(source, index, dest):
        x = states.index(source)
        while x >= len(transtable):
            transtable.append({})
        transtable[x][index] = states.index(dest)

    # Breadth-first exploration of reachable DFA states.
    d = deque()
    startstate = __closure(set([NFA_startstate]))
    d.append(startstate)
    while d:
        s = d.popleft()
        __check_state(s)
        for index in ids:
            dest = __closure(__find_dest(s, index))
            if dest:
                if dest not in states:
                    d.append(dest)
                __check_state(dest)
                __add_trans(s, index, dest)
    # Pad the table so every registered state has a (possibly empty) row.
    while len(states) > len(transtable):
        transtable.append({})
    return (transtable, states.index(startstate), stopstates)
def recompile(pattern):
    """Compile `pattern` into a __compiledre engine.

    Returns:
        A __compiledre instance, or None when the pattern is malformed.
    """
    NFA = _pattern2NFA(pattern)
    if NFA is None:
        # BUG FIX: _pattern2NFA signals a malformed pattern with None;
        # previously that fell straight into _NFA2DFA and raised a
        # TypeError on unpacking.
        return None
    return __compiledre(_NFA2DFA(NFA))
|
from PyQt5.QtGui import QPainter, QPen, QFont
from PyQt5.QtWidgets import QAbstractButton, QSlider, QWidget, QVBoxLayout, QHBoxLayout, \
QStyleOptionSlider, QStyle
from PyQt5.QtCore import Qt, QRect, QPoint
import numpy as np
class LabeledSlider(QWidget):
    """A QSlider wrapped in a layout, with a text label painted per tick.

    The layout margins start at 10 px and are enlarged on demand during
    painting so the labels are never clipped by the widget border.
    """

    def __init__(self, minimum, maximum, interval=1, orientation=Qt.Horizontal,
                 labels=None, p0=0, parent=None):
        """Initialize the labeled slider.

        Args:
            minimum: smallest slider value.
            maximum: largest slider value.
            interval: tick (and label) spacing in slider units.
            orientation: Qt.Horizontal or Qt.Vertical.
            labels: optional list/tuple of label strings, one per tick
                level; defaults to str() of each level value.
            p0: initial slider position.
            parent: optional parent widget.

        Raises:
            Exception: if labels is not a list/tuple, its size does not
                match the tick levels, or orientation is invalid.
        """
        super(LabeledSlider, self).__init__(parent=parent)
        levels = range(minimum, maximum + interval, interval)
        if labels is not None:
            if not isinstance(labels, (tuple, list)):
                raise Exception("<labels> is a list or tuple.")
            if len(labels) != len(levels):
                raise Exception("Size of <labels> doesn't match levels.")
            self.levels = list(zip(levels, labels))
        else:
            self.levels = list(zip(levels, map(str, levels)))
        # Labels are painted beside the slider, so the box layout runs
        # perpendicular to the slider orientation.
        if orientation == Qt.Horizontal:
            self.layout = QVBoxLayout(self)
        elif orientation == Qt.Vertical:
            self.layout = QHBoxLayout(self)
        else:
            raise Exception("<orientation> wrong.")
        # gives some space to print labels
        self.left_margin = 10
        self.top_margin = 10
        self.right_margin = 10
        self.bottom_margin = 10
        self.layout.setContentsMargins(self.left_margin, self.top_margin,
                                       self.right_margin, self.bottom_margin)
        self.sl = QSlider(orientation, self)
        self.sl.setMinimum(minimum)
        self.sl.setMaximum(maximum)
        self.sl.setValue(minimum)
        self.sl.setSliderPosition(p0)
        if orientation == Qt.Horizontal:
            self.sl.setTickPosition(QSlider.TicksBelow)
            self.sl.setMinimumWidth(300)  # just to make it easier to read
        else:
            self.sl.setTickPosition(QSlider.TicksLeft)
            self.sl.setMinimumHeight(300)  # just to make it easier to read
        self.sl.setTickInterval(interval)
        self.sl.setSingleStep(1)
        self.layout.addWidget(self.sl)

    def paintEvent(self, e):
        """Paint the widget, then draw one text label per tick level.

        Margins are grown (and the layout updated) whenever a label would
        be clipped, which triggers a relayout on the next pass.
        """
        super(LabeledSlider, self).paintEvent(e)
        style = self.sl.style()
        painter = QPainter(self)
        st_slider = QStyleOptionSlider()
        st_slider.initFrom(self.sl)
        st_slider.orientation = self.sl.orientation()
        length = style.pixelMetric(QStyle.PM_SliderLength, st_slider, self.sl)
        available = style.pixelMetric(QStyle.PM_SliderSpaceAvailable, st_slider, self.sl)
        for v, v_str in self.levels:
            # get the size of the label
            rect = painter.drawText(QRect(), Qt.TextDontPrint, v_str)
            if self.sl.orientation() == Qt.Horizontal:
                # I assume the offset is half the length of slider, therefore
                # + length//2
                if v < self.sl.maximum():
                    x_loc = QStyle.sliderPositionFromValue(self.sl.minimum(), self.sl.maximum(), v, available) + length // 2
                else:
                    # NOTE(review): the maximum label is positioned as if its
                    # value were `v - 10` — presumably to pull it inside the
                    # widget edge; confirm for intervals other than 10.
                    x_loc = QStyle.sliderPositionFromValue(self.sl.minimum(), self.sl.maximum(), v - 10, available) + length // 2
                # left bound of the text = center - half of text width + L_margin
                left = x_loc - rect.width() // 2 + self.left_margin
                bottom = self.rect().bottom()
                # enlarge margins if clipping
                if v == self.sl.minimum():
                    if left <= 0:
                        self.left_margin = rect.width() // 2 - x_loc
                    if self.bottom_margin <= rect.height():
                        self.bottom_margin = rect.height()
                    self.layout.setContentsMargins(self.left_margin,
                                                   self.top_margin, self.right_margin,
                                                   self.bottom_margin)
                if v == self.sl.maximum() and rect.width() // 2 >= self.right_margin:
                    self.right_margin = rect.width() // 2
                    self.layout.setContentsMargins(self.left_margin,
                                                   self.top_margin, self.right_margin,
                                                   self.bottom_margin)
            else:
                y_loc = QStyle.sliderPositionFromValue(self.sl.minimum(),
                                                       self.sl.maximum(), v, available, upsideDown=True)
                bottom = y_loc + length // 2 + rect.height() // 2 + self.top_margin - 3
                # there is a 3 px offset that I can't attribute to any metric
                left = self.left_margin - rect.width()
                if left <= 0:
                    self.left_margin = rect.width() + 2
                    self.layout.setContentsMargins(self.left_margin,
                                                   self.top_margin, self.right_margin,
                                                   self.bottom_margin)
            pos = QPoint(left, bottom)
            painter.drawText(pos, v_str)
        return
|
from os import path
from tempfile import gettempdir
from src.constants import _DEFAULT_OUT_DIR
import src.matrix_operations as matops
def add_subparser(subparsers):
    """Register the "make" sub-command on the given subparsers object.

    The sub-command takes either a positional SIZE (generate a random
    system of that size) or --from-legacy LEG_DIR (convert legacy files),
    but not both, plus an optional output directory.
    """
    make_parser = subparsers.add_parser(
        "make",
        description="Create matrix definition files suitable for use with solve",
        help="Create matrix definition files",
    )

    # SIZE and --from-legacy are alternative input sources.
    source_group = make_parser.add_mutually_exclusive_group()
    source_group.add_argument(
        "size", metavar="SIZE", type=int, nargs="?", help="Size of matrix A"
    )
    source_group.add_argument(
        "--from-legacy",
        dest="legacy_dir",
        metavar="LEG_DIR",
        type=str,
        help="Convert legacy files in directory LEG_DIR to updated format.",
    )

    make_parser.add_argument(
        "-d",
        "--directory",
        dest="directory",
        default=None,
        metavar="DIR",
        type=str,
        help="Directory to output to. If not absolute will be placed in tmp.",
    )
def _convert_legacy_files(directory):
    """Load legacy-format matrix files from `directory` and reshape them.

    Legacy files carry header entries first; the matrix payload starts at
    index 2 and the system size is stored in the first entry of matA.
    Returns the dict of reshaped matrices.
    """
    inputs = matops.load_files(directory, False, True)
    size = int(inputs["matA"][0])

    # Square system matrix; right-hand side and solution are column vectors.
    inputs["matA"] = matops.reshape(inputs["matA"][2:], (size, -1))
    for vector_key in ("matb", "matsoln"):
        inputs[vector_key] = matops.reshape(inputs[vector_key][2:], (-1, 1))
    # The relaxation factor is only present for some legacy data sets.
    if "omega" in inputs:
        inputs["omega"] = matops.reshape(inputs["omega"][2:], (-1, 1))
    return inputs
def main(options):
    """Entry point for the "make" sub-command.

    Args:
        options: parsed argparse namespace with `legacy_dir`, `size` and
            `directory` attributes (see add_subparser).

    Side effects:
        Writes the matrix definition files and prints the final output
        directory path.
    """
    # argparse always sets the attribute, so plain attribute access is
    # clearer than the previous getattr(options, "legacy_dir") call.
    if options.legacy_dir is not None:
        inputs = _convert_legacy_files(options.legacy_dir)
    else:
        # Generate a random diagonally-dominant system with a known solution.
        inputs = {}
        inputs["matA"] = matops.create_random_diagonal_dominate(options.size)
        inputs["matsoln"] = matops.create_random(options.size, True)
        inputs["matb"] = matops.multiply(inputs["matA"], inputs["matsoln"])

    out_dir = options.directory if options.directory is not None else _DEFAULT_OUT_DIR
    final_dir = matops.write_files(inputs, out_dir)
    print(final_dir)
|
def construct_series(list_dates):
    """Bin a list of date rows into monthly counts.

    Args:
        list_dates: iterable of rows whose first element renders (via str)
            as an ISO-style 'YYYY-MM-DD' date.

    Returns:
        A 12-element list of counts, index 0 being January.
    """
    series_dates = [0] * 12
    # Iterate the rows directly instead of indexing with range(len(...)).
    for row in list_dates:
        # The month is the middle component of the date string.
        month = int(str(row[0]).split("-")[1])
        series_dates[month - 1] += 1
    print("series", series_dates)
    return series_dates
def construct_path(series_dates):
    """Build an SVG path string outlining a 12-month histogram.

    The y values are normalized so the busiest month reaches the top of a
    50-unit-tall strip.

    NOTE(review): the path opens at x=0 and closes at x=200 although the
    horizontal scale spans 240 units (12 * 20) — confirm the intended width.
    """
    x_step = 240 / 12
    y_step = 50 / max(series_dates)
    segments = []
    for month_index in range(12):
        px = x_step * month_index
        py = 50 - y_step * series_dates[month_index]
        segments.append("L" + str(px) + "," + str(py) + " ")
    return "M0,50 " + "".join(segments) + "L200,50 Z"
def construct_svg_path(data):
    """Convert four lists of date rows into four SVG path strings.

    Args:
        data: sequence of at least four lists of date rows; an empty list
            yields an empty path string for that slot.

    Returns:
        A list of four SVG path strings.
    """
    # Removed a large hard-coded `dates` sample list that was assigned but
    # never read.
    svg_paths = []
    for category in data[:4]:
        svg_path = construct_path(construct_series(category)) if category else ""
        svg_paths.append(svg_path)
        print(svg_path)
    return svg_paths
"""
dates = [("2018-01-01",0), ("2018-01-06",0), ("2018-01-08",0), ("2018-02-01",0), ("2018-02-02",0), ("2018-02-09",0), ("2018-02-04",0), ("2018-03-09",0), ("2018-03-01",0), ("2018-04-01",0), ("2018-04-05",0),("2018-04-09",0) ,("2018-04-04",0), ("2018-05-01",0), ("2018-06-01",0), ("2018-06-01",0), ("2018-08-01",0)]
print(construct_svg_path([dates,dates,dates,dates])[0])
"""
|
# String repetition and concatenation demo.
# BUG FIX: the original used Python 2 print statements, a syntax error on
# Python 3; print() calls behave identically on both versions here.

# `*` repeats a string.
cad = "cadena" * 3
print(cad)

# `+` concatenates strings.
cad1 = " cadena 1"
cad2 = " cadena 2"
cadc = cad1 + cad2
print(cadc)
|
# Write a program, which will find all such numbers between 1000 and 3000
# (both included) such that each digit of the number is an even number.
# The numbers obtained should be printed in a comma-separated sequence on a single line.

def _all_digits_even(value):
    """Return True when every decimal digit of `value` is even."""
    return all(int(digit) % 2 == 0 for digit in str(value))

# BUG FIX: the original filter `i % 2 != 0` collected the ODD numbers,
# which neither matches the problem statement nor inspects the digits.
numbers = [str(i) for i in range(1000, 3001) if _all_digits_even(i)]
print(','.join(numbers))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.