| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
ros_packages/qd_control/src/benchmark_trajectory.py
|
Chrispako990210/S4H2022-QuadrUS-V2
| 7
|
12775851
|
<reponame>Chrispako990210/S4H2022-QuadrUS-V2
#! /usr/bin/env python3
import roslib
#roslib.load_manifest('joint_trajectory_test')
import rospy
import actionlib
from std_msgs.msg import Float64
import trajectory_msgs.msg
import control_msgs.msg
from trajectory_msgs.msg import JointTrajectoryPoint
from control_msgs.msg import JointTrajectoryAction, JointTrajectoryGoal, FollowJointTrajectoryAction, FollowJointTrajectoryGoal
import time
def move(jta, angle):
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ['J1']
point = JointTrajectoryPoint()
point.positions = [angle]
point.time_from_start = rospy.Duration(0.25)
goal.trajectory.points.append(point)
jta.send_goal_and_wait(goal)
if __name__ == '__main__':
rospy.init_node('joint_position_tester')
jta = actionlib.SimpleActionClient('/quadrus/leg_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
rospy.loginfo('Waiting for joint trajectory action')
jta.wait_for_server()
rospy.loginfo('Found joint trajectory action')
try:
while(True):
move(jta, 0)
rospy.loginfo('Sent cmd to 0')
time.sleep(5)
move(jta, 1.5708)
rospy.loginfo('Sent cmd to pi/2')
time.sleep(5)
except KeyboardInterrupt as e:
print(e)
| 2.296875
| 2
|
src/backend/libro/solicitar/app.py
|
gpeitzner/SA_EZREAD
| 0
|
12775852
|
<reponame>gpeitzner/SA_EZREAD<gh_stars>0
import os
import time
import json
from werkzeug.utils import secure_filename, send_file
from bson import ObjectId
from flask import Flask, request, jsonify
import pymongo
from flask_cors import CORS
import boto3
app = Flask(__name__)
CORS(app)
db_host = os.environ["db_host"] if "db_host" in os.environ else "localhost"
db_password = os.environ["db_password"] if "db_password" in os.environ else ""
db_port = int(os.environ["db_port"]) if "db_port" in os.environ else 27017
db_name = os.environ["db_name"] if "db_name" in os.environ else "ezread"
db_user = os.environ["db_user"] if "db_user" in os.environ else ""
s3_bucket_name = os.environ["s3_bucket_name"] if "s3_bucket_name" in os.environ else ""
s3_region_name = os.environ["s3_region_name"] if "s3_region_name" in os.environ else ""
s3_access_key_id = os.environ["s3_access_key_id"] if "s3_access_key_id" in os.environ else ""
s3_access_key = os.environ["s3_access_key"] if "s3_access_key" in os.environ else ""
s3 = boto3.client(
"s3",
region_name=s3_region_name,
aws_access_key_id=s3_access_key_id,
aws_secret_access_key=s3_access_key
)
client = pymongo.MongoClient(
host=db_host,
port=db_port,
username=db_user,
password=<PASSWORD>
)
db = client[str(db_name)]
col = db["solicitudes"]
@app.route("/solicitud", methods=["GET"])
def read():
id = request.args.get("id")
if id:
data = col.find_one({
'_id': ObjectId(str(id))
})
result = {
"id": str(data["_id"]),
"nombre": data["nombre"],
"autor": data["autor"],
"fecha": data["fecha"],
"url": data["url"]
}
return jsonify(result)
book_requests = col.find()
result = []
for book_request in book_requests:
result.append({
"id": str(book_request["_id"]),
"nombre": book_request["nombre"],
"autor": book_request["autor"],
"fecha": book_request["fecha"],
"url": book_request["url"]
})
return jsonify(result)
@app.route("/solicitud", methods=["POST"])
def create():
data = request.get_json()
book_name = data["nombre"]
book_autor = data["autor"]
book_date = data["fecha"]
book_url = data["url"]
new_book_request = col.insert_one(data)
return jsonify({
"id": str(new_book_request.inserted_id),
"nombre": book_name,
"autor": book_autor,
"fecha": book_date,
"url": book_url
})
@app.route("/solicitud/subir", methods=["POST"])
def upload():
new_file = request.files["file"]
if new_file:
sanitized_filename = secure_filename(new_file.filename)
filename = str(time.time()).replace(".", "")
file_extension = sanitized_filename.split(".")[1]
final_name = filename + "." + file_extension
new_file.save(final_name)
s3.upload_file(
Bucket=s3_bucket_name,
Filename=final_name,
Key=final_name,
ExtraArgs={'ACL': 'public-read'}
)
os.remove(final_name)
final_url = "https://" + s3_bucket_name + ".s3." + \
s3_region_name + ".amazonaws.com/"+final_name
return jsonify({"url": final_url})
else:
return jsonify({"message": "not file sended"}), 400
@app.route("/solicitud", methods=["DELETE"])
def delete():
id = request.args.get("id")
if id:
col.delete_one({
'_id': ObjectId(str(id))
})
return jsonify({"message": "book request deleted"})
return jsonify({"message": "empty id"}), 400
@app.route("/impuesto", methods=["GET"])
def taxes():
taxes = json.loads(open("taxes.json", "r").read())
return jsonify(taxes)
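# --- Usage sketch (illustrative; host and port are assumptions, not part of the
# original service): run the Flask app directly for local testing.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)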
| 2.03125
| 2
|
src/Engine/Trajectory/__init__.py
|
MiguelReuter/Volley-ball-game
| 4
|
12775853
|
<reponame>MiguelReuter/Volley-ball-game
# encoding : UTF-8
from .trajectory_solver import *
from .thrower_manager import ThrowerManager
from .trajectory import Trajectory
| 1.265625
| 1
|
src/routes.py
|
budavariam/activity-visualizer
| 0
|
12775854
|
from flask import send_from_directory
from appserver import app
@app.server.route('/static/<path>')
def serve_static(path):
return send_from_directory('assets', path)
| 2.09375
| 2
|
azure-devops/azext_devops/dev/common/config.py
|
vijayraavi/azure-devops-cli-extension
| 0
|
12775855
|
<reponame>vijayraavi/azure-devops-cli-extension
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import stat
from six.moves import configparser
from knack.config import CLIConfig, get_config_parser
from knack.util import ensure_dir
from .const import (AZ_DEVOPS_CONFIG_DIR_ENVKEY,
AZ_DEVOPS_DEFAULT_CONFIG_DIR,
CLI_ENV_VARIABLE_PREFIX,
CONFIG_FILE_NAME)
_UNSET = object()
def _get_config_dir():
azure_devops_config_dir = os.getenv(AZ_DEVOPS_CONFIG_DIR_ENVKEY, None) or AZ_DEVOPS_DEFAULT_CONFIG_DIR
# Create a directory if it doesn't exist
ensure_dir(azure_devops_config_dir)
return azure_devops_config_dir
AZ_DEVOPS_GLOBAL_CONFIG_DIR = _get_config_dir()
AZ_DEVOPS_GLOBAL_CONFIG_PATH = os.path.join(AZ_DEVOPS_GLOBAL_CONFIG_DIR, CONFIG_FILE_NAME)
class AzDevopsConfig(CLIConfig):
def __init__(self, config_dir=AZ_DEVOPS_GLOBAL_CONFIG_DIR, config_env_var_prefix=CLI_ENV_VARIABLE_PREFIX):
super(AzDevopsConfig, self).__init__(config_dir=config_dir, config_env_var_prefix=config_env_var_prefix)
self.config_parser = get_config_parser()
azdevops_config = AzDevopsConfig()
azdevops_config.config_parser.read(AZ_DEVOPS_GLOBAL_CONFIG_PATH)
def set_global_config(config):
ensure_dir(AZ_DEVOPS_GLOBAL_CONFIG_DIR)
with open(AZ_DEVOPS_GLOBAL_CONFIG_PATH, 'w') as configfile:
config.write(configfile)
os.chmod(AZ_DEVOPS_GLOBAL_CONFIG_PATH, stat.S_IRUSR | stat.S_IWUSR)
# reload config
azdevops_config.config_parser.read(AZ_DEVOPS_GLOBAL_CONFIG_PATH)
def set_global_config_value(section, option, value):
config = get_config_parser()
config.read(AZ_DEVOPS_GLOBAL_CONFIG_PATH)
try:
config.add_section(section)
except configparser.DuplicateSectionError:
pass
config.set(section, option, _normalize_config_value(value))
set_global_config(config)
def _normalize_config_value(value):
if value:
value = '' if value in ["''", '""'] else value
return value
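# --- Usage sketch (illustrative; the section and option names are assumptions,
# not part of the original module): persist a value to the global config file
# and read it back with a fresh parser.
if __name__ == "__main__":
    set_global_config_value("defaults", "organization", "https://dev.azure.com/example")
    reader = get_config_parser()
    reader.read(AZ_DEVOPS_GLOBAL_CONFIG_PATH)
    print(reader.get("defaults", "organization"))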
| 1.882813
| 2
|
ip_interceptor/middleware.py
|
jasonqiao36/django-forbidden-ip
| 0
|
12775856
|
<gh_stars>0
from django.http import HttpResponseForbidden
from django.utils.deprecation import MiddlewareMixin
from .models import ForbiddenIP
class IPInterceptorMiddleware(MiddlewareMixin):
def __init__(self, get_response):
self.get_response = get_response
def validate_ip(self, remote_ip):
if ForbiddenIP.objects.filter(ip=remote_ip).count():
return False
return True
def process_request(self, request):
ip = request.META.get('REMOTE_ADDR')
if not self.validate_ip(ip):
return HttpResponseForbidden('')
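# --- Usage sketch (illustrative): enable the middleware by listing its dotted
# path in the project's settings.py; the surrounding entries are assumptions.
# MIDDLEWARE = [
#     'django.middleware.security.SecurityMiddleware',
#     'ip_interceptor.middleware.IPInterceptorMiddleware',
#     # ...
# ]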
| 2.203125
| 2
|
ptbaselines/algos/ddpg/models.py
|
KongCDY/baselines_pytorch
| 0
|
12775857
|
<reponame>KongCDY/baselines_pytorch<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
from ptbaselines.algos.common.models import get_network_builder
from ptbaselines.algos.common.torch_utils import init_weight
class Actor(nn.Module):
def __init__(self, env, network = 'mlp', **network_kwargs):
super(Actor, self).__init__()
self.num_actions = env.action_space.shape[-1]
self.base_net = get_network_builder(network)(env.observation_space.shape, **network_kwargs)
self.fc = nn.Linear(self.base_net.out_dim, self.num_actions)
# init
init.uniform_(self.fc.weight.data, -3e-3, 3e-3)
init.uniform_(self.fc.bias.data, -3e-3, 3e-3)
def forward(self, obs):
latent = self.base_net(obs)
latent = latent.view(obs.size(0), self.base_net.out_dim)
action = torch.tanh(self.fc(latent))
return action
class Critic(nn.Module):
def __init__(self, env, network = 'mlp', **network_kwargs):
super(Critic, self).__init__()
self.num_actions = env.action_space.shape[-1]
input_shape = list(env.observation_space.shape)
input_shape[-1] += self.num_actions
self.base_net = get_network_builder(network)(input_shape, **network_kwargs)
self.fc = nn.Linear(self.base_net.out_dim, 1)
# init
init.uniform_(self.fc.weight.data, -3e-3, 3e-3)
init.uniform_(self.fc.bias.data, -3e-3, 3e-3)
def forward(self, obs, action):
x = torch.cat([obs, action], dim = -1)
latent = self.base_net(x)
latent = latent.view(obs.size(0), self.base_net.out_dim)
value = self.fc(latent)
return value
| 2.15625
| 2
|
jobboard/messages.py
|
OnGridSystems/RobotVeraWebApp
| 11
|
12775858
|
from django.utils.translation import ugettext_lazy as _
MESSAGES = {
'VacancyChange': _('Vacancy status change now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'Not_VacancyChange':
'<span class="red-text" data-uk-icon="ban"></span> To add new pipeline action you have to disable vacancy.',
'ActionDeleted': _('Action delete now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'NewAction': _(
'Transaction for add new action now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'Subscribe': _(
'Transaction for subscribe to vacancy now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'tokenApprove': _('Approving tokens for platform now pending') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'ChangeStatus': _('Changing status now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'NewMember': _('Your new contract now creating...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'NewVacancy': _('Your new vacancy now creating...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'NewCompany': _('New company contract now creating...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
'MemberSubscribe': _('Vacancy subscribing now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
}
| 1.796875
| 2
|
src/simplempu2.py
|
k323r/roller
| 0
|
12775859
|
import machine
class MPUSimple():
def __init__(self, i2c, address=0x69):
self._i2c = i2c
self._addr = address
self._i2c.start()
self._i2c.writeto(self._addr, bytearray([107, 0]))
self._i2c.stop()
def _get_raw_values(self):
self._i2c.start()
raw_data = self._i2c.readfrom_mem(self._addr, 0x3B, 14)
self._i2c.stop()
return raw_data
def _bytes2Int(self, firstbyte, secondbyte):
if not firstbyte & 0x80:
return firstbyte << 8 | secondbyte
return -((((firstbyte ^ 255) << 8) | (secondbyte ^ 255)) + 1)  # two's complement for negative readings
def getSensors(self):
raw_data = self._get_raw_values()
vals = {}
vals["AcX"] = self._bytes2Int(raw_data[0], raw_data[1])
vals["AcY"] = self._bytes2Int(raw_data[2], raw_data[3])
vals["AcZ"] = self._bytes2Int(raw_data[4], raw_data[5])
vals["Tmp"] = self._bytes2Int(raw_data[6], raw_data[7]) / 340.00 + 36.53
vals["GyX"] = self._bytes2Int(raw_data[8], raw_data[9])
vals["GyY"] = self._bytes2Int(raw_data[10], raw_data[11])
vals["GyZ"] = self._bytes2Int(raw_data[12], raw_data[13])
return vals # returned in range of Int16
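# --- Usage sketch (illustrative; the SoftI2C constructor and pin numbers are
# assumptions that depend on the MicroPython port and board wiring):
if __name__ == "__main__":
    i2c = machine.SoftI2C(scl=machine.Pin(22), sda=machine.Pin(21))
    mpu = MPUSimple(i2c)
    print(mpu.getSensors())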
| 2.609375
| 3
|
adventure_anywhere/s3_bucket_saves.py
|
zhammer/adventure-anywhere
| 0
|
12775860
|
import io
from typing import Optional
import boto3
import botocore
from adventure_anywhere.definitions import SavesGateway
s3 = boto3.resource("s3")
class S3BucketSavesGateway(SavesGateway):
bucket_name: str
def __init__(self, bucket_name: str) -> None:
self.bucket_name = bucket_name
def fetch_save(self, player_id: str) -> Optional[io.BytesIO]:
s3_object = s3.Object(self.bucket_name, player_id)
try:
content = s3_object.get()
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
return None
raise e
return io.BytesIO(content["Body"].read())
def update_save(self, player_id: str, save: io.BytesIO) -> None:
s3_object = s3.Object(self.bucket_name, player_id)
s3_object.put(Body=save.getvalue())
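# --- Usage sketch (illustrative; the bucket name and player id are assumptions):
# round-trip a save through the gateway.
if __name__ == "__main__":
    gateway = S3BucketSavesGateway(bucket_name="adventure-anywhere-saves")
    gateway.update_save("player-123", io.BytesIO(b"saved game state"))
    restored = gateway.fetch_save("player-123")
    print(restored.getvalue() if restored else "no save found")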
| 2.296875
| 2
|
actrie/__init__.py
|
ifplusor/actrie
| 8
|
12775861
|
#!/usr/bin/env python
# encoding=utf-8
from .matcher import Matcher, Context, PrefixMatcher
__all__ = ["Matcher", "Context", "PrefixMatcher"]
__version__ = "3.2.4"
| 1.195313
| 1
|
fym/models/quadrotor.py
|
JungYT/fym
| 14
|
12775862
|
import numpy as np
from fym.core import BaseEnv, BaseSystem
from fym.utils import rot
def hat(v):
v1, v2, v3 = v.squeeze()
return np.array([
[0, -v3, v2],
[v3, 0, -v1],
[-v2, v1, 0]
])
class Quadrotor(BaseEnv):
"""
Prof. <NAME>'s model for quadrotor UAV is used.
- https://www.math.ucsd.edu/~mleok/pdf/LeLeMc2010_quadrotor.pdf
Description:
- an NED frame is used for the inertia and body fixed frame.
Hence, `+z` direction is downward.
- ``pos`` and ``vel`` are resolved in the inertial frame,
whereas ``R`` and ``omega`` are resolved in the body frame
- ``fis`` is a vector of thrusts generated by the rotors.
Variables:
R: SO(3)
The rotation matrix from the body-fixed frame to the inertial frame
R = C_{i/b} = C_{b/i}^T
"""
g = 9.81 # m/s^2
e3 = np.vstack((0, 0, 1))
J = np.diag([0.0820, 0.0845, 0.1377])
m = 4.34 # Mass
d = 0.315 # The distance from the center of mass to the center of each rotor
ctf = 8.004e-4 # The torque coefficient. ``torque_i = (-1)^i ctf f_i``
B = np.array(
[[1, 1, 1, 1],
[0, -d, 0, d],
[d, 0, -d, 0],
[-ctf, ctf, -ctf, ctf]]
)
Binv = np.linalg.pinv(B)
name = "quadrotor"
def __init__(self,
pos=np.zeros((3, 1)),
vel=np.zeros((3, 1)),
R=np.eye(3),
omega=np.zeros((3, 1)),
config="Quadrotor"):
super().__init__()
self.pos = BaseSystem(pos)
self.vel = BaseSystem(vel)
self.R = BaseSystem(R)
self.omega = BaseSystem(omega)
def deriv(self, pos, vel, R, omega, fis):
m, g, J, e3 = self.m, self.g, self.J, self.e3
f, *M = self.fis2fM(fis)
M = np.vstack(M)
dpos = vel
dvel = g * e3 - f * R @ e3 / m
dR = R @ hat(omega)
domega = np.linalg.inv(J).dot(M - np.cross(omega, J.dot(omega), axis=0))
return dpos, dvel, dR, domega
def set_dot(self, t, fis):
pos, vel, R, omega = self.observe_list()
dots = self.deriv(pos, vel, R, omega, fis)
self.pos.dot, self.vel.dot, self.R.dot, self.omega.dot = dots
def fis2fM(self, fis):
"""Convert f_i's to force and moments
Parameters:
fis: (4, 1) array
Return:
f, M1, M2, M3: (4,) array of force and moments
"""
return (self.B @ fis).ravel()
def fM2fis(self, f, M1, M2, M3):
"""Convert force and moments to f_i's
Parameters:
f: scalar, the total thrust
M1, M2, M3: scalars, the moments
Return:
fis: (4, 1) array of f_i's
"""
return self.Binv @ np.vstack((f, M1, M2, M3))
def angle2R(self, angle):
"""angle: phi, theta, psi in radian"""
return rot.angle2dcm(*np.ravel(angle)[::-1]).T
def R2angle(self, R):
"""angle: phi, theta, psi in radian"""
return rot.dcm2angle(R.T)[::-1]
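# --- Usage sketch (illustrative; assumes fym's BaseEnv/BaseSystem can be used
# standalone like this): split the hover thrust m*g over the four rotors and
# evaluate the state derivatives at the default level, at-rest state.
if __name__ == "__main__":
    quad = Quadrotor()
    fis = quad.fM2fis(quad.m * quad.g, 0, 0, 0)  # total thrust equals weight, zero moments
    pos, vel, R, omega = quad.observe_list()
    dpos, dvel, dR, domega = quad.deriv(pos, vel, R, omega, fis)
    print(dvel.ravel())  # approximately zero acceleration in hover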
| 2.734375
| 3
|
app.py
|
mattfeltonma/azure-tenant-activity-logs
| 0
|
12775863
|
import os
import sys
import logging
import json
import requests
import datetime
from msal import ConfidentialClientApplication
# Reusable function to create a logging mechanism
def create_logger(logfile=None):
# Create a logging handler that will write to stdout and optionally to a log file
stdout_handler = logging.StreamHandler(sys.stdout)
try:
if logfile != None:
file_handler = logging.FileHandler(filename=logfile)
handlers = [file_handler, stdout_handler]
else:
handlers = [stdout_handler]
except:
handlers = [stdout_handler]
logging.error('Log file could not be created. Error: ', exc_info=True)
# Configure logging mechanism
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=handlers
)
# Reusable function to obtain an access token
def get_token(resource):
client = ConfidentialClientApplication(
client_id=os.getenv('CLIENT_ID'),
client_credential=os.getenv('CLIENT_SECRET'),
authority='https://login.microsoftonline.com/' +
os.getenv('TENANT_NAME')
)
logging.info('Issuing request to obtain access token...')
response = client.acquire_token_for_client(resource)
if "token_type" in response:
logging.info('Access token obtained successfully.')
return response['access_token']
else:
logging.error('Error obtaining access token')
logging.error(response['error'] + ': ' + response['error_description'])
# Query Azure REST API
def rest_api_request(url, token, query_params=None):
try:
# Create authorization header
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(token)}
# Issue request to Azure API
logging.info(f"Issuing request to {url}")
response = requests.get(
headers=headers,
url=url,
params=query_params
)
# Validate and process response
if response.status_code == 200:
return json.loads(response.text)
else:
logging.error('Error encountered querying Azure API')
logging.error(
f"Error code was: {(json.loads(response.text))['code']}")
logging.error(
f"Error message was: {(json.loads(response.text))['message']}")
raise Exception
except Exception:
return json.loads(response.text)
def main():
# Create logging mechanism
create_logger()
# Obtain an access token to Azure REST API
token = get_token(
resource="https://management.core.windows.net//.default"
)
# Create date/time stamps and filter
todaydate = (datetime.datetime.now() +
datetime.timedelta(days=int(2))).strftime("%Y-%m-%d")
startdate = (datetime.datetime.today() -
datetime.timedelta(days=int(os.getenv('DAYS')))).strftime("%Y-%m-%d")
filter = "eventTimestamp ge " + startdate + " and eventTimestamp le " + \
todaydate + " and eventChannels eq 'Admin,Operation'" #and resourceProvider eq 'Microsoft.Authorization'"
# Get first set of tenant activity logs and write to a file
response = rest_api_request(
url="https://management.azure.com/providers/Microsoft.Insights/eventtypes/management/values",
token=token,
query_params={
'api-version': '2015-04-01',
'$filter': filter
}
)
# Create a new file and get it formatted for an array of json objects
logging.info('Creating output file...')
try:
with open('logs.json', 'w') as log_file:
log_file.write('[')
except Exception:
logging.error('Output file could not be created. Error: ', exc_info=True)
# Iterate through each returned log and write it the file
logging.info('Adding entries to output file...')
try:
with open('logs.json', 'a') as log_file:
for log_entry in response['value']:
log_file.write(json.dumps(log_entry) + ',')
except Exception:
logging.error('Unable to append to log file. Error: ', exc_info=True)
# If paged results are returned, retreive them and write to a file
while 'nextLink' in response:
logging.info(
f"Paged results returned. Retrieving from {response['nextLink']}")
response = rest_api_request(
url=response['nextLink'],
token=token,
query_params={
}
)
try:
with open('logs.json', 'a') as log_file:
for log_entry in response['value']:
log_file.write(json.dumps(log_entry) + ',')
except Exception:
logging.error('Unable to append to output file. Error: ', exc_info=True)
# Remove the trailing comma from the file
try:
logging.info('Formatting output file...')
with open('logs.json', 'rb+') as log_file:
log_file.seek(-1, os.SEEK_END)
log_file.truncate()
# Close out the array
with open('logs.json', 'a') as log_file:
log_file.write(']')
logging.info('Output file created successfully.')
except Exception:
logging.error('Unable to format output file. Error: ', exc_info=True)
if __name__ == "__main__":
main()
| 2.578125
| 3
|
cnn_train.py
|
sg-nm/Operation-wise-attention-network
| 102
|
12775864
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from skimage.measure import compare_psnr as ski_psnr
from skimage.measure import compare_ssim as ski_ssim
import os
import csv
import logging
from model import Network
import torch.nn.functional as F
from data_load_own import get_training_set, get_test_set
from data_load_mix import get_dataset_deform
import utils
class CNN_train():
def __init__(self, dataset_name, imgSize=63, batchsize=32):
self.imgSize = imgSize
self.batchsize = batchsize
self.dataset_name = dataset_name
# load dataset
if dataset_name == 'mix' or dataset_name == 'yourdata':
if dataset_name == 'mix':
self.num_work = 8
train_dir = '/dataset/train/'
val_dir = '/dataset/val/'
test_dir = '/dataset/test/'
train_set = get_dataset_deform(train_dir, val_dir, test_dir, 0)
val_set = get_dataset_deform(train_dir, val_dir, test_dir, 1)
# test_set = get_dataset_deform(train_dir, val_dir, test_dir, 2)
self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, pin_memory=True)
self.val_loader = DataLoader(dataset=val_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
# self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
elif dataset_name == 'yourdata':
self.num_work = 8
# Specify the path of your data
train_input_dir = '/dataset/yourdata_train/input/'
train_target_dir = '/dataset/yourdata_train/target/'
test_input_dir = '/dataset/yourdata_test/input/'
test_target_dir = '/dataset/yourdata_test/target/'
train_set = get_training_set(train_input_dir, train_target_dir, True)
test_set = get_training_set(test_input_dir, test_target_dir, False)
self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, drop_last=True)
self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False)
else:
print('\tInvalid input dataset name at CNN_train()')
exit(1)
def __call__(self, cgp, gpuID, epoch_num=150, gpu_num=1):
print('GPUID :', gpuID)
print('epoch_num:', epoch_num)
# define model
torch.manual_seed(2018)
torch.cuda.manual_seed(2018)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
L1_loss = nn.L1Loss()
L1_loss = L1_loss.cuda(gpuID)
model = Network(16, 10, L1_loss, gpuID=gpuID)
if gpu_num > 1:
device_ids = [i for i in range(gpu_num)]
model = torch.nn.DataParallel(model, device_ids=device_ids)
model = model.cuda(gpuID)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
print('Param:', utils.count_parameters_in_MB(model))
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epoch_num)
test_interval = 5
# for output images
if not os.path.exists('./results'):
os.makedirs('./results/Inputs')
os.makedirs('./results/Outputs')
os.makedirs('./results/Targets')
# Train loop
for epoch in range(1, epoch_num+1):
scheduler.step()
start_time = time.time()
print('epoch', epoch)
train_loss = 0
for module in model.children():
module.train(True)
for ite, (input, target) in enumerate(self.dataloader):
lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
optimizer.zero_grad()
output = model(lr_patch)
l1_loss = L1_loss(output, hr_patch)
l1_loss.backward()
optimizer.step()
train_loss += l1_loss.item()
if ite % 500 == 0:
vutils.save_image(lr_patch.data, './input_sample%d.png' % gpuID, normalize=False)
vutils.save_image(hr_patch.data, './target_sample%d.png' % gpuID, normalize=False)
vutils.save_image(output.data, './output_sample%d.png' % gpuID, normalize=False)
print('Train set : Average loss: {:.4f}'.format(train_loss))
print('time ', time.time()-start_time)
# check val/test performance
if epoch % test_interval == 0:
with torch.no_grad():
print('------------------------')
for module in model.children():
module.train(False)
test_psnr = 0
test_ssim = 0
eps = 1e-10
test_ite = 0
for i, (input, target) in enumerate(self.val_loader):
lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
output = model(lr_patch)
# save images
vutils.save_image(output.data, './results/Outputs/%05d.png' % (int(i)), padding=0, normalize=False)
vutils.save_image(lr_patch.data, './results/Inputs/%05d.png' % (int(i)), padding=0, normalize=False)
vutils.save_image(hr_patch.data, './results/Targets/%05d.png' % (int(i)), padding=0, normalize=False)
# Calculation of SSIM and PSNR values
output = output.data.cpu().numpy()[0]
output[output>1] = 1
output[output<0] = 0
output = output.transpose((1,2,0))
hr_patch = hr_patch.data.cpu().numpy()[0]
hr_patch[hr_patch>1] = 1
hr_patch[hr_patch<0] = 0
hr_patch = hr_patch.transpose((1,2,0))
# SSIM
test_ssim+= ski_ssim(output, hr_patch, data_range=1, multichannel=True)
# PSNR
imdf = (output - hr_patch) ** 2
mse = np.mean(imdf) + eps
test_psnr+= 10 * math.log10(1.0/mse)
test_ite += 1
test_psnr /= (test_ite)
test_ssim /= (test_ite)
print('Valid PSNR: {:.4f}'.format(test_psnr))
print('Valid SSIM: {:.4f}'.format(test_ssim))
f = open('PSNR.txt', 'a')
writer = csv.writer(f, lineterminator='\n')
writer.writerow([epoch, test_psnr, test_ssim])
f.close()
print('------------------------')
torch.save(model.state_dict(), './model_%d.pth' % int(epoch))
return train_loss
| 2.3125
| 2
|
smc-monitoring/smc_monitoring/wsocket.py
|
gabstopper/smc-python
| 30
|
12775865
|
<reponame>gabstopper/smc-python
import os
import ssl
import json
import select
import logging
import threading
from pprint import pformat
from smc import session
import websocket
logger = logging.getLogger(__name__)
def websocket_debug():
websocket.enableTrace(True)
class FetchAborted(Exception):
pass
class InvalidFetch(Exception):
pass
class SessionNotFound(Exception):
pass
def _get_ca_bundle():
"""
If verify=True, then requests is using the built in
certifi CA database. Attempt to get that path for
the websocket.
"""
try:
import certifi
return certifi.where()
except ImportError:
pass
class SMCSocketProtocol(websocket.WebSocket):
"""
SMCSocketProtocol manages the web socket connection between this
client and the SMC. It provides the interface to monitor the query
results and yield them back to the caller as a context manager.
"""
def __init__(self, query, sock_timeout=3, **kw):
"""
Initialize the web socket.
:param Query query: Query type from `smc_monitoring.monitors`
:param int sock_timeout: length of time to wait on a select call
before trying to receive data. For LogQueries, this should be
short, i.e. 1 second. For other queries the default is 3 sec.
:param int max_recv: for queries that are not 'live', set
this to supply a max number of receive iterations.
:param kw: supported keyword args:
cert_reqs: ssl.CERT_NONE|ssl.CERT_REQUIRED|ssl.CERT_OPTIONAL
check_hostname: True|False
enable_multithread: True|False (Default: True)
.. note:: The keyword args are not required unless you want to override
default settings. If SSL is used for the SMC session, the settings
for verifying the server with the root CA is based on whether the
'verify' setting has been provided with a path to the root CA file.
"""
if not session.session or not session.session.cookies:
raise SessionNotFound('No SMC session found. You must first '
'obtain an SMC session through session.login before making '
'a web socket connection.')
sslopt = {}
if session.is_ssl:
# SSL verification is based on the session settings since the
# session must be made before calling this class. If verify=True,
# try to get the CA bundle from certifi if the package exists
# Set check_hostname to False because python ssl doesn't appear
# to validate the subjectAltName properly, however requests does
# and would have already validated this when the session was set
# up. This can still be overridden by setting check_hostname=True.
sslopt.update(
cert_reqs=ssl.CERT_NONE,
check_hostname=False)
certfile = session.session.verify
if certfile:
if isinstance(certfile, bool): # verify=True
certfile = _get_ca_bundle()
if certfile is None:
certfile = ''
sslopt.update(
cert_reqs=kw.pop('cert_reqs', ssl.CERT_REQUIRED),
check_hostname=kw.pop('check_hostname', False))
if sslopt.get('cert_reqs') != ssl.CERT_NONE:
os.environ['WEBSOCKET_CLIENT_CA_BUNDLE'] = certfile
# Enable multithread locking
if 'enable_multithread' not in kw:
kw.update(enable_multithread=True)
# Max number of receives, configurable for batching
self.max_recv = kw.pop('max_recv', 0)
super(SMCSocketProtocol, self).__init__(sslopt=sslopt, **kw)
self.query = query
self.fetch_id = None
# Inner thread used to keep socket select alive
self.thread = None
self.event = threading.Event()
self.sock_timeout = sock_timeout
def __enter__(self):
self.connect(
url=session.web_socket_url + self.query.location,
cookie=session.session_id)
if self.connected:
self.settimeout(self.sock_timeout)
self.on_open()
return self
def __exit__(self, exctype, value, traceback):
if exctype in (SystemExit, GeneratorExit):
return False
elif exctype in (InvalidFetch,):
raise FetchAborted(value)
return True
def on_open(self):
"""
Once the connection is made, start the query off and
start an event loop to wait for a signal to
stop. Results are yielded within receive().
"""
def event_loop():
logger.debug(pformat(self.query.request))
self.send(json.dumps(self.query.request))
while not self.event.is_set():
#print('Waiting around on the socket: %s' % self.gettimeout())
self.event.wait(self.gettimeout())
logger.debug('Event loop terminating.')
self.thread = threading.Thread(
target=event_loop)
self.thread.daemon = True
self.thread.start()
def send_message(self, message):
"""
Send a message down the socket. The message is expected
to have a `request` attribute that holds the message to
be serialized and sent.
"""
if self.connected:
self.send(
json.dumps(message.request))
def abort(self):
"""
Abort the connection
"""
logger.info("Abort called, cleaning up.")
raise FetchAborted
def receive(self):
"""
Generator yielding results from the web socket. Results
will come as they are received. Even though socket select
has a timeout, the SMC will not reply with a message more
than every two minutes.
"""
try:
itr = 0
while self.connected:
r, w, e = select.select(
(self.sock, ), (), (), 10.0)
if r:
message = json.loads(self.recv())
if 'fetch' in message:
self.fetch_id = message['fetch']
if 'failure' in message:
raise InvalidFetch(message['failure'])
if 'records' in message:
if 'added' in message['records']:
num = len(message['records']['added'])
else:
num = len(message['records'])
logger.debug('Query returned %s records.', num)
if self.max_recv:
itr += 1
if 'end' in message:
logger.debug('Received end message: %s' % message['end'])
yield message
break
yield message
if self.max_recv and self.max_recv <= itr:
break
except (Exception, KeyboardInterrupt, SystemExit, FetchAborted) as e:
logger.info('Caught exception in receive: %s -> %s', type(e), str(e))
if isinstance(e, (SystemExit, InvalidFetch)):
# propagate SystemExit, InvalidFetch
raise
finally:
if self.connected:
if self.fetch_id:
self.send(json.dumps({'abort': self.fetch_id}))
self.close()
if self.thread:
self.event.set()
while self.thread.is_alive():
self.event.wait(1)
logger.info('Closed web socket connection normally.')
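# --- Usage sketch (illustrative; the LogQuery import path and login call are
# assumptions about the wider smc-python API, see the class docstring above):
# from smc import session
# from smc_monitoring.monitors.logs import LogQuery
# session.login(url='https://smc-host:8082', api_key='<api key>')
# with SMCSocketProtocol(LogQuery(fetch_size=50), sock_timeout=1) as protocol:
#     for message in protocol.receive():
#         for record in message.get('records', {}).get('added', []):
#             print(record)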
| 2.5
| 2
|
IssueTypes.py
|
ckelleyRH/3age
| 0
|
12775866
|
from enum import Enum
class IssueTypes(Enum):
REGRESSIONS = "REGRESSIONS"
NEW = "NEW"
OLD = "OLD"
| 2.296875
| 2
|
sampler/Sampler.py
|
epistoteles/unlearning-fairness
| 0
|
12775867
|
<filename>sampler/Sampler.py
from statistics import harmonic_mean
from scipy.interpolate import CubicSpline
import random
class Sampler:
def __init__(self, strategy=None):
if strategy is None:
strategy = ['age', 'gender', 'race']
if not set(strategy).issubset({'age', 'gender', 'race'}):
raise ValueError('Strategy can only be age, gender and/or race or empty list (=random)')
# approximated from Special Eurobarometer 487a
# 0 = 0-2, 1 = 3-9, 2 = 10-20, 3 = 21-27, 4 = 28-45, 5 = 46-65, 6 = 66-120
x = [0, 10, 20, 25, 33, 40, 48, 53, 62, 80, 90, 100, 120]
y = [0.3, 0.32, 0.36, 0.4, 0.45, 0.46, 0.43, 0.36, 0.29, 0.23, 0.21, 0.2, 0.18]
cs = CubicSpline(x, y)
keys = list(range(0, 121))
values = [float(cs(x)) for x in keys]
age_probabilities = dict(zip(keys, values)) # {0: 0.3, 1: 0.302, ..., 120: 0.18}
# directly taken from Special Eurobarometer 487a
# 0 = male, 1 = female
gender_probabilities = {0: 0.38, 1: 0.34}
# roughly estimated with SAT score distribution over races
# 0 = white, 1 = black, 2 = asian, 3 = indian, 4 = others
race_probabilities = {0: 0.4, 1: 0.3, 2: 0.43, 3: 0.43, 4: 0.32}
self.probabilities = {'age': age_probabilities,
'gender': gender_probabilities,
'race': race_probabilities}
self.strategy = strategy
def get_gdpr_knowledge(self, face):
probabilities = [self.probabilities[feature][face.__dict__[feature]] for feature in self.strategy]
return harmonic_mean(probabilities) if probabilities else 0.36
def changed_privacy_settings(self, face):
knowledge = self.get_gdpr_knowledge(face)
# around 23.7% of users have ever changed their privacy settings (0.36 * 0.66 = 0.237)
return random.random() < (knowledge * 0.66)
def sample_unlearning_request(self, face):
knowledge = self.get_gdpr_knowledge(face)
# 0.1 percent of people who know the GDPR will make a request
# for 23700 people, this means an average of 8.532 unlearning requests (23700 * 0.36 * 0.001 = 8.532)
return random.random() < (knowledge * 0.001)
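# --- Usage sketch (illustrative; the Face stand-in below is an assumption, since
# the real project presumably supplies its own face objects with these attributes):
if __name__ == "__main__":
    class Face:
        def __init__(self, age, gender, race):
            self.age, self.gender, self.race = age, gender, race
    sampler = Sampler(strategy=['age', 'gender'])
    face = Face(age=33, gender=1, race=0)
    print("GDPR knowledge:", sampler.get_gdpr_knowledge(face))
    print("unlearning request sampled:", sampler.sample_unlearning_request(face))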
| 3.03125
| 3
|
app/__init__.py
|
rockyCheung/pursoul
| 2
|
12775868
|
<gh_stars>1-10
#coding:utf-8
from flask import Flask, request, redirect
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_migrate import Migrate
from datetime import datetime
from config import config, Config
bootstrap = Bootstrap()
db = SQLAlchemy()
migrate = Migrate()
login_manager = LoginManager()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
bootstrap.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
login_manager.init_app(app)
# home 蓝图
from .home import home as home_blueprint
app.register_blueprint(home_blueprint)
# user蓝图
from .user import user as user_blueprint
app.register_blueprint(user_blueprint, url_prefix="/user")
#文章
from .article import article as article_blueprint
app.register_blueprint(article_blueprint, url_prefix="/article")
# setting 蓝图
from .setting import setting as setting_blueprint
app.register_blueprint(setting_blueprint, url_prefix="/setting")
login_manager.login_view = "home.login"
login_manager.login_message = "请先登录!!!"
@app.template_filter("omit")
def omit(data, length):
if len(data) > length:
return data[:length-3] + '...'
return data
@app.template_filter("friendly_time")
def friendly_time(date):
delta = datetime.now() - date
if delta.days >= 365:
return u'%d年前' % (delta.days / 365)
elif delta.days >= 30:
return u'%d个月前' % (delta.days / 30)
elif delta.days > 0:
return u'%d天前' % delta.days
elif delta.seconds < 60:
return u"%d秒前" % delta.seconds
elif delta.seconds < 60 * 60:
return u"%d分钟前" % (delta.seconds / 60)
else:
return u"%d小时前" % (delta.seconds / 60 / 60)
return app
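# --- Usage sketch (illustrative; the 'default' config key is an assumption about
# the config module this factory reads from):
# from app import create_app
# app = create_app('default')
# app.run(host='127.0.0.1', port=5000)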
| 2.5
| 2
|
stringFunctions.py
|
marcos8896/Python-Crash-Course-For-Beginners
| 0
|
12775869
|
<filename>stringFunctions.py
#String functions
myStr = 'Hello world!'
#Capitalize
print(myStr.capitalize())
#Swap case
print(myStr.swapcase())
#Get length
print(len(myStr))
#Replace
print(myStr.replace('world', 'everyone'))
#Count
sub = 'l'
print(myStr.count(sub))
#Startswith
print(myStr.startswith('Hello'))
#Endswith
print(myStr.endswith('Hello'))
#Split to list
print(myStr.split())
#Find
print(myStr.find('world'))
#Index
print(myStr.index('world'))
#Is all alphanumeric?
print(myStr.isalnum())
#Is all alphabetic?
print(myStr.isalpha())
#Is all numeric?
print(myStr.isnumeric())
| 3.828125
| 4
|
cogrun.py
|
dribnet/ResearchPortfolioCode
| 0
|
12775870
|
<filename>cogrun.py<gh_stars>0
# Prediction interface for Cog ⚙️
# Reference: https://github.com/replicate/cog/blob/main/docs/python.md
import os
import cog
import pathlib
from pathlib import Path
from explorer import do_setup, perform_analysis, prepare_folder
# https://stackoverflow.com/a/6587648/1010653
import tempfile, shutil
def create_temporary_copy(src_path):
_, tf_suffix = os.path.splitext(src_path)
temp_dir = tempfile.gettempdir()
temp_path = os.path.join(temp_dir, f"tempfile{tf_suffix}")
shutil.copy2(src_path, temp_path)
return temp_path
class Explorer(cog.Predictor):
def setup(self):
"""Load the model into memory to make running multiple predictions efficient"""
do_setup()
self.photos_path = Path('twem/')
self.features_path, self.analysis_path = prepare_folder(self.photos_path)
@cog.input("query", type=str, help="text to input")
def predict(self, query):
"""Run a single prediction on the model"""
numberResults = 5
newAnalysisPath = perform_analysis(query, numberResults, self.photos_path, self.features_path, self.analysis_path, show_result=0)
temp_copy = create_temporary_copy(f"{newAnalysisPath}/analysisResults.jpg")
return pathlib.Path(os.path.realpath(temp_copy))
| 2.609375
| 3
|
rsHRF/rsHRF_GUI/datatypes/timeseries/bold_preprocessed.py
|
BIDS-Apps/rsHRF
| 16
|
12775871
|
<reponame>BIDS-Apps/rsHRF
import numpy as np
from scipy.io import savemat
from copy import deepcopy
from ...datatypes.misc.parameters import Parameters
from .timeseries import TimeSeries
from .bold_raw import BOLD_Raw
class BOLD_Preprocessed(TimeSeries):
"""
This stores the Preprocessed BOLD Time-series
Attributes:
1. BOLD_Raw : The Raw BOLD time-series object through which it was derived
2. mask_file : The mask-file path
"""
def __init__(self, label="",ts=np.array([]),subject_index="", para=Parameters()):
TimeSeries.__init__(self, label="",ts=np.array([]),subject_index="", para=Parameters())
self.label = label
self.subject_index = subject_index
self.timeseries = ts
self.shape = ts.shape
self.parameters = deepcopy(para)
self.BOLD_Raw = BOLD_Raw()
self.mask_file = ""
# setters
def set_maskfile(self, mask_file):
self.mask_file = mask_file
def set_BOLD_Raw(self, BOLD_Raw):
self.BOLD_Raw = BOLD_Raw
# getters
def get_maskfile(self):
return self.mask_file
def get_BOLD_Raw(self):
return self.BOLD_Raw
# misc.
def compareTimeSeries(self, ts):
""" Compares another time-series with itself to determine if both are identical
Four checks are performed:
1. Label
2. Parameters
3. Raw BOLD associated with it
4. Mask-file
If all four comparisons return true, then both
time-series objects are identical
"""
if self.label == ts.get_label() \
and self.parameters == ts.get_parameters() \
and self.BOLD_Raw.compareTimeSeries(ts.get_BOLD_Raw()) \
and ts.get_maskfile() == self.mask_file:
return True
else:
return False
def save_info(self, name):
""" Saves the information about the time-series in a .mat file """
try:
dic = {}
dic["timeseries"] = self.timeseries
dic["mask_file"] = self.mask_file
savemat(name, dic)
return True
except:
return False
| 2.390625
| 2
|
magmap/atlas/edge_seg.py
|
kaparna126/magellanmapper
| 0
|
12775872
|
<gh_stars>0
# Segmentation based on edge detection
# Author: <NAME>, 2019
"""Re-segment atlases based on edge detections.
"""
import os
from time import time
import SimpleITK as sitk
import numpy as np
from magmap.atlas import atlas_refiner
from magmap.settings import config
from magmap.cv import chunking, cv_nd, segmenter
from magmap.settings import profiles
from magmap.io import df_io, libmag, sitk_io
from magmap.stats import vols
def _mirror_imported_labels(labels_img_np, start, mirror_mult, axis):
# mirror labels that have been imported and transformed may have had
# axes swapped, requiring them to be swapped back
labels_img_np = atlas_refiner.mirror_planes(
np.swapaxes(labels_img_np, 0, axis), start, mirror_mult=mirror_mult,
check_equality=True)
labels_img_np = np.swapaxes(labels_img_np, 0, axis)
return labels_img_np
def _is_profile_mirrored():
# check if profile is set for mirroring, though does not necessarily
# mean that the image itself is mirrored; allows checking for
# simplification by operating on one half and mirroring to the other
mirror = config.atlas_profile["labels_mirror"]
return (mirror and mirror[profiles.RegKeys.ACTIVE]
and mirror["start"] is not None)
def _get_mirror_mult():
# get the mirrored labels multiplier, which is -1 if set to neg labels
# and 1 if otherwise
mirror = config.atlas_profile["labels_mirror"]
mirror_mult = -1 if mirror and mirror["neg_labels"] else 1
return mirror_mult
def make_edge_images(path_img, show=True, atlas=True, suffix=None,
path_atlas_dir=None):
"""Make edge-detected atlas and associated labels images.
The atlas is assumed to be a sample (eg microscopy) image on which
an edge-detection filter will be applied. The labels image is
assumed to be an annotated image whose edges will be found by
obtaining the borders of all separate labels.
Args:
path_img: Path to the image atlas. The labels image will be
found as a corresponding, registered image, unless
``path_atlas_dir`` is given.
show (bool): True if the output images should be displayed; defaults
to True.
atlas: True if the primary image is an atlas, which is assumed
to be symmetrical. False if the image is an experimental/sample
image, in which case erosion will be performed on the full
images, and stats will not be performed.
suffix: Modifier to append to end of ``path_img`` basename for
registered image files that were output to a modified name;
defaults to None.
path_atlas_dir: Path to atlas directory to use labels from that
directory rather than from labels image registered to
``path_img``, such as when the sample image is registered
to an atlas rather than the other way around. Typically
coupled with ``suffix`` to compare same sample against
different labels. Defaults to None.
"""
# load intensity image from which to detect edges
atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
if not atlas_suffix:
if atlas:
# atlases default to using the atlas volume image
print("generating edge images for atlas")
atlas_suffix = config.RegNames.IMG_ATLAS.value
else:
# otherwise, use the experimental image
print("generating edge images for experiment/sample image")
atlas_suffix = config.RegNames.IMG_EXP.value
# adjust image path with suffix
mod_path = path_img
if suffix is not None:
mod_path = libmag.insert_before_ext(mod_path, suffix)
labels_from_atlas_dir = path_atlas_dir and os.path.isdir(path_atlas_dir)
if labels_from_atlas_dir:
# load labels from atlas directory
# TODO: consider applying suffix to labels dir
path_atlas = path_img
path_labels = os.path.join(
path_atlas_dir, config.RegNames.IMG_LABELS.value)
print("loading labels from", path_labels)
labels_sitk = sitk.ReadImage(path_labels)
else:
# load labels registered to sample image
path_atlas = mod_path
labels_sitk = sitk_io.load_registered_img(
mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
labels_img_np = sitk.GetArrayFromImage(labels_sitk)
# load atlas image, set resolution from it
atlas_sitk = sitk_io.load_registered_img(
path_atlas, atlas_suffix, get_sitk=True)
config.resolutions = np.array([atlas_sitk.GetSpacing()[::-1]])
atlas_np = sitk.GetArrayFromImage(atlas_sitk)
# output images
atlas_sitk_log = None
atlas_sitk_edge = None
labels_sitk_interior = None
log_sigma = config.atlas_profile["log_sigma"]
if log_sigma is not None and suffix is None:
# generate LoG and edge-detected images for original image
print("generating LoG edge-detected images with sigma", log_sigma)
thresh = (config.atlas_profile["atlas_threshold"]
if config.atlas_profile["log_atlas_thresh"] else None)
atlas_log = cv_nd.laplacian_of_gaussian_img(
atlas_np, sigma=log_sigma, labels_img=labels_img_np, thresh=thresh)
atlas_sitk_log = sitk_io.replace_sitk_with_numpy(atlas_sitk, atlas_log)
atlas_edge = cv_nd.zero_crossing(atlas_log, 1).astype(np.uint8)
atlas_sitk_edge = sitk_io.replace_sitk_with_numpy(
atlas_sitk, atlas_edge)
else:
# if sigma not set or if using suffix to compare two images,
# load from original image to compare against common image
atlas_edge = sitk_io.load_registered_img(
path_img, config.RegNames.IMG_ATLAS_EDGE.value)
erode = config.atlas_profile["erode_labels"]
if erode["interior"]:
# make map of label interiors for interior/border comparisons
print("Eroding labels to generate interior labels image")
erosion = config.atlas_profile[
profiles.RegKeys.EDGE_AWARE_REANNOTATION]
erosion_frac = config.atlas_profile["erosion_frac"]
interior, _ = erode_labels(
labels_img_np, erosion, erosion_frac,
atlas and _is_profile_mirrored(), _get_mirror_mult())
labels_sitk_interior = sitk_io.replace_sitk_with_numpy(
labels_sitk, interior)
# make labels edge and edge distance images
dist_to_orig, labels_edge = edge_distances(
labels_img_np, atlas_edge, spacing=atlas_sitk.GetSpacing()[::-1])
dist_sitk = sitk_io.replace_sitk_with_numpy(atlas_sitk, dist_to_orig)
labels_sitk_edge = sitk_io.replace_sitk_with_numpy(labels_sitk, labels_edge)
# show all images
imgs_write = {
config.RegNames.IMG_ATLAS_LOG.value: atlas_sitk_log,
config.RegNames.IMG_ATLAS_EDGE.value: atlas_sitk_edge,
config.RegNames.IMG_LABELS_EDGE.value: labels_sitk_edge,
config.RegNames.IMG_LABELS_INTERIOR.value: labels_sitk_interior,
config.RegNames.IMG_LABELS_DIST.value: dist_sitk,
}
if show:
for img in imgs_write.values():
if img: sitk.Show(img)
# write images to same directory as atlas with appropriate suffix
sitk_io.write_reg_images(imgs_write, mod_path)
def erode_labels(labels_img_np, erosion, erosion_frac=None, mirrored=True,
mirror_mult=-1):
"""Erode labels image for use as markers or a map of the interior.
Args:
labels_img_np (:obj:`np.ndarray`): Numpy image array of labels in
z,y,x format.
erosion (dict): Dictionary of erosion filter settings from
:class:`profiles.RegKeys` to pass to
:meth:`segmenter.labels_to_markers_erosion`.
erosion_frac (int): Target erosion fraction; defaults to None.
mirrored (bool): True if the primary image is mirrored/symmetrical, in
which case erosion will only be performed on one symmetric half
and mirrored to the other half. If False or no symmetry is
found, such as unmirrored atlases or experimental/sample
images, erosion will be performed on the full image.
mirror_mult (int): Multiplier for mirrored labels; defaults to -1
to make mirrored labels the inverse of their source labels.
Returns:
:obj:`np.ndarray`, :obj:`pd.DataFrame`: The eroded labels as a new
array of same shape as that of ``labels_img_np`` and a data frame
of erosion stats.
"""
labels_to_erode = labels_img_np
sym_axis = atlas_refiner.find_symmetric_axis(labels_img_np, mirror_mult)
is_mirrored = mirrored and sym_axis >= 0
len_half = None
if is_mirrored:
# if symmetric, erode only one symmetric half
len_half = labels_img_np.shape[sym_axis] // 2
slices = [slice(None)] * labels_img_np.ndim
slices[sym_axis] = slice(len_half)
labels_to_erode = labels_img_np[tuple(slices)]
# convert labels image into markers
#eroded = segmenter.labels_to_markers_blob(labels_img_np)
eroded, df = segmenter.labels_to_markers_erosion(
labels_to_erode, erosion[profiles.RegKeys.MARKER_EROSION],
erosion_frac, erosion[profiles.RegKeys.MARKER_EROSION_MIN])
if is_mirrored:
# mirror changes onto opposite symmetric half
eroded = _mirror_imported_labels(
eroded, len_half, mirror_mult, sym_axis)
return eroded, df
def edge_aware_segmentation(path_atlas, show=True, atlas=True, suffix=None,
exclude_labels=None, mirror_mult=-1):
"""Segment an atlas using its previously generated edge map.
Labels may not match their own underlying atlas image well,
particularly in the orthogonal directions in which the labels
were not constructed. To improve alignment between the labels
and the atlas itself, register the labels to an automated, roughly
segmented version of the atlas. The goal is to improve the
labels' alignment so that the atlas/labels combination can be
used for another form of automated segmentation by registering
them to experimental brains via :func:``register``.
Edge files are assumed to have been generated by
:func:``make_edge_images``.
Args:
path_atlas (str): Path to the fixed file, typically the atlas file
with stained sections. The corresponding edge and labels
files will be loaded based on this path.
show (bool): True if the output images should be displayed; defaults
to True.
atlas (bool): True if the primary image is an atlas, which is assumed
to be symmetrical. False if the image is an experimental/sample
image, in which case segmentation will be performed on the full
images, and stats will not be performed.
suffix (str): Modifier to append to end of ``path_atlas`` basename for
registered image files that were output to a modified name;
defaults to None. If ``atlas`` is True, ``suffix`` will only
be applied to saved files, with files still loaded based on the
original path.
exclude_labels (List[int]): Sequence of labels to exclude from the
segmentation; defaults to None.
mirror_mult (int): Multiplier for mirrored labels; defaults to -1
to make mirrored labels the inverse of their source labels.
"""
# adjust image path with suffix
load_path = path_atlas
mod_path = path_atlas
if suffix is not None:
mod_path = libmag.insert_before_ext(mod_path, suffix)
if atlas: load_path = mod_path
# load corresponding files via SimpleITK
atlas_sitk = sitk_io.load_registered_img(
load_path, config.RegNames.IMG_ATLAS.value, get_sitk=True)
atlas_sitk_edge = sitk_io.load_registered_img(
load_path, config.RegNames.IMG_ATLAS_EDGE.value, get_sitk=True)
labels_sitk = sitk_io.load_registered_img(
load_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
labels_sitk_markers = sitk_io.load_registered_img(
load_path, config.RegNames.IMG_LABELS_MARKERS.value, get_sitk=True)
# get Numpy arrays of images
atlas_img_np = sitk.GetArrayFromImage(atlas_sitk)
atlas_edge = sitk.GetArrayFromImage(atlas_sitk_edge)
labels_img_np = sitk.GetArrayFromImage(labels_sitk)
markers = sitk.GetArrayFromImage(labels_sitk_markers)
# segment image from markers
sym_axis = atlas_refiner.find_symmetric_axis(atlas_img_np)
mirrored = atlas and sym_axis >= 0
len_half = None
seg_args = {"exclude_labels": exclude_labels}
edge_prof = config.atlas_profile[profiles.RegKeys.EDGE_AWARE_REANNOTATION]
if edge_prof:
edge_filt = edge_prof[profiles.RegKeys.WATERSHED_MASK_FILTER]
if edge_filt and len(edge_filt) > 1:
# watershed mask filter settings from atlas profile
seg_args["mask_filt"] = edge_filt[0]
seg_args["mask_filt_size"] = edge_filt[1]
if mirrored:
# segment only half of image, assuming symmetry
len_half = atlas_img_np.shape[sym_axis] // 2
slices = [slice(None)] * labels_img_np.ndim
slices[sym_axis] = slice(len_half)
sl = tuple(slices)
labels_seg = segmenter.segment_from_labels(
atlas_edge[sl], markers[sl], labels_img_np[sl], **seg_args)
else:
# segment the full image, including excluded labels on the opposite side
exclude_labels = (exclude_labels.tolist()
+ (mirror_mult * exclude_labels).tolist())
seg_args["exclude_labels"] = exclude_labels
labels_seg = segmenter.segment_from_labels(
atlas_edge, markers, labels_img_np, **seg_args)
smoothing = config.atlas_profile["smooth"]
if smoothing is not None:
# smoothing by opening operation based on profile setting
atlas_refiner.smooth_labels(
labels_seg, smoothing, config.SmoothingModes.opening)
if mirrored:
# mirror back to other half
labels_seg = _mirror_imported_labels(
labels_seg, len_half, mirror_mult, sym_axis)
# expand background to smoothed background of original labels to
# roughly match background while still allowing holes to be filled
crop = config.atlas_profile["crop_to_orig"]
atlas_refiner.crop_to_orig(
labels_img_np, labels_seg, crop)
if labels_seg.dtype != labels_img_np.dtype:
# watershed may give different output type, so cast back if so
labels_seg = labels_seg.astype(labels_img_np.dtype)
labels_sitk_seg = sitk_io.replace_sitk_with_numpy(labels_sitk, labels_seg)
# show DSCs for labels
print("\nMeasuring overlap of atlas and combined watershed labels:")
atlas_refiner.measure_overlap_combined_labels(atlas_sitk, labels_sitk_seg)
print("Measuring overlap of individual original and watershed labels:")
atlas_refiner.measure_overlap_labels(labels_sitk, labels_sitk_seg)
print("\nMeasuring overlap of combined original and watershed labels:")
atlas_refiner.measure_overlap_labels(
atlas_refiner.make_labels_fg(labels_sitk),
atlas_refiner.make_labels_fg(labels_sitk_seg))
print()
# show and write image to same directory as atlas with appropriate suffix
sitk_io.write_reg_images(
{config.RegNames.IMG_LABELS.value: labels_sitk_seg}, mod_path)
if show: sitk.Show(labels_sitk_seg)
return path_atlas
def merge_atlas_segmentations(img_paths, show=True, atlas=True, suffix=None):
"""Merge atlas segmentations for a list of files as a multiprocessing
wrapper for :func:``edge_aware_segmentation``, after which
edge image post-processing is performed separately since it
contains tasks also performed in multiprocessing.
Args:
img_paths (List[str]): Sequence of image paths to load.
show (bool): True if the output images should be displayed; defaults
to True.
atlas (bool): True if the image is an atlas; defaults to True.
suffix (str): Modifier to append to end of ``img_path`` basename for
registered image files that were output to a modified name;
defaults to None.
"""
start_time = time()
# erode all labels images into markers for watershed; not multiprocessed
# since erosion is itself multiprocessed
erode = config.atlas_profile["erode_labels"]
erosion = config.atlas_profile[profiles.RegKeys.EDGE_AWARE_REANNOTATION]
erosion_frac = config.atlas_profile["erosion_frac"]
mirrored = atlas and _is_profile_mirrored()
mirror_mult = _get_mirror_mult()
dfs_eros = []
for img_path in img_paths:
mod_path = img_path
if suffix is not None:
mod_path = libmag.insert_before_ext(mod_path, suffix)
labels_sitk = sitk_io.load_registered_img(
mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
print("Eroding labels to generate markers for atlas segmentation")
df = None
if erode["markers"]:
# use default minimal post-erosion size (not setting erosion frac)
markers, df = erode_labels(
sitk.GetArrayFromImage(labels_sitk), erosion,
mirrored=mirrored, mirror_mult=mirror_mult)
labels_sitk_markers = sitk_io.replace_sitk_with_numpy(
labels_sitk, markers)
sitk_io.write_reg_images(
{config.RegNames.IMG_LABELS_MARKERS.value: labels_sitk_markers},
mod_path)
df_io.data_frames_to_csv(
df, "{}_markers.csv".format(os.path.splitext(mod_path)[0]))
dfs_eros.append(df)
pool = chunking.get_mp_pool()
pool_results = []
for img_path, df in zip(img_paths, dfs_eros):
print("setting up atlas segmentation merge for", img_path)
# convert labels image into markers
exclude = df.loc[
np.isnan(df[config.SmoothingMetrics.FILTER_SIZE.value]),
config.AtlasMetrics.REGION.value]
print("excluding these labels from re-segmentation:\n", exclude)
pool_results.append(pool.apply_async(
edge_aware_segmentation,
args=(img_path, show, atlas, suffix, exclude, mirror_mult)))
for result in pool_results:
# edge distance calculation and labels interior image generation
# are multiprocessed, so run them as post-processing tasks to
# avoid nested multiprocessing
path = result.get()
mod_path = path
if suffix is not None:
mod_path = libmag.insert_before_ext(path, suffix)
# make edge distance images and stats
labels_sitk = sitk_io.load_registered_img(
mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
labels_np = sitk.GetArrayFromImage(labels_sitk)
dist_to_orig, labels_edge = edge_distances(
labels_np, path=path, spacing=labels_sitk.GetSpacing()[::-1])
dist_sitk = sitk_io.replace_sitk_with_numpy(labels_sitk, dist_to_orig)
labels_sitk_edge = sitk_io.replace_sitk_with_numpy(
labels_sitk, labels_edge)
labels_sitk_interior = None
if erode["interior"]:
# make interior images from labels using given targeted
# post-erosion frac
interior, _ = erode_labels(
labels_np, erosion, erosion_frac=erosion_frac,
mirrored=mirrored, mirror_mult=mirror_mult)
labels_sitk_interior = sitk_io.replace_sitk_with_numpy(
labels_sitk, interior)
# write images to same directory as atlas
imgs_write = {
config.RegNames.IMG_LABELS_DIST.value: dist_sitk,
config.RegNames.IMG_LABELS_EDGE.value: labels_sitk_edge,
config.RegNames.IMG_LABELS_INTERIOR.value: labels_sitk_interior,
}
sitk_io.write_reg_images(imgs_write, mod_path)
if show:
for img in imgs_write.values():
if img: sitk.Show(img)
print("finished {}".format(path))
pool.close()
pool.join()
print("time elapsed for merging atlas segmentations:", time() - start_time)
def edge_distances(labels, atlas_edge=None, path=None, spacing=None):
"""Measure the distance between edge images.
Args:
labels: Labels image as Numpy array.
atlas_edge: Image as a Numpy array of the atlas reduced to its edges.
Defaults to None to load from the corresponding registered
file path based on ``path``.
path: Path from which to load ``atlas_edge`` if it is None.
spacing: Grid spacing sequence of same length as number of image
axis dimensions; defaults to None.
    Returns:
        Tuple of ``dist_to_orig``, an image array of the same shape as
        ``labels_edge`` with label edge values replaced by corresponding
        distance values, and ``labels_edge``, the labels image reduced to
        its edges.
"""
if atlas_edge is None:
atlas_edge = sitk_io.load_registered_img(
path, config.RegNames.IMG_ATLAS_EDGE.value)
# create distance map between edges of original and new segmentations
labels_edge = vols.make_labels_edge(labels)
dist_to_orig, _, _ = cv_nd.borders_distance(
atlas_edge != 0, labels_edge != 0, spacing=spacing)
return dist_to_orig, labels_edge
def make_sub_segmented_labels(img_path, suffix=None):
"""Divide each label based on anatomical borders to create a
sub-segmented image.
The segmented labels image will be loaded, or if not available, the
non-segmented labels will be loaded instead.
Args:
img_path: Path to main image from which registered images will
be loaded.
suffix: Modifier to append to end of ``img_path`` basename for
registered image files that were output to a modified name;
defaults to None.
Returns:
Sub-segmented image as a Numpy array of the same shape as
the image at ``img_path``.
"""
# adjust image path with suffix
mod_path = img_path
if suffix is not None:
mod_path = libmag.insert_before_ext(mod_path, suffix)
# load labels
labels_sitk = sitk_io.load_registered_img(
mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
# atlas edge image is associated with original, not modified image
atlas_edge = sitk_io.load_registered_img(
img_path, config.RegNames.IMG_ATLAS_EDGE.value)
# sub-divide the labels and save to file
labels_img_np = sitk.GetArrayFromImage(labels_sitk)
labels_subseg = segmenter.sub_segment_labels(labels_img_np, atlas_edge)
labels_subseg_sitk = sitk_io.replace_sitk_with_numpy(
labels_sitk, labels_subseg)
sitk_io.write_reg_images(
{config.RegNames.IMG_LABELS_SUBSEG.value: labels_subseg_sitk}, mod_path)
return labels_subseg
| 2.234375
| 2
|
teeth_overlord/tests/unit/util.py
|
rackerlabs/teeth-overlord
| 0
|
12775873
|
<reponame>rackerlabs/teeth-overlord
"""
Copyright 2013 Rackspace, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import mock
import unittest
from teeth_overlord import util
class TestIntervalTimer(unittest.TestCase):
@mock.patch('time.sleep')
def test_no_error_no_event(self, mocked_sleep):
t = util.IntervalTimer(1, 10, backoff_factor=5, jitter=.1)
t.wait()
self.assertEqual(t.next_interval, t.base_interval)
mocked_sleep.assert_called_once_with(1)
event = mock.Mock()
t.wait(event=event)
self.assertEqual(t.next_interval, t.base_interval)
event.wait.assert_called_once_with(1)
def test_no_error_with_event(self):
t = util.IntervalTimer(1, 10, backoff_factor=5, jitter=.1)
event = mock.Mock()
t.wait(event=event)
self.assertEqual(t.next_interval, t.base_interval)
event.wait.assert_called_once_with(1)
@mock.patch('random.normalvariate')
def test_error_flow(self, mocked_normalvariate):
t = util.IntervalTimer(1, 10, backoff_factor=5, jitter=.1)
mocked_normalvariate.return_value = 5.5
event = mock.Mock()
t.wait(event=event, error=True)
mocked_normalvariate.assert_called_once_with(5, .5)
self.assertEqual(t.next_interval, 5.5)
event.wait.assert_called_once_with(5.5)
mocked_normalvariate.reset_mock()
event.wait.reset_mock()
mocked_normalvariate.return_value = 12
t.wait(event=event, error=True)
mocked_normalvariate.assert_called_once_with(10, 1.0)
self.assertEqual(t.next_interval, 12)
event.wait.assert_called_once_with(12)
event.wait.reset_mock()
t.wait(event=event)
self.assertEqual(t.next_interval, 1)
event.wait.assert_called_once_with(1)
| 1.804688
| 2
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/models/_resource_management_client_enums.py
|
rsdoherty/azure-sdk-for-python
| 2,728
|
12775874
|
<reponame>rsdoherty/azure-sdk-for-python<filename>sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/models/_resource_management_client_enums.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
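# Illustrative note (not part of the generated SDK code): with this metaclass,
# member lookup is case-insensitive, so for the enums below both
#   DeploymentMode['incremental']        # -> DeploymentMode.INCREMENTAL
#   getattr(DeploymentMode, 'complete')  # -> DeploymentMode.COMPLETE
# resolve to the expected members regardless of input casing.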
class DeploymentMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The mode that is used to deploy resources. This value can be either Incremental or Complete. In
Incremental mode, resources are deployed without deleting existing resources that are not
included in the template. In Complete mode, resources are deployed and existing resources in
the resource group that are not included in the template are deleted. Be careful when using
Complete mode as you may unintentionally delete resources.
"""
INCREMENTAL = "Incremental"
COMPLETE = "Complete"
class OnErrorDeploymentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The deployment on error behavior type. Possible values are LastSuccessful and
SpecificDeployment.
"""
LAST_SUCCESSFUL = "LastSuccessful"
SPECIFIC_DEPLOYMENT = "SpecificDeployment"
class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
| 2.03125
| 2
|
Blackjack2.py
|
gusghrlrl101/BlackJack-AI
| 0
|
12775875
|
import random
import numpy as np
import matplotlib.pyplot as plt
import copy
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import colors
##
counting = [0 for i in range(12)]
counting_temp = [0 for i in range(12)]
def refresh_counting(num):
global counting
counting[num] += 1
if sum(counting) >= 52:
counting = [0 for i in range(12)]
def refresh_counting_temp(num):
global counting_temp
counting_temp[num] += 1
if sum(counting_temp) >= 52:
counting_temp = [0 for i in range(12)]
def get_counting():
global counting
return counting
def get_counting_temp():
global counting_temp
return counting_temp
def copy_counting():
global counting, counting_temp
counting_temp = copy.deepcopy(counting)
def calculate_counting1():
global counting_temp
result = 0
for i, cnt in enumerate(counting_temp):
if 2 <= i and i <= 6:
result += cnt
elif 10 <= i:
result -= cnt
result /= (52 - len(counting_temp)) / 13.0
return round(result)
def calculate_counting2():
global counting_temp
result = 0
for i, cnt in enumerate(counting_temp):
if 2 <= i and i <= 7:
if i == 4 or i == 5:
result += cnt * 2
else:
result += cnt
elif i == 10:
result -= cnt * 2
result /= (52 - len(counting_temp)) / 13.0
return round(result)
def calculate_counting3():
global counting_temp
result = 0
for i, cnt in enumerate(counting_temp):
if i == 2 or i == 3 or i == 6:
result += cnt * 2
elif i == 4:
result += cnt * 3
elif i == 5:
result += cnt * 4
elif i == 7:
result += cnt
elif i == 9:
result -= cnt * 2
elif i == 10:
result -= cnt * 3
result /= (52 - len(counting_temp)) / 13.0
return round(result)
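# Sketch of the intent of the helpers above (an interpretation based on the
# weights used): each calculate_counting* keeps a weighted running count of the
# cards seen so far (low cards positive, high cards negative; calculate_counting1
# matches a Hi-Lo style scheme) and divides by an approximate "decks remaining"
# factor, roughly:
#   running_count = sum(weight[card] * times_seen[card] for card in cards)
#   true_count = round(running_count / decks_remaining_estimate)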
##
class Deck(object):
"""
Deck : Card deck, which can be shuffled, drawn, and reset.
"""
def __init__(self):
deck = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11]
self.card_deck = deck * 4
self.shuffle()
def shuffle(self):
random.shuffle(self.card_deck)
def draw(self):
return self.card_deck.pop()
def reset(self):
deck = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11]
self.card_deck = deck * 4
self.shuffle()
global counting, counting_temp
counting = [0 for i in range(12)]
counting_temp = [0 for i in range(12)]
class Dealer(object):
"""
    Dealer: dealer class.
    The dealer takes two cards, hits while the hand total is 16 or less, and
    sticks at 17 or more. After receiving the first two cards, one card is
    revealed at random.
def __init__(self):
"""
        hands : cards held by the dealer
        usable_ace : indices of aces among the dealer's cards
        natural : True if the first two cards total 21, False otherwise
"""
self.hands = list()
self.usable_ace = list()
def hit(self, deck: Deck):
"""
        Dealer hit. If the new card is an ace, add its index to the usable-ace list.
:param deck: Deck Object
:return:
"""
new_card = deck.draw()
if new_card == 11:
self.usable_ace.append(len(self.hands))
self.hands.append(new_card)
##
refresh_counting(new_card)
##
def show(self):
"""
        Show one of the dealer's cards at random.
        :return: the value of a randomly chosen dealer card
"""
card = random.choice(self.hands)
##
refresh_counting_temp(card)
##
if card == 11:
card = 1
return card
def calculate_sum(self):
"""
        Compute the sum of the dealer's cards.
        If the sum exceeds 21 and a usable ace is available, count it as 1.
        :return: sum of the dealer's cards
"""
sums = sum(self.hands)
if sums > 21 and len(self.usable_ace) > 0:
self.hands[self.usable_ace.pop()] = 1
sums = sum(self.hands)
return sums
def action(self, deck: Deck):
"""
        The dealer's behavior on the dealer's turn:
        hit while the total is 16 or less, stick at 17 or more.
:param deck:
:return:
"""
while True:
sums = self.calculate_sum()
if sums < 17:
self.hit(deck)
else:
return sums
def observation(self, action, agent, deck):
"""
        Take the player's action and return the matching observation and reward.
        :param action: the agent's action
        :param agent: Agent object
        :param deck: Deck object
        :return: whether the episode is done, and the reward
"""
done = False
reward = 0
if action == True: # Hit
agent.hit(deck)
            if agent.calculate_sum() > 21:  # bust: the player's hit pushed the total over 21, end immediately
done = True
reward = -1
else: # Stick
done = True
            reward = self.calculate_reward(agent, deck)
        return done, reward
    def calculate_reward(self, agent, deck):
        """
        Compare the player's hand against the dealer's when the player sticks.
        :param agent:
        :param deck:
        :return: Reward
        """
        agent_sum = agent.calculate_sum()  # player's total
        if agent_sum > 21:  # player busts (loss)
            return -1
        dealer_sum = self.action(deck)  # dealer plays out the hand and totals it
        if dealer_sum > 21:  # dealer busts (win)
            return 1
        if dealer_sum > agent_sum:  # dealer total > player total (loss)
            return -1
        if dealer_sum < agent_sum:  # dealer total < player total (win)
            return 1
        return 0  # dealer total == player total (push)
def reset(self):
"""
        Reset the dealer (to start a new episode).
"""
self.hands = list()
self.usable_ace = list()
class Agent(object):
def __init__(self):
"""
hand : 플레이어의 카드
usable_ace : 사용 가능한 ace 리스트
Q_table : q(s,a) 값을 저장할 딕셔너리
"""
self.hands = list()
self.usable_ace = list()
self.Q_table = dict()
def hit(self, deck: Deck):
"""
        Draw a new card from the deck.
        :param deck: Deck to draw a card from
:return: None
"""
new_card = deck.draw()
##
refresh_counting(new_card)
refresh_counting_temp(new_card)
##
if new_card == 11:
self.usable_ace.append(len(self.hands))
self.hands.append(new_card)
def calculate_sum(self):
"""
        Compute the sum of the player's cards.
        If the sum exceeds 21 and a usable ace is available, count it as 1.
:return:
"""
sums = sum(self.hands)
if sums > 21 and len(self.usable_ace) > 0:
self.hands[self.usable_ace.pop()] = 1
sums = sum(self.hands)
return sums
def random_action(self):
"""
        Act randomly.
True = hit, False = stick
:return:
"""
return random.choice([True, False])
def policy(self, state):
"""
        The agent's policy function.
        Acts randomly with probability e; otherwise selects the action with the
        larger q(s,a) value in the current state.
        :param state: the state given to the agent
        :return: the agent's action, True = hit and False = stick
        """
        # check whether the Q_table has entries for the current state-action pairs
        for action in (True, False):
            if (state, action) not in self.Q_table.keys(): # initialize missing entries to 0
                self.Q_table[(state, action)] = [0, 0] # (mean return, visit count)
            else:
                continue
        # select the action with the larger q value
        if self.Q_table[(state, True)] > self.Q_table[(state, False)]:
            return True # Hit
        elif self.Q_table[(state, True)] == self.Q_table[(state, False)]: # tie: choose at random
return self.random_action()
else:
return False # Stick
def reset(self):
"""
        Reset the agent.
:return: None
"""
self.hands = list()
self.usable_ace = list()
def update_qval(self, episode):
"""
        Update the Q_table from one episode (a single game).
        Create a new entry when a state-action pair is not yet in the Q table.
        If the pair already exists, update it with an incremental mean.
:param episode: Episode generated from environment
:return: None
"""
total_return = 0
        for state, action, reward in episode[::-1]: # walk the episode backwards
            total_return += reward # accumulate the return Gt
            if (state, action) not in self.Q_table.keys(): # unseen state-action pair
                self.Q_table[(state, action)] = [total_return, 1] # create a new entry (Gt, count)
            else: # existing state-action pair: apply the incremental mean
                prev_val = self.Q_table[(state, action)][0] # previous mean return
                count = self.Q_table[(state, action)][1] + 1 # increment the visit count
                mean = prev_val + (total_return - prev_val) / count # incremental mean update
                self.Q_table[(state, action)] = [mean, count] # store the update
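    # Worked example of the incremental mean above (illustrative): if a pair has
    # mean 0.5 over 4 visits and a new return of -1 arrives, then count = 5 and
    # mean = 0.5 + (-1 - 0.5) / 5 = 0.2, which equals the plain average (0.5*4 - 1) / 5.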
class MonteCarlo(object):
def generate_episode(self, dealer: Dealer, agent: Agent, deck: Deck):
"""
        Generate one episode (one game).
:param dealer:
:param agent:
:param deck:
:return:
"""
global counting, counting_temp
        # initialize the card deck, dealer, and agent
##
if len(deck.card_deck) < 15:
deck2 = Deck()
deck.card_deck = deck2.card_deck + deck.card_deck
##
dealer.reset()
agent.reset()
agent.hit(deck)
agent.hit(deck)
dealer.hit(deck)
dealer.hit(deck)
        done = False # whether the episode has ended
        episode = list() # the episode (list of [state, action, reward])
showed = dealer.show()
while not done:
            # generate state, action, reward until the episode ends
sums = agent.calculate_sum()
if sums < 12:
agent.hit(deck)
continue
##
# changes counting method
state = (sums, bool(agent.usable_ace), showed, calculate_counting3())
##
            # Exploring Starts:
            if len(episode) == 0: # first state: pick a random action
                action = agent.random_action()
            else: # otherwise pick the action with the larger value in the Q table
                action = agent.policy(state)
            done, reward = dealer.observation(action, agent, deck) # episode-done flag and reward
            # append the generated state, action, reward to the episode
episode.append([state, action, reward])
##
copy_counting()
##
return episode
def train(self, dealer: Dealer, agent: Agent, deck: Deck, it=10000, verbose=True):
count = 0
win = 0
loss = 0
draw = 0
total_win = 0
total_loss = 0
total_draw = 0
result = str()
for i in range(it):
count += 1
episode = self.generate_episode(dealer, agent, deck)
agent.update_qval(episode)
if episode[-1][-1] == 1:
win += 1
elif episode[-1][-1] == 0:
draw += 1
else:
loss += 1
if count % 1000 == 0 and verbose == True:
total_win += win
total_loss += loss
total_draw += draw
print("========== Training : Episode ", count, " ===========")
print("Recent 1000 games win rate :{:.3f}%".format(win / (win + loss) * 100))
print(" -- 1000 Games WIN :", win, "DRAW :", draw, "LOSS :", loss)
print("Total win rate : {:.3f}%".format(total_win / (total_win + total_loss) * 100))
print(" -- TOTAL Games WIN :", total_win, "DRAW :", total_draw, "LOSS :", total_loss)
win = 0
loss = 0
draw = 0
| 3.125
| 3
|
predictor.py
|
RKJenamani/La_Liga_Predictor_ML
| 0
|
12775876
|
<reponame>RKJenamani/La_Liga_Predictor_ML<filename>predictor.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import xgboost as xgb
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import f1_score
from sklearn.metrics import *
data=pd.read_csv('final_dataset.csv')
y_all=data['FTR']
features=['HTPTN','ATPTN','HGSTN','HGCTN','AGSTN','AGCTN','histhome','histaway']
x_all=data[features]
x_train=x_all[0:2660]
x_test=x_all[2661:]
y_train=y_all[0:2660]
y_test=y_all[2661:]
forest_clf = RandomForestClassifier(n_estimators = 100, max_depth = 10, random_state = 1)
forest_clf.fit(x_train, y_train)
y_pred = forest_clf.predict(x_test)
print("ACCURACY:")
print(accuracy_score(y_test, y_pred, normalize=True, sample_weight=None))
teams = {}
for i in data.groupby('HomeTeam').mean().T.columns:
teams[i] = 0
for i in range(len(y_pred)):
if y_pred[i] == 'H':
teams[data.loc[2660+i, 'HomeTeam']] += 3
if y_pred[i] == 'A':
teams[data.loc[2660+i, 'AwayTeam']] += 3
if y_pred[i] == 'D':
teams[data.loc[2660+i, 'HomeTeam']] += 1
teams[data.loc[2660+i, 'AwayTeam']] += 1
sorted_teams = sorted( ((value,key) for (key,value) in teams.items()), reverse = True)
print(sorted_teams)
| 2.984375
| 3
|
baselines/deepq/prediction/example/atari-gray/example.py
|
yenchenlin/rl-attack-detection
| 66
|
12775877
|
import tensorflow as tf
import numpy as np
import cv2
import argparse
import sys, os
import logging
def get_config(args):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return config
def get_cv_image(img, mean, scale):
return img
def main(args):
from tfacvp.model import ActionConditionalVideoPredictionModel
from tfacvp.util import post_process_gray, pre_process_state_gray
with tf.Graph().as_default() as graph:
# Define tensorflow computation graph
# In this example, I hardcode the arguments num_channel and num_frame for grayscale atari settings
logging.info('Create model [num_act = %d, num_channel = %d, num_frame = %d] for testing' % (args.num_act, 1, 4))
model = ActionConditionalVideoPredictionModel(num_act=args.num_act,
num_channel=1, num_frame=4,
is_train=False)
# Get tensorflow session configuration
config = get_config(args)
# Load testing state for predicting next frame
scale = 255.0
s = np.load(args.data)
mean = np.load(args.mean)
with tf.Session(config=config) as sess:
# Restore the model from checkpoint
# If you want to combine with your model, you should notice variable scope otherwise you might get some bugs
logging.info('Loading weights from %s' % (args.load))
model.restore(sess, args.load)
# Predict next frame condition on specified action
logging.info('Predict next frame condition on action %d' % (args.act))
# To one hot vector
a = np.identity(args.num_act)[args.act]
# Predict next frame
s = pre_process_state_gray(s, mean, (1.0 / scale), 4)
            print(np.max(s), np.min(s))
x_t_1_pred_batch = model.predict(sess, s[np.newaxis, :], a[np.newaxis, :])[0]
# Post process predicted frame for visualization
img = x_t_1_pred_batch[0]
img = post_process_gray(img, mean, scale)
cv2.imwrite('pred.png' , img)
if __name__ == '__main__':
logging.basicConfig(format='[%(asctime)s] %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--data', help='testing data (.npy), ndarray(shape = [84,84,4])', type=str, required=True)
parser.add_argument('--mean', help='image mean path (should be shipped with pre-trained model)', type=str, required=True)
parser.add_argument('--load', help='model weight path (tensorflow checkpoint)', type=str, required=True)
parser.add_argument('--num_act', help='number of actions in the game\'s action space', type=int, required=True)
parser.add_argument('--act', help='which action you want to take', type=int, required=True)
args = parser.parse_args()
main(args)
| 2.546875
| 3
|
src/pywebapp/www/app.py
|
WalsonTung/pywebapp
| 0
|
12775878
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
'''
async web application
'''
import logging;logging.basicConfig(level=logging.INFO)
import asyncio,os,json,time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment,FileSystemLoader
from config import configs
import orm
from coreweb import add_routes,add_static
from handlers import cookie2user,COOKIE_NAME
def init_jinja2(app,**kw):
logging.info('init jinja2...')
options = dict(
autoescape = kw.get('autoescape',True),
block_start_string = kw.get('block_start_string','{%'),
block_end_string = kw.get('block_end_string','%}'),
variable_start_string = kw.get('variable_start_string','{{'),
variable_end_string = kw.get('variable_end_string','}}'),
auto_reload = kw.get('auto_reload',True)
)
path = kw.get('path',None)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'templates')
logging.info('set jinja2 template path:%s' % path)
env = Environment(loader=FileSystemLoader(path),**options)
filters = kw.get('filters',None)
if filters is not None:
for name,f in filters.items():
env.filters[name] = f
app['__templating__'] = env
async def logger_factory(app,handler):
async def logger(request):
logging.info('Request:%s %s' %(request.method,request.path))
#await asyncio.sleep(0.3)
return (await handler(request))
return logger
async def auth_factory(app,handler):
async def auth(request):
logging.info('check user:%s %s' % (request.method,request.path))
request.__user__ = None
cookie_str = request.cookies.get(COOKIE_NAME)
if cookie_str:
user = await cookie2user(cookie_str)
if user:
logging.info('set current user: %s' % user.email)
request.__user__ = user
if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
return web.HTTPFound('/signin')
return (await handler(request))
return auth
async def data_factory(app,handler):
async def parse_data(request):
if request.method == 'POST':
if request.content_type.startswith('application/json'):
request.__data__ = await request.json()
logging.info('request json:%s' % str(request.__data__))
elif request.content_type.startswith('application/x-www-form-urlencode'):
request.__data__ = await request.post()
logging.info('request form: %s' % str(request.__data__))
return (await handler(request))
return parse_data
async def response_factory(app,handler):
async def response(request):
logging.info('Response handler...')
r = await handler(request)
if isinstance(r,web.StreamResponse):
return r
if isinstance(r,bytes):
resp = web.Response(body=r)
resp.content_type = 'application/octet-stream'
return resp
if isinstance(r,str):
if r.startswith('redirect:'):
return web.HTTPFound(r[9:])
resp = web.Response(body=r.encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r,dict):
template = r.get('__template__')
if template is None:
resp = web.Response(body=json.dumps(r,ensure_ascii=False,default=lambda o:o.__dict__).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
else:
resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r,int) and r >= 100 and r < 600:
            return web.Response(status=r)
if isinstance(r,tuple) and len(r) == 2:
t,m = r
if isinstance(t,int) and t >= 100 and t < 600:
                return web.Response(status=t, text=str(m))
# default:
resp = web.Response(body=str(r).encode('utf-8'))
resp.content_type = 'text/plain;charset=utf-8'
return resp
return response
def datetime_filter(t):
delta = int(time.time() -t)
if delta < 60:
return u'1分钟前'
if delta < 3600:
return u'%s分钟前' % (delta//60)
if delta < 86400:
return u'%s小时前' % (delta//3600)
if delta < 604800:
        return u'%s天前' % (delta//86400)
dt = datetime.fromtimestamp(t)
return u'%s年%s月%s日' % (dt.year,dt.month,dt.day)
async def init(loop):
await orm.create_pool(loop=loop,host='127.0.0.1',port=3306,user='root',password='<PASSWORD>',db='py_blog')
app = web.Application(loop = loop,middlewares=[
logger_factory,auth_factory,response_factory])
init_jinja2(app,filters=dict(datetime=datetime_filter))
add_routes(app,'handlers')
add_static(app)
srv = await loop.create_server(app.make_handler(),'127.0.0.1',9000)
logging.info('server started at http://127.0.0.1:9000...')
return srv
loop = asyncio.get_event_loop();
loop.run_until_complete(init(loop))
loop.run_forever()
| 2.078125
| 2
|
main.py
|
DaveLorenz/DeepLearningApp
| 3
|
12775879
|
<gh_stars>1-10
# load Flask
import flask
app = flask.Flask(__name__)
from flask import Flask, render_template,request
# load model preprocessing
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import keras.models
from keras.models import model_from_json
from keras.layers import Input
# Load pre-trained model into memory
json_file = open('model.json','r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded Model from disk")
# Helper function for tokenizing text to feed through pre-trained deep learning
def prepDataForDeepLearning(text):
trainWordFeatures = tokenizer.texts_to_sequences(text)
textTokenized = pad_sequences(trainWordFeatures, 201, padding='post')
return textTokenized
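# Note: prepDataForDeepLearning relies on the module-level `tokenizer` that is
# fitted on the training text below, so it must only be called after that
# fitting step. Illustrative shape, assuming the same 201-length padding:
#   prepDataForDeepLearning(["some review text"]).shape  # -> (1, 201)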
# Load files needed to create proper matrix using tokens from training data
inputDataTrain = pd.DataFrame(pd.read_csv("train_DrugExp_Text.tsv", "\t", header=None))
trainText = [item[1] for item in inputDataTrain.values.tolist()]
trainingLabels = [0 if item[0] == -1 else 1 for item in inputDataTrain.values.tolist()]
VOCABULARY_SIZE = 10000
tokenizer = Tokenizer(num_words=VOCABULARY_SIZE)
tokenizer.fit_on_texts(trainText)
textTokenized = prepDataForDeepLearning(trainText)
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# define a predict function as an endpoint
@app.route('/', methods=['GET', 'POST'])
def predict():
# whenever the predict method is called, we're going
# to input the user entered text into the model
# and return a prediction
if request.method == 'POST':
textData = request.form.get('text_entered')
textDataArray = [textData]
textTokenized = prepDataForDeepLearning(textDataArray)
prediction = int((1-np.asscalar(loaded_model.predict(textTokenized)))*100)
# return prediction in new page
return render_template('prediction.html', prediction=prediction)
else:
return render_template("search_page.html")
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8080)
| 2.765625
| 3
|
scripts/bin2hex.py
|
buehlerIBM/microwatt
| 0
|
12775880
|
#!/usr/bin/python
import sys
import subprocess
import struct
with open(sys.argv[1], "rb") as f:
while True:
word = f.read(8)
if len(word) == 8:
print("%016x" % struct.unpack('Q', word));
elif len(word) == 4:
print("00000000%08x" % struct.unpack('I', word));
elif len(word) == 0:
exit(0);
else:
raise Exception("Bad length")
| 2.96875
| 3
|
models/folder.py
|
tranquilitybase-io/tb-houston-service
| 1
|
12775881
|
from config import db, ma
class Folder(db.Model):
__tablename__ = "folder"
__table_args__ = {"schema": "eagle_db"}
id = db.Column(db.Integer, primary_key=True)
parentFolderId = db.Column(db.String(45))
folderId = db.Column(db.String(45))
folderName = db.Column(db.String(100))
status = db.Column(db.String(50))
taskId = db.Column(db.String(50))
def __repr__(self):
return "<Folder(id={self.id!r}, name={self.folderName!r})>".format(self=self)
class FolderSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Folder
include_fk = True
load_instance = True
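# Illustrative schema usage (assumed, mirroring typical marshmallow-sqlalchemy code):
#   folder_schema = FolderSchema()
#   data = folder_schema.dump(Folder(id=1, folderName="example"))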
| 2.40625
| 2
|
tests/conftest.py
|
jajimer/sinergym
| 23
|
12775882
|
<filename>tests/conftest.py
import os
import shutil
from glob import glob # to find directories with patterns
import pkg_resources
import pytest
from opyplus import Epm, Idd, WeatherData
from sinergym.envs.eplus_env import EplusEnv
from sinergym.simulators.eplus import EnergyPlus
from sinergym.utils.config import Config
from sinergym.utils.constants import *
from sinergym.utils.rewards import BaseReward, LinearReward
from sinergym.utils.wrappers import (LoggerWrapper, MultiObsWrapper,
NormalizeObservation)
# ---------------------------------------------------------------------------- #
# Root Directory #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='session')
def sinergym_path():
return os.path.abspath(
os.path.join(
pkg_resources.resource_filename(
'sinergym',
''),
os.pardir))
# ---------------------------------------------------------------------------- #
# Paths #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='session')
def eplus_path():
return os.environ['EPLUS_PATH']
@pytest.fixture(scope='session')
def bcvtb_path():
return os.environ['BCVTB_PATH']
@pytest.fixture(scope='session')
def pkg_data_path():
return PKG_DATA_PATH
@pytest.fixture(scope='session')
def idf_path(pkg_data_path):
return os.path.join(pkg_data_path, 'buildings', '5ZoneAutoDXVAV.idf')
@pytest.fixture(scope='session')
def weather_path(pkg_data_path):
return os.path.join(
pkg_data_path,
'weather',
'USA_PA_Pittsburgh-Allegheny.County.AP.725205_TMY3.epw')
@pytest.fixture(scope='session')
def ddy_path(pkg_data_path):
return os.path.join(
pkg_data_path,
'weather',
'USA_PA_Pittsburgh-Allegheny.County.AP.725205_TMY3.ddy')
@pytest.fixture(scope='session')
def idf_path2(pkg_data_path):
return os.path.join(
pkg_data_path,
'buildings',
'2ZoneDataCenterHVAC_wEconomizer.idf')
@pytest.fixture(scope='session')
def weather_path2(pkg_data_path):
return os.path.join(
pkg_data_path,
'weather',
'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw')
@pytest.fixture(scope='session')
def ddy_path2(pkg_data_path):
return os.path.join(
pkg_data_path,
'weather',
'USA_AZ_Davis-Monthan.AFB.722745_TMY3.ddy')
# 5zones variables
@pytest.fixture(scope='session')
def variables_5zone():
variables = {}
variables['observation'] = DEFAULT_5ZONE_OBSERVATION_VARIABLES
variables['action'] = DEFAULT_5ZONE_ACTION_VARIABLES
return variables
# datacenter variables
@pytest.fixture(scope='session')
def variables_datacenter():
variables = {}
variables['observation'] = DEFAULT_DATACENTER_OBSERVATION_VARIABLES
variables['action'] = DEFAULT_DATACENTER_ACTION_VARIABLES
return variables
# ---------------------------------------------------------------------------- #
# Environments #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='function')
def env_demo(idf_path, weather_path):
idf_file = idf_path.split('/')[-1]
weather_file = weather_path.split('/')[-1]
return EplusEnv(
env_name='TESTGYM',
idf_file=idf_file,
weather_file=weather_file,
observation_space=DEFAULT_5ZONE_OBSERVATION_SPACE,
observation_variables=DEFAULT_5ZONE_OBSERVATION_VARIABLES,
action_space=DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
action_variables=DEFAULT_5ZONE_ACTION_VARIABLES,
action_mapping=DEFAULT_5ZONE_ACTION_MAPPING,
reward=LinearReward,
reward_kwargs={
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
weather_variability=None,
config_params=DEFAULT_5ZONE_CONFIG_PARAMS)
@pytest.fixture(scope='function')
def env_demo_continuous(idf_path, weather_path):
idf_file = idf_path.split('/')[-1]
weather_file = weather_path.split('/')[-1]
return EplusEnv(
env_name='TESTGYM',
idf_file=idf_file,
weather_file=weather_file,
observation_space=DEFAULT_5ZONE_OBSERVATION_SPACE,
observation_variables=DEFAULT_5ZONE_OBSERVATION_VARIABLES,
action_space=DEFAULT_5ZONE_ACTION_SPACE_CONTINUOUS,
action_variables=DEFAULT_5ZONE_ACTION_VARIABLES,
action_mapping=DEFAULT_5ZONE_ACTION_MAPPING,
reward=LinearReward,
reward_kwargs={
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
weather_variability=None,
config_params=DEFAULT_5ZONE_CONFIG_PARAMS)
@pytest.fixture(scope='function')
def env_datacenter(idf_path2, weather_path):
idf_file = idf_path2.split('/')[-1]
weather_file = weather_path.split('/')[-1]
return EplusEnv(
env_name='TESTGYM',
idf_file=idf_file,
weather_file=weather_file,
observation_space=DEFAULT_DATACENTER_OBSERVATION_SPACE,
observation_variables=DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
action_space=DEFAULT_DATACENTER_ACTION_SPACE_DISCRETE,
action_variables=DEFAULT_DATACENTER_ACTION_VARIABLES,
action_mapping=DEFAULT_DATACENTER_ACTION_MAPPING,
reward=LinearReward,
reward_kwargs={
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
18,
27),
'range_comfort_summer': (
18,
27)},
weather_variability=None,
config_params=DEFAULT_DATACENTER_CONFIG_PARAMS)
@pytest.fixture(scope='function')
def env_datacenter_continuous(
idf_path2,
weather_path):
idf_file = idf_path2.split('/')[-1]
weather_file = weather_path.split('/')[-1]
return EplusEnv(
env_name='TESTGYM',
idf_file=idf_file,
weather_file=weather_file,
observation_space=DEFAULT_DATACENTER_OBSERVATION_SPACE,
observation_variables=DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
action_space=DEFAULT_DATACENTER_ACTION_SPACE_CONTINUOUS,
action_variables=DEFAULT_DATACENTER_ACTION_VARIABLES,
action_mapping=DEFAULT_DATACENTER_ACTION_MAPPING,
reward=LinearReward,
reward_kwargs={
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
18,
27),
'range_comfort_summer': (
18,
27)},
weather_variability=None,
config_params=DEFAULT_DATACENTER_CONFIG_PARAMS)
# ---------------------------------------------------------------------------- #
# Simulators #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='function')
def simulator(eplus_path, bcvtb_path, idf_path, weather_path, variables_5zone):
env_name = 'TEST'
return EnergyPlus(
eplus_path,
weather_path,
bcvtb_path,
idf_path,
env_name,
variables=variables_5zone,
act_repeat=1,
max_ep_data_store_num=10,
config_params=DEFAULT_5ZONE_CONFIG_PARAMS)
# ---------------------------------------------------------------------------- #
# Simulator Config class #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='function')
def config(idf_path, weather_path2, variables_5zone):
env_name = 'TESTCONFIG'
max_ep_store = 10
return Config(
idf_path=idf_path,
weather_path=weather_path2,
env_name=env_name,
variables=variables_5zone,
max_ep_store=max_ep_store,
extra_config={
'timesteps_per_hour': 2,
'runperiod': (1, 2, 1993, 2, 3, 1993),
'action_definition': {
'ThermostatSetpoint:DualSetpoint': [{
'name': 'Space1-DualSetP-RL',
'heating_name': 'Space1-HtgSetP-RL',
'cooling_name': 'Space1-ClgSetP-RL',
'zones': ['space1-1']
}]
}
})
# ---------------------------------------------------------------------------- #
# Environments with Wrappers #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='function')
def env_wrapper_normalization(env_demo_continuous):
return NormalizeObservation(env=env_demo_continuous, ranges=RANGES_5ZONE)
@pytest.fixture(scope='function')
def env_wrapper_logger(env_demo_continuous):
return LoggerWrapper(env=env_demo_continuous, flag=True)
@pytest.fixture(scope='function')
def env_wrapper_multiobs(env_demo_continuous):
return MultiObsWrapper(env=env_demo_continuous, n=5, flatten=True)
@pytest.fixture(scope='function')
def env_all_wrappers(env_demo_continuous):
env = NormalizeObservation(env=env_demo_continuous, ranges=RANGES_5ZONE)
env = LoggerWrapper(env=env, flag=True)
env = MultiObsWrapper(env=env, n=5, flatten=True)
return env
# ---------------------------------------------------------------------------- #
# Building and weather python models #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='function')
def epm(idf_path, eplus_path):
idd = Idd(os.path.join(eplus_path, 'Energy+.idd'))
return Epm.from_idf(idf_path, idd_or_version=idd)
@pytest.fixture(scope='function')
def weather_data(weather_path):
return WeatherData.from_epw(weather_path)
# ---------------------------------------------------------------------------- #
# Rewards #
# ---------------------------------------------------------------------------- #
@pytest.fixture(scope='function')
def custom_reward():
class CustomReward(BaseReward):
def __init__(self, env):
super(CustomReward, self).__init__(env)
def __call__(self):
return -1.0, {}
return CustomReward
@pytest.fixture(scope='function')
def env_custom_reward(
idf_path,
weather_path,
custom_reward):
idf_file = idf_path.split('/')[-1]
weather_file = weather_path.split('/')[-1]
return EplusEnv(
env_name='TESTGYM',
idf_file=idf_file,
weather_file=weather_file,
observation_space=DEFAULT_5ZONE_OBSERVATION_SPACE,
observation_variables=DEFAULT_5ZONE_OBSERVATION_VARIABLES,
action_space=DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
action_variables=DEFAULT_5ZONE_ACTION_VARIABLES,
action_mapping=DEFAULT_5ZONE_ACTION_MAPPING,
reward=custom_reward,
weather_variability=None,
config_params=DEFAULT_5ZONE_CONFIG_PARAMS
)
@pytest.fixture(scope='function')
def env_linear_reward(idf_path, weather_path):
idf_file = idf_path.split('/')[-1]
weather_file = weather_path.split('/')[-1]
return EplusEnv(
env_name='TESTGYM',
idf_file=idf_file,
weather_file=weather_file,
observation_space=DEFAULT_5ZONE_OBSERVATION_SPACE,
observation_variables=DEFAULT_5ZONE_OBSERVATION_VARIABLES,
action_space=DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
action_variables=DEFAULT_5ZONE_ACTION_VARIABLES,
action_mapping=DEFAULT_5ZONE_ACTION_MAPPING,
reward=LinearReward,
reward_kwargs={
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
weather_variability=None,
config_params=DEFAULT_5ZONE_CONFIG_PARAMS)
@pytest.fixture(scope='function')
def env_linear_reward_args(idf_path, weather_path):
idf_file = idf_path.split('/')[-1]
weather_file = weather_path.split('/')[-1]
return EplusEnv(
env_name='TESTGYM',
idf_file=idf_file,
weather_file=weather_file,
observation_space=DEFAULT_5ZONE_OBSERVATION_SPACE,
observation_variables=DEFAULT_5ZONE_OBSERVATION_VARIABLES,
action_space=DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
action_variables=DEFAULT_5ZONE_ACTION_VARIABLES,
action_mapping=DEFAULT_5ZONE_ACTION_MAPPING,
reward=LinearReward,
reward_kwargs={
'energy_weight': 0.2,
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (18.0, 20.0)},
weather_variability=None,
config_params=DEFAULT_5ZONE_CONFIG_PARAMS)
# ---------------------------------------------------------------------------- #
# WHEN TESTS HAVE BEEN FINISHED #
# ---------------------------------------------------------------------------- #
def pytest_sessionfinish(session, exitstatus):
""" whole test run finishes. """
# Deleting all temporal directories generated during tests
directories = glob('Eplus-env-TEST*/')
for directory in directories:
shutil.rmtree(directory)
# Deleting new random weather files once it has been checked
files = glob('sinergym/data/weather/*Random*.epw')
for file in files:
os.remove(file)
| 1.734375
| 2
|
pixAssist.py
|
vinicius9141/pixAssistInformatica
| 0
|
12775883
|
import sqlite3
# import win32api
banco = sqlite3.connect('pixClientes.db')
cursor = banco.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS registros (
data_pagamento_pix DATE,
valor_pix NUMERIC (10,2)
);''')
# function that inserts a pix payment
def inserirPix():
cursor.execute(f''' INSERT INTO registros (data_pagamento_pix, valor_pix)
VALUES ('{data_pagamento_input}', '{valor_pix_input}')
''')
banco.commit()
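    # Safer alternative (illustrative only; the original script interpolates the
    # values directly): a parameterized query avoids SQL injection, e.g.
    #   cursor.execute(
    #       "INSERT INTO registros (data_pagamento_pix, valor_pix) VALUES (?, ?)",
    #       (data_pagamento_input, valor_pix_input))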
##############################################################################################
# select the records matching the given date
def selecionaRegistro():
cursor.execute(f'''
SELECT valor_pix FROM registros WHERE data_pagamento_pix = '{data_input}';
''')
with open ('rel.txt', 'w') as arquivo:
relatorio = arquivo.write(f' Novo relatorio ')
print(relatorio)
for data_pagamento_pix in cursor.fetchall():
print ("Valor de pagamento pix: R$",data_pagamento_pix)
with open ('rel.txt', 'a') as arquivo:
relatorio = arquivo.write(f'\n Data selecionada: {data_input}, Valor do pagamento PIX: R${data_pagamento_pix}')
print(relatorio)
# simple menu
print (" SELECIONE A OPÇÃO DESEJADA 1 PARA PAGAMENTO E 2 PARA RELATORIO")
op = int (input("O que deseja fazer: "))
if op == 1:
data_pagamento_input = input ("Qual a data do recebimento? ")
valor_pix_input = input ("Qual o valor do pagamento? ")
inserirPix()
# win32api.MessageBox(0, 'Cadastrado com Sucesso', 'Sucesso')
if op == 2:
print ('#####################################################################')
print ('################### DIGITE COMO NO EXMPLO A BAIXO ###################')
print ('################### EX: 30/09/2021 ##################################')
print ('#####################################################################')
data_input = input ("qual data deseja selecionar: ")
selecionaRegistro()
# win32api.MessageBox(0, 'O Relatorio Da Data Seleciona Foi Gerado Procure Pelo Arquivo "rel"', 'Relatorio Gerado Com Sucesso')
| 3.53125
| 4
|
CursoEmVideo/Mundo3/Exercicios/ex106.py
|
rafaelgama/Curso_Python
| 1
|
12775884
|
<filename>CursoEmVideo/Mundo3/Exercicios/ex106.py
# Build a mini-system that uses Python's Interactive Help. The user types a command and the manual appears.
# When the user types the word 'FIM', the program ends. Important: use colors.
c = ('\033[m','\033[1;32m','\033[1;31m','\033[7:30m')
cores = {'limpa':'\033[m',
'bverde':'\033[1;32m',
'bvermelho': '\033[1;31m',
'pretoebranco':'\033[7:30m'}
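# The values above are ANSI escape sequences: '\033[m' resets the formatting,
# '\033[1;32m' is bold green, '\033[1;31m' is bold red, and '\033[7:30m' is
# intended as reverse-video (black-on-white) text.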
print('-=-'*10)
print(cores['pretoebranco']+'_____INICIO_____'+cores['limpa'])
print('-=-'*10)
# function definitions
def ajuda(com):
titulo(f'Acessando o manuel do comando: {com} ', 3)
print(c[3],end='')
help(com)
print(c[0])
def titulo(msg, cor=0):
tam = len(msg) + 4
print(c[cor],end='')
print('-=' * tam )
print(f' {msg}')
print('-=' * tam )
print(c[0],end='')
# main program
comando = ''
while True:
titulo("SSITEMA DE AJUDA PyHELP",1)
comando = str(input('Função ou Biblioteca> ')).strip()
if comando.upper() == 'FIM':
break
else:
ajuda(comando)
titulo('ATÉ LOGO!')
print()
print('-=-'*10)
print(cores['pretoebranco']+'______FIM_______'+cores['limpa'])
print(cores['pretoebranco']+'_Code by Rafael_'+cores['limpa'])
print('-=-'*10)
| 3.921875
| 4
|
src/predict_emotions.py
|
sorizeta/face-emotion-recognition
| 0
|
12775885
|
<gh_stars>0
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # restrict TensorFlow to GPU 0
import csv
import numpy as np
import cv2
import glob
from tensorflow.keras.models import load_model
from facial_analysis import FacialImageProcessing
imgProcessing=FacialImageProcessing(False)
INPUT_SIZE = (224, 224)
model=load_model('../models/affectnet_emotions/mobilenet_7.h5')
model.summary()
idx_to_class={0: 'Anger', 1: 'Disgust', 2: 'Fear', 3: 'Happiness', 4: 'Neutral', 5: 'Sadness', 6: 'Surprise'}
for fpath in glob.glob('/home/ubuntu/emotion_images/*.jpg'):
frame_bgr=cv2.imread(fpath)
frame = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
bounding_boxes, points = imgProcessing.detect_faces(frame)
points = points.T
for bbox,p in zip(bounding_boxes, points):
        box = bbox.astype(int)
x1,y1,x2,y2=box[0:4]
face_img=frame[y1:y2,x1:x2,:]
face_img=cv2.resize(face_img,INPUT_SIZE)
inp=face_img.astype(np.float32)
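        # the three subtractions below are per-channel mean subtraction,
        # presumably matching the Caffe/VGG-style preprocessing used when the
        # model was trained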
inp[..., 0] -= 103.939
inp[..., 1] -= 116.779
inp[..., 2] -= 123.68
inp = np.expand_dims(inp, axis=0)
scores=model.predict(inp)[0]
max_score = idx_to_class[np.argmax(scores)]
with open('emotion_labels.csv', 'a') as csv_file:
writer = csv.writer(csv_file)
writer.writerow([str(fpath), str(max_score)])
| 2.390625
| 2
|
master/TaskMaster.py
|
MaastrichtU-BISS/PyTaskManager
| 6
|
12775886
|
<filename>master/TaskMaster.py
from flask import Flask, Response, request
import json
from DbDao import DbDao
import signal
import sys
import time
# Init configuration file
configFile = open("config.json")
config = json.load(configFile)
configFile.close()
time.sleep(10)
app = Flask('TaskMaster')
dbDao = DbDao(config["connectionString"])
def signal_handler(sig, frame):
print("closing application and db connection")
dbDao.closeConnection()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
@app.route('/')
def index():
return "Hello, World"
@app.route('/client')
def clientList():
clientList = dbDao.getClients()
return Response(json.dumps(clientList), mimetype='application/json')
@app.route('/client/add', methods=["POST"])
def addClient():
try:
data = request.get_json()
except:
return Response(json.dumps({"success": False, 'message': "Could not parse input as JSON"}), mimetype="application/json")
try:
clientId = dbDao.addClient(data["name"], data["email"], data["institute"], data["country"], request.remote_addr)
data = {
'success': True,
'clientId': clientId
}
except:
data = {
'success': False,
'message': "Could not insert data in database"
}
return Response(json.dumps(data), mimetype="application/json")
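# Example request for the endpoint above (illustrative; field names taken from the handler):
#   curl -X POST http://localhost:5000/client/add -H 'Content-Type: application/json' \
#        -d '{"name": "Jane", "email": "jane@example.org", "institute": "Example", "country": "NL"}'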
@app.route('/client/<int:clientId>/task')
def getClientTasks(clientId):
openTasks = dbDao.getClientOpenTasks(clientId)
dbDao.setClientTimestamp(clientId)
return Response(json.dumps(openTasks), mimetype='application/json')
@app.route('/client/<int:clientId>/task/add', methods=["POST"])
def addClientTask(clientId):
try:
data = request.get_json()
except:
return Response(json.dumps({"success": False, 'message': "Could not parse input as JSON"}), mimetype="application/json")
try:
taskId = dbDao.addTask(clientId, data["runId"], data["image"], data["inputString"])
data = {
'success': True,
'taskId': taskId
}
except:
data = {
'success': False,
'message': "Could not insert task in database"
}
return Response(json.dumps(data), mimetype="application/json")
@app.route('/client/<int:clientId>/task/<int:taskId>/result')
def getTaskResult(clientId, taskId):
taskResult = dbDao.getTaskResult(taskId)
return Response(json.dumps(taskResult), mimetype='application/json')
@app.route('/client/<int:clientId>/task/<int:taskId>/result/output')
def getTaskResultOutput(clientId, taskId):
taskResult = dbDao.getTaskResult(taskId)[0]
responseStr = str(taskResult["response"])
return Response(responseStr, mimetype="text/plain")
@app.route('/client/<int:clientId>/task/<int:taskId>/result/log')
def getTaskResultLog(clientId, taskId):
taskResult = dbDao.getTaskResult(taskId)[0]
return Response(str(taskResult["log"]), mimetype="text/plain")
@app.route('/client/<int:clientId>/task/<int:taskId>/result/add', methods=["POST"])
def addTaskResult(clientId, taskId):
try:
data = request.get_json()
except:
return Response(json.dumps({"success": False, 'message': "Could not parse input as JSON"}), mimetype="application/json")
try:
resultId = dbDao.addTaskResult(taskId, data["response"], data["log"])
dbDao.setClientTimestamp(clientId)
data = {
'success': True,
'taskId': resultId
}
except:
data = {
'success': False,
'message': "Could not insert task result in database"
}
return Response(json.dumps(data), mimetype="application/json")
app.run(debug=True, host='0.0.0.0', port=5000)
| 2.40625
| 2
|
models/component/attention_cell_sequence.py
|
foocker/Image2Katex
| 3
|
12775887
|
'''
File: attention_cell_sequence.py
Project: component
File Created: Friday, 28th December 2018 6:05:05 pm
Author: xiaofeng (<EMAIL>)
-----
Last Modified: Friday, 28th December 2018 6:50:40 pm
Modified By: xiaofeng (<EMAIL>)
-----
Copyright 2018.06 - 2018 onion Math, onion Math
'''
import collections
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import RNNCell
# AttentionState = {"att_weight": [], "decoder_out": [], "logits": [], "decoder_state": []}
AttentionState = collections.namedtuple("AttentionState", ("cell_state", "output"))
Attention_weight = list()
class AttCell(RNNCell):
""" Bahdanau Attention compile for the errorchecker model"""
def __init__(self, name, attention_in, decoder_cell, n_hid, dim_att, dim_o, dropuout,
vacab_size, tiles=1, dtype=tf.float32):
self._scope_name = name
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
self._encoder_sequence = attention_in
if isinstance(attention_in, tuple):
self._encoder_sequence = tf.concat(attention_in, 2)
self._cell = decoder_cell # decoder rnn cell
self._n_hid = n_hid # decoder num_unit D_DIM
        self._dim_att = dim_att  # attention size; an intermediate dimension, usually chosen to match the _encoder_sequence dimension
        self._dim_o = dim_o  # output dimension, same as the n_hid parameter
        self._dropout = dropuout  # dropout rate
        self._vacab_size = vacab_size  # decoder vocabulary size, as in a machine translation model
        # in the decoding stage, tiles > 1 is needed when beam search is used; the default of 1 corresponds to greedy decoding
self._tiles = tiles
self._dtype = dtype # default is tf.float32
self._length = tf.shape(self._encoder_sequence)[1] # length of the input sequence
self._en_dim = self._encoder_sequence.shape[2].value # dims of the encoder
self._state_size = AttentionState(self._n_hid, self._dim_o)
self._att_seq = tf.layers.dense(
inputs=self._encoder_sequence, units=self._dim_att, use_bias=False, name="att_img") # B,L,dim_att
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
# beacause in the function the return is logits,so the size is vocab_size
return self._vacab_size
@property
def output_dtype(self):
return self._dtype
def _CalStateBasedSeq(self, name, dim):
"""Returns initial state of dimension specified by dim"""
scope = tf.get_variable_scope()
with tf.variable_scope(scope):
# (B*T,L,E_DIM) -->(B*T,E_DIM)
img_mean = tf.reduce_mean(self._encoder_sequence, axis=1)
W = tf.get_variable("W_{}_0".format(name), shape=[self._en_dim, dim])
b = tf.get_variable("b_{}_0".format(name), shape=[1, dim])
h = tf.tanh(tf.matmul(img_mean, W) + b)
return h
def initial_state(self):
""" setting initial state and output """
initial_states = self._CalStateBasedSeq('init_state', self._n_hid) # (B,HID)
initial_out = self._CalStateBasedSeq('init_out', self._dim_o) # (B,DIM_O)
return AttentionState(initial_states, initial_out)
def _cal_att(self, hid_cur):
with tf.variable_scope('att_cal'):
if self._tiles > 1:
_encoder_sequence = tf.expand_dims(self._encoder_sequence, axis=1) # (B,1,L,E_DIM)
_encoder_sequence = tf.tile(_encoder_sequence, multiples=[
1, self._tiles, 1, 1]) # (B,T,L,E_DIM)
_encoder_sequence = tf.reshape(
_encoder_sequence, shape=[-1, self._length, self._en_dim]) # (B*T,L,E_DIM)
_att_seq = tf.expand_dims(self._att_seq, axis=1) # B,1,L,dim_att
_att_seq = tf.tile(_att_seq, multiples=[1, self._tiles, 1, 1])
_att_seq = tf.reshape(
_att_seq, shape=[-1, self._length, self._dim_att]) # (B*T,L,dim_att)
else:
_att_seq = self._att_seq
_encoder_sequence = self._encoder_sequence
# computes attention over the hidden vector
# hid_cur shape is [ B,num_units]
# att_h [B,dim_att]
att_h = tf.layers.dense(inputs=hid_cur, units=self._dim_att, use_bias=False)
# sums the two contributions
# att_h --> [B,1,dim_att]
att_h = tf.expand_dims(att_h, axis=1)
# Computes the score for the Bahdanau style
# _att_seq contains the full encoder output, shape is [batch,L, _dim_att]
# att_h contains the current hiddent of the deocder, shape is [B,1,dim_att]
att = tf.tanh(_att_seq + att_h) # shape [B,L,dim_att]
# computes scalar product with beta vector
# works faster with a matmul than with a * and a tf.reduce_sum
# For each of the timestamps its vector of size A from `att` is reduced with `att_beta` vector
att_beta = tf.get_variable("att_beta", shape=[self._dim_att, 1], dtype=tf.float32)
# att_flat shape is [B*L,dim_att]
att_flat = tf.reshape(att, shape=[-1, self._dim_att])
# computes score
e = tf.matmul(att_flat, att_beta) # shape is [B*L,1]
e = tf.reshape(e, shape=[-1, self._length]) # shape is [B,L]
# computes attention weights
attention = tf.nn.softmax(e) # shape is (B,L)
_att = tf.expand_dims(attention, axis=-1) # (B,L,1)
# computes the contex vector with the attention and encoder_sequence
contex = tf.reduce_sum(_att * _encoder_sequence, axis=1) # [B,L,1]*[B,L,E]=(B,E)
return attention, contex
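    # In equation form, _cal_att above is standard Bahdanau-style additive attention:
    #   e_i     = beta^T tanh(W_s h_i + W_h s_t)   score for encoder step i
    #   alpha_i = softmax(e_i)                     attention weights
    #   c_t     = sum_i alpha_i h_i                context vector
    # where h_i are the encoder outputs and s_t is the current decoder hidden state.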
def step(self, embeding, attention_cell_state):
"""
Args:
embeding: shape is (B,EMBEDING_DIM)
attention_cell_state: state from previous step comes from AttentionState
"""
_initial_state, output_tm1 = attention_cell_state
scope = tf.get_variable_scope()
with tf.variable_scope(scope, initializer=tf.orthogonal_initializer()):
x = tf.concat([embeding, output_tm1], axis=-1)
# compute current hidden and cell states
new_hid, new_cell_state = self._cell.__call__(inputs=x, state=_initial_state)
_attention, contex = self._cal_att(new_hid)
def _debug_att(val):
global Attention_weight
Attention_weight = []
Attention_weight += [val]
return False
print_func = tf.py_func(_debug_att, [_attention], [tf.bool])
with tf.control_dependencies(print_func):
_attention = tf.identity(_attention, name='Attention_weight')
o_W_c = tf.get_variable("o_W_c", dtype=tf.float32,
shape=(self._en_dim, self._n_hid))
o_W_h = tf.get_variable("o_W_h", dtype=tf.float32,
shape=(self._n_hid, self._dim_o))
new_o = tf.tanh(tf.matmul(new_hid, o_W_h) + tf.matmul(contex, o_W_c))
new_o = tf.nn.dropout(new_o, self._dropout)
y_W_o = tf.get_variable("y_W_o", dtype=tf.float32,
shape=(self._dim_o, self._vacab_size))
# logits for current step
# shape is [B,vocabsize] for each size
logits = tf.matmul(new_o, y_W_o)
new_state = AttentionState(new_cell_state, new_o)
return logits, new_state
def __call__(self, _inputs, _state):
"""
The dynamic rnn function will use this call function to calculate step by step
Args:
inputs: the embedding of the previous word for training only,decoder sequence
state: (AttentionState) (h,c, o) where h is the hidden state and
o is the vector used to make the prediction of
the previous word
"""
logits, state = self.step(_inputs, _state)
return (logits, state)
| 2.671875
| 3
|
dataworkspace/dataworkspace/apps/explorer/utils.py
|
uktrade/jupyterhub-data-auth-admin
| 1
|
12775888
|
<filename>dataworkspace/dataworkspace/apps/explorer/utils.py
import json
import logging
import re
from contextlib import contextmanager
from datetime import timedelta
import psycopg2
import sqlparse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.views import LoginView
from django.core.cache import cache
from django.shortcuts import get_object_or_404
from dataworkspace.apps.core.models import Database
from dataworkspace.apps.core.utils import (
close_admin_db_connection_if_not_in_atomic_block,
new_private_database_credentials,
source_tables_for_user,
db_role_schema_suffix_for_user,
postgres_user,
USER_SCHEMA_STEM,
)
from dataworkspace.apps.explorer.models import QueryLog
logger = logging.getLogger("app")
EXPLORER_PARAM_TOKEN = "$$"
def param(name):
return "%s%s%s" % (EXPLORER_PARAM_TOKEN, name, EXPLORER_PARAM_TOKEN)
def safe_login_prompt(request):
defaults = {
"template_name": "admin/login.html",
"authentication_form": AuthenticationForm,
"extra_context": {
"title": "Log in",
"app_path": request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
},
}
return LoginView.as_view(**defaults)(request)
def safe_cast(val, to_type, default=None):
try:
return to_type(val)
except ValueError:
return default
def get_int_from_request(request, name, default):
val = request.GET.get(name, default)
return safe_cast(val, int, default) if val else None
def get_params_from_request(request):
val = request.GET.get("params", None)
try:
d = {}
tuples = val.split("|")
for t in tuples:
res = t.split(":")
d[res[0]] = res[1]
return d
except Exception: # pylint: disable=broad-except
return None
def url_get_rows(request):
rows = request.POST.get("query-rows", str(settings.EXPLORER_DEFAULT_ROWS))
if not rows.isnumeric():
return settings.EXPLORER_DEFAULT_ROWS
return int(rows)
def url_get_page(request):
page = request.POST.get("query-page", "1")
if not page.isnumeric():
return 1
return int(page)
def url_get_query_id(request):
return get_int_from_request(request, "query_id", None)
def url_get_log_id(request):
return get_int_from_request(request, "querylog_id", None)
def url_get_show(request):
return bool(get_int_from_request(request, "show", 1))
def url_get_save(request):
return bool(get_int_from_request(request, "save", 0))
def url_get_params(request):
return get_params_from_request(request)
def fmt_sql(sql):
return sqlparse.format(sql, reindent=True, keyword_case="upper")
def noop_decorator(f):
return f
class InvalidExplorerConnectionException(Exception):
pass
class QueryException(Exception):
pass
credentials_version_key = "explorer_credentials_version"
def get_user_cached_credentials_key(user):
# Set to never expire as reverting to a previous version will cause
# potentially invalid cached credentials to be used if the user loses
# or gains access to a dashboard
cache.set(credentials_version_key, 1, nx=True, timeout=None)
credentials_version = cache.get(credentials_version_key, None)
return f"explorer_credentials_{credentials_version}_{user.profile.sso_id}"
def get_user_explorer_connection_settings(user, alias):
from dataworkspace.apps.explorer.connections import ( # pylint: disable=import-outside-toplevel
connections,
)
if not alias:
alias = settings.EXPLORER_DEFAULT_CONNECTION
if alias not in connections:
raise InvalidExplorerConnectionException(
"Attempted to access connection %s, but that is not a registered Explorer connection."
% alias
)
def get_available_user_connections(_user_credentials):
return {data["memorable_name"]: data for data in _user_credentials}
user_profile_sso_id = user.profile.sso_id
close_admin_db_connection_if_not_in_atomic_block()
with cache.lock(
f"get-explorer-connection-{user_profile_sso_id}",
blocking_timeout=30,
timeout=180,
):
cache_key = get_user_cached_credentials_key(user)
user_credentials = cache.get(cache_key, None)
# Make sure that the connection settings are still valid
if user_credentials:
db_aliases_to_credentials = get_available_user_connections(user_credentials)
try:
with user_explorer_connection(db_aliases_to_credentials[alias]):
pass
except psycopg2.OperationalError:
logger.exception(
"Unable to connect using existing cached explorer credentials for %s",
user,
)
user_credentials = None
if not user_credentials:
db_role_schema_suffix = db_role_schema_suffix_for_user(user)
source_tables = source_tables_for_user(user)
db_user = postgres_user(user.email, suffix="explorer")
duration = timedelta(hours=24)
cache_duration = (duration - timedelta(minutes=15)).total_seconds()
user_credentials = new_private_database_credentials(
db_role_schema_suffix,
source_tables,
db_user,
user,
valid_for=duration,
force_create_for_databases=Database.objects.filter(
memorable_name__in=connections.keys()
).all(),
)
cache.set(cache_key, user_credentials, timeout=cache_duration)
db_aliases_to_credentials = get_available_user_connections(user_credentials)
if alias not in db_aliases_to_credentials:
raise RuntimeError(
f"The credentials for {user.email} did not include any for the `{alias}` database."
)
return db_aliases_to_credentials[alias]
def remove_data_explorer_user_cached_credentials(user):
cache_key = get_user_cached_credentials_key(user)
cache.delete(cache_key)
def invalidate_data_explorer_user_cached_credentials():
credentials_version = cache.get(credentials_version_key, None)
if credentials_version:
cache.incr(credentials_version_key)
@contextmanager
def user_explorer_connection(connection_settings):
with psycopg2.connect(
dbname=connection_settings["db_name"],
host=connection_settings["db_host"],
user=connection_settings["db_user"],
        password=connection_settings["db_password"],
port=connection_settings["db_port"],
) as conn:
yield conn
def get_total_pages(total_rows, page_size):
if not total_rows or not page_size:
return 1
remainder = total_rows % page_size
if remainder:
remainder = 1
return int(total_rows / page_size) + remainder
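# For example, get_total_pages(101, 50) == 3 while get_total_pages(100, 50) == 2,
# since any non-zero remainder adds one final partial page.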
def tempory_query_table_name(user, query_log_id):
schema_name = f"{USER_SCHEMA_STEM}{db_role_schema_suffix_for_user(user)}"
return f"{schema_name}._data_explorer_tmp_query_{query_log_id}"
def fetch_query_results(query_log_id):
query_log = get_object_or_404(QueryLog, pk=query_log_id)
user = query_log.run_by_user
user_connection_settings = get_user_explorer_connection_settings(user, query_log.connection)
table_name = tempory_query_table_name(user, query_log.id)
with user_explorer_connection(user_connection_settings) as conn:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute("select oid from pg_type where typname='jsonb'")
jsonb_code = cursor.fetchone()[0]
cursor.execute(f"SELECT * FROM {table_name}")
# strip the prefix from the results
description = [(re.sub(r"col_\d*_", "", s.name),) for s in cursor.description]
headers = [d[0].strip() for d in description] if description else ["--"]
data_list = [list(r) for r in cursor]
types = ["jsonb" if t.type_code == jsonb_code else None for t in cursor.description]
data = [
[
json.dumps(row, indent=2) if types[i] == "jsonb" else row
for i, row in enumerate(record)
]
for record in data_list
]
return headers, data, query_log
| 2
| 2
|
algorithms/djb2_nokoyawa.py
|
CryptEncrypt/hashdb
| 0
|
12775889
|
#!/usr/bin/env python
DESCRIPTION = "Variant of djb2 hash in use by Nokoyawa ransomware"
# Type can be either 'unsigned_int' (32bit) or 'unsigned_long' (64bit)
TYPE = 'unsigned_int'
# Test must match the exact hash of the string 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
TEST_1 = 3792689168
def hash(data):
generated_hash = 5381
for b in data:
generated_hash = (generated_hash * 33 + (b if b < 0x61 else (b - 0x20))) & 0xFFFFFFFF
return generated_hash
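# Quick self-check (illustrative addition, assumes the TEST_1 constant above is
# correct): hashing the reference string as bytes should reproduce TEST_1.
if __name__ == '__main__':
    assert hash(b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789') == TEST_1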
| 2.875
| 3
|
plotting.py
|
peterwinter/boxcluster_tutorial
| 0
|
12775890
|
import matplotlib.pyplot as plt
def add_cuts(ax, cuts, N):
if cuts[-1] != N:
cuts.append(N)
print(len(cuts))
c_last = 0
for c in cuts:
color = 'k'
ax.plot([c, c], [c, c_last], color)
ax.plot([c, c_last], [c, c], color)
ax.plot([c, c_last], [c_last, c_last], color)
ax.plot([c_last, c_last], [c, c_last], color)
c_last = c
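# add_cuts outlines each block along the diagonal: for every pair of consecutive
# cut positions (c_last, c) it draws the four sides of the corresponding square
# on top of the matrix plot.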
def mplot(a, title=None, boxes=None):
cmap = 'spectral'
fig, ax = plt.subplots(figsize=(4, 4))
ax.pcolor(a, vmin=0.1, vmax=0.8, cmap=cmap)
N = len(a)
if boxes is not None:
add_cuts(ax, cuts=boxes.copy(), N=N)
ax.set_xlim([0, N])
ax.set_ylim([0, N])
if title is not None:
ax.set_title(title, size=20)
plt.show()
def mplot2(a, title=None, boxes=None, cmap='spectral'):
fig, ax = plt.subplots(figsize=(4, 4))
ax.pcolor(a, cmap=cmap)
N = len(a)
if boxes is not None:
add_cuts(ax, cuts=boxes.copy(), N=N)
ax.set_xlim([0, N])
ax.set_ylim([0, N])
if title is not None:
ax.set_title(title, size=20)
plt.show()
def plot_modules(modules, G):
""" Plot the modules of a graph"""
import networkx as nx
values = [modules[n] for n in G.nodes()]
nx.draw(G, node_color=values)
plt.show()
def get_graph(filename):
    """ return a graph from an edgelist """
    import networkx as nx  # local import, matching the style of plot_modules above
    G = nx.Graph()
    f = open(filename)
    data = f.readlines()
    edges = []
    for line in data:
        # map() is lazy in Python 3, so materialise it before the emptiness check
        entry = list(map(int, line.rstrip().split()))
        if entry:
            edges.append(tuple(entry))
    G.add_edges_from(edges)
    f.close()
    return G
| 2.640625
| 3
|
test/BaseCurrencyAdjustmentTest.py
|
harshal-choudhari/books-python-wrappers
| 1
|
12775891
|
#$Id$#
from books.model.BaseCurrencyAdjustment import BaseCurrencyAdjustment
from books.service.ZohoBooks import ZohoBooks
import os
access_token = os.environ.get('ACCESS_TOKEN')
organization_id = os.environ.get('ORGANIZATION_ID')
zoho_books = ZohoBooks(access_token, organization_id)
base_currency_adjustment_api = zoho_books.get_base_currency_adjustment_api()
base_currency_adjustment_id = base_currency_adjustment_api.get_base_currency_adjustments().get_base_currency_adjustments()[0].get_base_currency_adjustment_id()
#List base currency adjustment
parameter = { 'filter_by': 'Date.All',
'sort_column': 'adjustment_date'}
print base_currency_adjustment_api.get_base_currency_adjustments()
print base_currency_adjustment_api.get_base_currency_adjustments(parameter)
#Get a base currency adjustment
print base_currency_adjustment_api.get(base_currency_adjustment_id)
# List account details for base currency adjustments
settings_api = zoho_books.get_settings_api()
currency_id = settings_api.get_currencies().get_currencies()[0].get_currency_id()
parameter = {'currency_id': currency_id,
'adjustment_date': '2014-04-21',
'exchange_rate': 20.0,
'notes': 'sdfs'}
print base_currency_adjustment_api.list_account_details(parameter)
# Create a base currency adjustment
account_ids = '71127000000000367'
base_currency_adjustment = BaseCurrencyAdjustment()
base_currency_adjustment.set_currency_id('71127000000000105')
base_currency_adjustment.set_adjustment_date('2014-04-21')
base_currency_adjustment.set_exchange_rate(20.0)
base_currency_adjustment.set_notes('hello')
print base_currency_adjustment_api.create(base_currency_adjustment, account_ids)
#Delete a base currency adjustment
print base_currency_adjustment_api.delete(base_currency_adjustment_id)
| 1.976563
| 2
|
track3/utils/vis/vis_split.py
|
NVIDIAAICITYCHALLENGE/2018AICITY_Beihang
| 4
|
12775892
|
<reponame>NVIDIAAICITYCHALLENGE/2018AICITY_Beihang
import numpy as np
import sys
import os
import cv2
#version_num = sys.argv[1]
root_path = 'home_directory/VIC/track3/new/tracklets'
track_res_file = os.path.join(root_path, 'track_res_idx_v5_1_0.4_nodate.txt') #'track_res_idx_' + version_num + '.txt')
with open(track_res_file, 'r') as f:
lines = f.readlines()
num = len(lines)
total_split_num = 50
each = num / total_split_num
# split the res
'''
for i in range(total_split_num):
sid = i * each
eid = (i + 1) * each
with open(os.path.join(root_path, 'v5_1_0.4_nodate_splits/%d.txt' % i), 'w') as f: # version_num + '_splits/%d.txt' % i), 'w') as f:
f.writelines(lines[sid:eid])
print i, 'done'
print 'all done'
exit(0)
'''
split_id = int(sys.argv[1])
track_res_file = os.path.join(root_path, 'v5_1_0.4_nodate_splits/%d.txt' % split_id) #version_num + '_splits/%d.txt' % split_id)
with open(track_res_file, 'r') as f:
lines = f.readlines()
tracks = [x.strip().split() for x in lines]
video_dir = '/mnt/lustre/share/aicitychallenge/track3'
raw_dets_dir = os.path.join(video_dir, 'dets')
vis_dir = 'home_directory/VIC/track3/new/tracklets/vis/v5_1_0.4_nodate' #+ version_num
if not os.path.exists(vis_dir):
os.makedirs(vis_dir)
def vis(im, x, y, w, h, im_write_name):
im = im[y:y+h, x:x+w]
cv2.imwrite(im_write_name, im)
# for i in range(tracks.shape[0]):
# for j in range(tracks.shape[1]):
# loc = tracks[i, j][:6]
# tid = int(tracks[i, j][7:])
for i in range(len(tracks)):
for j in range(len(tracks[i])):
loc = tracks[i][j][:6]
tid = int(tracks[i][j][7:])
raw_det_file = os.path.join(raw_dets_dir, loc + '.mp4-res.txt')
assert os.path.isfile(raw_det_file)
raw_dets = np.genfromtxt(raw_det_file, delimiter=',')
raw_dets_t = raw_dets[np.where(raw_dets[:, 1] == tid)[0]][:, :-4]
video_name = os.path.join(video_dir, loc + '.mp4')
assert os.path.isfile(video_name)
cap = cv2.VideoCapture(video_name)
if not cap.isOpened():
print video_name, 'cannot open!'
exit(-1)
# if raw_dets_t.shape[0] > 50:
# T = 50
# else:
T = raw_dets_t.shape[0]
vis_cnt = 0
for t in range(T): #raw_dets_t.shape[0]):
# if t > 10:
# break
x, y, w, h = raw_dets_t[t, 2:]
x, y, w, h = int(x), int(y), int(w), int(h)
if w * h < 6000:
continue
cap.set(1, int(raw_dets_t[t, 0]))
flag, frame = cap.read()
while not flag:
print 'read', tracks[i][j], 'failed!'
flag, frame = cap.read()
dir_name = os.path.join(vis_dir, str(i + split_id * each))
if not os.path.exists(dir_name):
os.makedirs(dir_name)
sub_dir_name = os.path.join(dir_name, '%03d' % j)
if not os.path.exists(sub_dir_name):
os.makedirs(sub_dir_name)
vis(frame, x, y, w, h, os.path.join(sub_dir_name, loc + '_%06d.jpg' % t))
vis_cnt += 1
# if vis_cnt > 100:
# break
print i + split_id * each, 'finished'
print 'done'
| 2.046875
| 2
|
CODE/run_dapt_task.py
|
Zaaachary/CSQA
| 0
|
12775893
|
#! -*- encoding:utf-8 -*-
"""
@File : run_dapt_task.py
@Author : <NAME>
@Contact : <EMAIL>
@Dscpt :
"""
import argparse
import logging
import os
import time
from pprint import pprint
from transformers import AlbertTokenizer, BertTokenizer
from dapt_task.data import *
from dapt_task.controller import DomainAdaptivePreTrain
from model.DAPTModels import BertForPreTraining, BertForMaskedLM
from utils.common import mkdir_if_notexist, result_dump, set_seed
logger = logging.getLogger("run_task")
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(name)s - %(message)s', datefmt = r"%y/%m/%d %H:%M")
console.setFormatter(formatter)
logger.addHandler(console)
def select_tokenizer(args):
if "albert" in args.PTM_model_vocab_dir:
return AlbertTokenizer.from_pretrained(args.PTM_model_vocab_dir)
elif "bert" in args.PTM_model_vocab_dir:
return BertTokenizer.from_pretrained(args.PTM_model_vocab_dir)
else:
logger.error("No Tokenizer Matched")
def select_task(args):
model_dict = {
"BertPT": (BertForPreTraining, []),
"BertMLM": (BertForMaskedLM, []),
}
processor_dict = {
"Webster": Webster_Processor,
"OMCS": OMCS_Processor,
}
processor_name, model_name = args.task_name.split('_', maxsplit=1)
ModelClass, args_list = model_dict[model_name]
ProcessorClass = processor_dict[processor_name]
model_kwargs = {arg: args.__dict__[arg] for arg in args_list}
return ModelClass, ProcessorClass, model_kwargs
def set_result(args):
'''
    set result dir name according to the task
'''
if args.mission in ('train', 'conti-train'):
task_str = time.strftime(r'%H%M-%b%d') + f'_seed{args.seed}'
if 'webster' in args.task_name:
task_str += f'_websterv{args.DAPT_version}'
args.result_dir = os.path.join(
args.result_dir,
os.path.basename(args.PTM_model_vocab_dir),
args.task_name,
task_str, ''
)
args.task_str = task_str
else:
args.task_str = 'predict or dev'
args.result_dir = args.saved_model_dir
mkdir_if_notexist(args.result_dir)
# set logging
log_file_dir = os.path.join(args.result_dir, 'task_log.txt')
logging.basicConfig(
filename = log_file_dir,
filemode = 'a',
level = logging.INFO,
format = '%(asctime)s %(name)s - %(message)s',
datefmt = r"%y/%m/%d %H:%M"
)
result_dump(args, args.__dict__, 'task_args.json')
pprint(args.__dict__)
def main(args):
start = time.time()
logger.info(f"start in {start}")
set_result(args)
set_seed(args)
# load data and preprocess
logger.info(f"select tokenizer and model for task {args.task_name}")
tokenizer = select_tokenizer(args)
model, Processor, model_kwargs = select_task(args)
    # initialize controller with the model
controller = DomainAdaptivePreTrain(args, model_kwargs)
controller.load_model(model)
controller.load_data(Processor, tokenizer)
    # run task according to the mission
if args.mission in ('train', 'conti-train'):
controller.train()
elif args.mission == 'eval':
controller.run_dev()
elif args.mission == 'predict':
controller.predict_test()
end = time.time()
logger.info(f"task total run time {end-start:.2f} second")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# other param
parser.add_argument('--task_name', type=str, help="model & processor will be selected according to task")
parser.add_argument('--mission', type=str, choices=['train', 'eval', 'predict', 'conti-train'])
parser.add_argument('--fp16', type=int, default=0)
parser.add_argument('--gpu_ids', type=str, default='-1')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--save_mode', type=str, choices=['epoch', 'step', 'end'], default='epoch')
parser.add_argument('--print_step', type=int, default=250)
parser.add_argument('--evltest_batch_size', type=int, default=8)
parser.add_argument('--eval_after_tacc', type=float, default=0)
parser.add_argument('--clip_batch_off', action='store_true', default=False, help="clip batch to shortest case")
# task-specific hyper param
parser.add_argument('--Webster_version', type=str, default=None)
parser.add_argument('--nsp', action='store_true', default=False)
parser.add_argument('--mask_pct', type=float, default=0.15)
parser.add_argument('--max_seq_len', type=int, default=40)
parser.add_argument('--mask_method', type=str, choices=['random'])
# train hyper param
parser.add_argument('--train_batch_size', type=int, default=4)
parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
parser.add_argument('--num_train_epochs', type=int, default=5)
parser.add_argument('--learning_rate', type=float, default=2e-5)
parser.add_argument('--warmup_proportion', type=float, default=0.1)
parser.add_argument('--weight_decay', type=float, default=0.1)
# data param
parser.add_argument('--dataset_dir', type=str, default='../DATA')
parser.add_argument('--result_dir', type=str, default=None)
parser.add_argument('--saved_model_dir', type=str, default=None)
parser.add_argument('--PTM_model_vocab_dir', type=str, default=None)
args_str = r"""
--task_name Webster_Bert
--mission train
--fp16 0
--gpu_ids 0
--seed 42
--save_mode epoch
--print_step 50
--evltest_batch_size 12
--eval_after_tacc 0.8
--DAPT_version 1.0
--mask_pct 0.20
--max_seq_len 40
--mask_method random
--train_batch_size 2
--gradient_accumulation_steps 8
--num_train_epochs 2
--learning_rate 2e-5
--warmup_proportion 0.1
--weight_decay 0.1
--dataset_dir ..\DATA
--result_dir ..\DATA\result
--saved_model_dir D:\CODE\Python\Transformers-Models\bert-base-cased
--PTM_model_vocab_dir D:\CODE\Python\Transformers-Models\bert-base-cased
"""
args = parser.parse_args()
# args = parser.parse_args(args_str.split())
main(args)
| 2.140625
| 2
|
sip/execution_control/configuration_db/sip_config_db/states/tests/test_services.py
|
SKA-ScienceDataProcessor/integration-prototype
| 3
|
12775894
|
# coding=utf-8
"""Unit testing for the states.services module."""
from ..service_state import ServiceState
from ..services import get_service_state_list
from ... import ConfigDb
DB = ConfigDb()
def test_states_get_service_list():
"""Get the list of known services."""
DB.flush_db()
service = ServiceState('ExecutionControl', 'MasterController',
'1.0.0')
assert service.id == 'ExecutionControl:MasterController:1.0.0'
assert service.subsystem == 'ExecutionControl'
assert service.name == 'MasterController'
assert service.version == '1.0.0'
ServiceState('TangoControl', 'SDPMaster', '1.0.0')
ServiceState('TangoControl', 'TangoDatabaseDS', '1.0.0')
ServiceState('TangoControl', 'TangoMySQL', '1.0.0')
ServiceState('Platform', 'Redis', '1.0.0')
ServiceState('Platform', 'Kafka', '1.0.0')
services = get_service_state_list()
assert service.id in [service.id for service in services]
| 2.421875
| 2
|
Pyscripts/ShortPaper/1.GriddingData/GD05_LandCoverData.py
|
ArdenB/fireflies
| 0
|
12775895
|
"""
Script goal,
Open land cover data and build a simple cover map
"""
#==============================================================================
__title__ = "LandCover"
__author__ = "<NAME>"
__version__ = "v1.0(12.03.2021)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
# import geopandas as gpd
import argparse
import datetime as dt
import warnings as warn
import xarray as xr
import bottleneck as bn
import scipy as sp
import glob
import shutil
import time
from dask.diagnostics import ProgressBar
import rasterio
from collections import OrderedDict
# from scipy import stats
# from numba import jit
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
# import seaborn as sns
import matplotlib as mpl
import cartopy.crs as ccrs
import cartopy.feature as cpf
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import make_axes_locatable
import socket
# ========== Import my functions ==========
import myfunctions.corefunctions as cf
import myfunctions.PlotFunctions as pf
# import cartopy.feature as cpf
# from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# # Import debugging packages
# import pdb as ipdb
# import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main():
    # ========== Setup the broad information
region = "SIBERIA"
box = [-10.0, 180, 40, 70]
# ========== Load in the different data from glc ==========
path = "./data/LandCover/"
# years = [2000, 2010]
legendfn = [f"{path}glc2000_v1_1/Tiff/Global_Legend.csv", f"{path}gez2010/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv"]
# geotiffn = [f"{path}glc2000_v1_1/Tiff/glc2000_v1_1.tif", f"{path}gez2010/OUTPUT.tif", f"{path}gez2010/IsBorealV3.tif"]
Down = ["MODIS", "esacci", "COPERN_BA"]
res = ["MODIS", "GFED", "TerraClimate", ] #"COPERN_BA", "esacci",
force = False
for dsres in res:
fnout = f"{path}Regridded_forestzone_{dsres}.nc"
if os.path.isfile(fnout) and not force:
print(f"{dsres} has an existing file")
continue
else:
print(dsres)
dataname = ["LandCover", "GlobalEcologicalZones", "DinersteinRegions", "BorealMask"]
if dsres in Down:
datares = "MODIS"
else:
datares = dsres
geotiffn = [f"{path}glc2000_v1_1/Tiff/glc2000_v1_1.tif", f"{path}Dinerstein_Aggregated/Masks/Boreal_climatic_{datares}.tif", f"{path}Dinerstein_Aggregated/Masks/BorealEco_2017_{datares}.tif", f"{path}Dinerstein_Aggregated/Masks/Boreal_buf_{datares}.tif"]
mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsres)
ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
mask = ds_msk.datamask
# out_dic = OrderedDict()
outlist = []
key_dic = OrderedDict()
for dsnx, legfn, tiffn in zip(dataname, legendfn, geotiffn):
print(dsnx)
# +++++ open the dataarray +++++
key_dic[dsnx] = pd.read_csv(legfn)
da = xr.open_rasterio(tiffn).transpose("y", "x", "band").rename({"x":"longitude", "y":"latitude", "band":"time"}).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
da["time"] = [pd.Timestamp("2018-12-31")]
if da.longitude.shape > ds_msk.longitude.shape:
print(da.latitude.shape[0], ds_msk.latitude.shape[0])
print ("Coarsnening data started at: ", pd.Timestamp.now())
# breakpoint()
# Coarsen/ downscale
latscale = int(da.latitude.shape[0] / ds_msk.latitude.shape[0])
lonscale = int(da.longitude.shape[0] / ds_msk.longitude.shape[0])
da = da.coarsen(latitude=latscale, longitude=lonscale, boundary ="pad").median()
da = da.round()
da = da.reindex_like(mask, method="nearest")
delay = xr.Dataset({dsnx:da}).to_netcdf(f"/tmp/{dsres}_{dsnx}.nc", format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
print(f"Creating temp netcdf for {dsres} {dsnx} at: {pd.Timestamp.now()}")
with ProgressBar():
delay.compute()
# out_dic[dsnx]
outlist.append(f"/tmp/{dsres}_{dsnx}.nc")
da = None
# ========== get the FAO climate zones ==========
# ds = xr.Dataset(out_dic)
ds = xr.open_mfdataset(outlist).transpose('time', 'latitude', 'longitude')
# breakpoint()
GlobalAttributes(ds, dsres, fnameout=fnout)
delayed_obj = ds.to_netcdf(fnout, format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
print(f"Starting write of {dsres} data at: {pd.Timestamp.now()}")
with ProgressBar():
results = delayed_obj.compute()
print(f"{dsres} completed at: {pd.Timestamp.now()}")
if dsres == "MODIS":
for dsin in ["esacci", "COPERN_BA"]:
print(dsin)
mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsin)
ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
mask = ds_msk.datamask
ds_out = ds.reindex_like(mask, method="nearest")
fnout = f"{path}Regridded_forestzone_{dsin}.nc"
delayed_obj = ds_out.to_netcdf(fnout, format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
print(f"Starting write of {dsin} data at: {pd.Timestamp.now()}")
with ProgressBar():
results = delayed_obj.compute()
# breakpoint()
breakpoint()
for dsn in ["TerraClimate","GFED", "MODIS", "esacci", "COPERN_BA"]:
print(dsn)
mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsn)
ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1])))
# ds_mod = ds.reindex_like(ds_msk, method="nearest")
# mask = ds_msk.datamask
# # mask = ds_msk.datamask.reindex_like(ds, method="nearest")
# # boreal mask
# title = "FAO Boreal Zone"
# plotmaker(ds_mod.Boreal, title, mask)
# # Tree cover mask
# title = "Needle Leaf Tree species"
# plotmaker(((ds_mod.LandCover == 4)+(ds_mod.LandCover == 5)), title, mask)
# title = "Needle Leaf and mixed fores"
# plotmaker(((ds_mod.LandCover == 6)+(ds_mod.LandCover == 4)+(ds_mod.LandCover == 5)), title, mask)
# title = "Broadleaf forest"
# plotmaker(((ds_mod.LandCover == 1)+(ds_mod.LandCover == 2)+(ds_mod.LandCover == 3)), title, mask)
breakpoint()
breakpoint()
#==============================================================================
# def _lookupkeys():
# dataname = ["LandCover", "GlobalEcologicalZones", "DinersteinRegions", "BorealMask"]
# legendfn = ([f"{path}glc2000_v1_1/Tiff/Global_Legend.csv", f"{path}gez2010/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv"])
# for nm, lfn in zip(dataname, legendfn)
def GlobalAttributes(ds, dsn, fnameout=""):
"""
Creates the global attributes for the netcdf file that is being written
these attributes come from :
https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
    args
        ds: xarray ds
            Dataset containing the information being interpreted
        fnout: str
            filename out
    returns:
        attributes Ordered Dictionary containing the attribute information
"""
# ========== Create the ordered dictionary ==========
if ds is None:
attr = OrderedDict()
else:
attr = ds.attrs
# fetch the references for my publications
# pubs = puplications()
# ========== Fill the Dictionary ==========
# ++++++++++ Highly recomended ++++++++++
attr["FileName"] = fnameout
attr["title"] = "Datamasks"
attr["summary"] = "BorealForestCovermaks_%sData" % (dsn)
attr["Conventions"] = "CF-1.7"
# ++++++++++ Data Provinance ++++++++++
attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s. FRI caluculated using %s data" % (
str(pd.Timestamp.now()), __title__, __file__, __version__, __author__, dsn)
if not ds is None:
attr["history"] += ds.history
attr["creator_name"] = __author__
attr["creator_url"] = "ardenburrell.com"
attr["creator_email"] = __email__
attr["Institution"] = "Woodwell"
attr["date_created"] = str(pd.Timestamp.now())
ds.longitude.attrs['units'] = 'degrees_east'
ds.latitude.attrs['units'] = 'degrees_north'
# ++++++++++ Netcdf Summary infomation ++++++++++
# attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1))
# attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31))
return attr
def _mode(da):
vals = sp.stats.mode(da, axis=None, nan_policy="omit")
return vals[0][0]
def plotmaker(ds_in, title, mask):
# breakpoint()
latiMid=np.mean([70.0, 40.0])
longMid=np.mean([-10.0, 180.0])
fig, ax = plt.subplots(1, 1, figsize=(20,12), subplot_kw={'projection': ccrs.Orthographic(longMid, latiMid)})
ds_in.where(mask==1).plot(transform=ccrs.PlateCarree(), ax=ax)
coast = cpf.GSHHSFeature(scale="intermediate")
ax.add_feature(cpf.BORDERS, linestyle='--', zorder=102)
ax.add_feature(cpf.LAND, facecolor='dimgrey', alpha=1, zorder=0)
ax.add_feature(coast, zorder=101, alpha=0.5)
# coast_50m = cpf.GSHHSFeature(scale="high")
ax.add_feature(cpf.OCEAN, facecolor="w", alpha=1, zorder=100)
ax.set_title(f"{title}")
plt.show()
#==============================================================================
if __name__ == '__main__':
main()
| 2.515625
| 3
|
anime/demo/demo9.py
|
SodaCookie/pygame-animations
| 14
|
12775896
|
import pygame
import anime
import random
pygame.init()
screen = pygame.display.set_mode((800, 600))
squares = []
entrance = {
'x' : -50,
'y' : 300
}
exit = {
'x' : 850,
'y' : 300
}
episode = anime.Episode(entrance, exit)
playing = True
while playing:
mx, my = pygame.mouse.get_pos()
for e in pygame.event.get():
if e.type == pygame.QUIT:
playing = False
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_EQUALS:
tmp_surf = pygame.Surface((100, 100))
tmp_surf.fill((random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)))
tmp_anime = anime.AnimeBase(tmp_surf, random.randint(200, 600),
random.randint(50, 550))
tmp_anime.set_filter('x', anime.filter.Spring(0.1, 0.5))
tmp_anime.set_filter('y', anime.filter.Spring(0.1, 0.5))
squares.append(tmp_anime)
elif e.key == pygame.K_MINUS:
if squares:
squares.pop(0)
screen.fill((255, 255, 255))
episode.update(squares)
episode.render(episode.get_unmounting(), screen)
episode.render(squares, screen)
pygame.display.flip()
pygame.time.wait(10)
pygame.quit()
| 2.75
| 3
|
1D-Burger-SWAG/utils/post.py
|
tailintalent/ar-pde-cnn
| 51
|
12775897
|
import torch
import matplotlib as mpl
mpl.use('agg')
import numpy as np
import os
import scipy.integrate as integrate
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.lines import Line2D
from matplotlib import rc
def plotPred(args, t, xT, uPred, uTarget, epoch, bidx=0):
'''
Plots a single prediction contour
'''
plt.close("all")
# Create figure
mpl.rcParams['font.family'] = ['serif'] # default is sans-serif
rc('text', usetex=False)
fig = plt.figure(figsize=(15, 8), dpi=150)
ax = []
ax.append(plt.subplot2grid((3, 15), (0, 0), colspan=14))
ax.append(plt.subplot2grid((3, 15), (1, 0), colspan=14))
ax.append(plt.subplot2grid((3, 15), (2, 0), colspan=14))
cmap = "inferno"
c0 = ax[1].imshow(uPred.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c_max = np.max(uPred.T)
c_min = np.min(uPred.T)
c0.set_clim(vmin=c_min, vmax=c_max)
c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c0.set_clim(vmin=c_min, vmax=c_max)
p0 = ax[0].get_position().get_points().flatten()
p1 = ax[1].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p1[2]+0.015, p1[1], 0.020, p0[3]-p1[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(c_min, c_max, 5)
tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
cmap = "viridis"
c0 = ax[2].imshow(np.abs(uPred.T - uTarget.T), interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
p0 = ax[2].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p0[2]+0.015, p0[1], 0.020, p0[3]-p0[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(c0.norm.vmin, c0.norm.vmax, 5)
tickLabels = ["{:.2e}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
ax[0].set_ylabel('x', fontsize=14)
ax[1].set_ylabel('x', fontsize=14)
ax[2].set_ylabel('x', fontsize=14)
ax[2].set_xlabel('t', fontsize=14)
file_name = args.pred_dir+"/burgerPred-epoch{0:03d}-{1:01d}.png".format(epoch, bidx)
plt.savefig(file_name, bbox_inches='tight')
def plotSamples(args, t, xT, uPred, uTarget, epoch=0):
'''
    Plots prediction contour of Bayesian model samples
'''
plt.close("all")
# Create figure
mpl.rcParams['font.family'] = ['serif'] # default is sans-serif
# rc('text', usetex=True)
n_sample = uPred.shape[0] + 1
nrow = int(np.sqrt(n_sample))
ncol = 6*nrow + 1
fig = plt.figure(figsize=(20, 10), dpi=150)
ax = []
for i in range(nrow):
for j in range(nrow):
ax.append(plt.subplot2grid((nrow, ncol), (i, 6*j), colspan=5))
cmap = "inferno"
# Target in top left
uTarget = uTarget[:uPred.shape[1]]
c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c_max = np.max(uPred.T)
c_min = np.min(uPred.T)
c0.set_clim(vmin=c_min, vmax=c_max)
# Prediction samples
for i in range(1, len(ax)):
c0 = ax[i].imshow(uPred[i-1].T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c0.set_clim(vmin=c_min, vmax=c_max)
p0 = ax[nrow-1].get_position().get_points().flatten()
p1 = ax[-1].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p1[2]+0.01, p1[1], 0.020, p0[3]-p1[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(c_min, c_max, 5)
tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
# Axis labels
for i in range(len(ax)-nrow, len(ax)):
ax[i].set_xlabel('t')
for i in range(nrow):
ax[int(i*nrow)].set_ylabel('x')
file_name = args.pred_dir+"/burgerSamples_epoch{:03d}.png".format(epoch)
plt.savefig(file_name, bbox_inches='tight')
def calcR2score(uPred, uTarget, epoch=0, save=True):
'''
Calculates the total and time dependent average R2 score
Args:
uPred (torch.Tensor): [b x t x d] tensor of model predictions
uTarget (torch.Tensor): [b x t x d] tensor of corresponding target values
epoch (int): current training epoch (for logging)
'''
# Following:
# https://en.wikipedia.org/wiki/Coefficient_of_determination
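    # In symbols: R^2 = 1 - SS_res / SS_tot, with SS_res = sum((y - y_pred)^2)
    # and SS_tot = sum((y - mean(y))^2), computed per batch element and averaged.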
# First total average
ybar = torch.mean(uTarget.view(uTarget.size(0),-1), dim=-1)
ss_tot = torch.sum(torch.pow(uTarget - ybar.unsqueeze(-1).unsqueeze(-1), 2).view(uTarget.size(0), -1), dim=-1)
ss_res = torch.sum(torch.pow(uTarget - uPred, 2).view(uTarget.size(0), -1), dim=-1)
r2_avg = torch.mean(1 - ss_res/ss_tot).cpu().numpy()
# Now time dependent
ybar = torch.mean(uTarget, dim=-1)
ss_tot = torch.sum(torch.pow(uTarget - ybar.unsqueeze(-1), 2), dim=-1)
ss_res = torch.sum(torch.pow(uTarget - uPred, 2), dim=-1)
r2_time = torch.mean(1 - ss_res/ss_tot, dim=0).cpu().numpy()
if(save):
f=open('r2score_time.dat','ab')
np.savetxt(f, np.insert(r2_time, 0, epoch)[np.newaxis,:], delimiter=',')
f.close()
f=open('r2score.dat','ab')
np.savetxt(f, np.insert(r2_avg, 0, epoch)[np.newaxis,:], delimiter=',')
f.close()
| 2.1875
| 2
|
bsbang-suggester.py
|
buzzbangorg/bsbang-indexer
| 0
|
12775898
|
#!/usr/bin/env python3
import argparse
import requests
from bioschemas_indexer import indexer
# MAIN
parser = argparse.ArgumentParser('Run a test query against the Solr instance')
parser.add_argument('query')
args = parser.parse_args()
_, solr = indexer.read_conf()
solrSuggester = 'http://' + solr['SOLR_SERVER'] + ':' + \
solr['SOLR_PORT'] + '/solr/' + solr['SOLR_CORE'] + \
'/suggest?suggest.dictionary=mySuggester&suggest=true&suggest.build=true&suggest.q='
# params = {'q': args.query}
r = requests.get(solrSuggester + args.query)
resp = r.json()
# print(r.text)
for word in resp["suggest"]["mySuggester"][args.query]["suggestions"]:
print(word["term"])
| 2.609375
| 3
|
2021/8b.py
|
combatopera/advent2020
| 2
|
12775899
|
#!/usr/bin/env python3
from itertools import permutations
from pathlib import Path
class Figure(frozenset):
@classmethod
def parse(cls, text):
lines = [l for l in text.splitlines() if l]
for digit in range(10):
digittext = ''.join(l[3 * digit:3 * (digit + 1)] for l in lines)
assert {' '} == set(digittext[::2])
yield cls(i for i in range(7) if digittext[1 + 2 * i] != ' ')
class Patch:
def __init__(self, chartosegment):
self.chartosegment = chartosegment
def _patches(self, pattern, figure):
if len(figure) != len(pattern):
return
knownsegments = {i for c in pattern for i in [self.chartosegment.get(c)] if i is not None}
if not knownsegments <= figure:
return
unknownsegments = list(figure - knownsegments)
unknownchars = [c for c in pattern if c not in self.chartosegment]
for chars in permutations(unknownchars):
yield type(self)(dict(zip(chars, unknownsegments), **self.chartosegment))
def search(self, patterns, figures):
if patterns:
for f in figures:
for q in self._patches(patterns[0], f):
yield from q.search(patterns[1:], figures - {f})
else:
yield self
def _decodeone(self, pattern):
return figures[Figure(self.chartosegment[c] for c in pattern)]
def decode(self, patterns):
return sum(10 ** i * self._decodeone(p) for i, p in enumerate(reversed(patterns)))
emptypatch = Patch({})
figures = {f: digit for digit, f in enumerate(Figure.parse('''
- - - - - - - - $
| | | | || || | || || |$
- - - - - - - $
| | || | | || | || | |$
- - - - - - - $
'''))}
def main():
n = 0
with Path('input', '8').open() as f:
for line in f:
patterns, digits = (s.split() for s in line.split('|'))
patch, = emptypatch.search(sorted(patterns, key = len), figures.keys())
n += patch.decode(digits)
print(n)
if '__main__' == __name__:
main()
| 3.03125
| 3
|
mgn/datasets/clevr_questions.py
|
realRaBot/mgn
| 13
|
12775900
|
<filename>mgn/datasets/clevr_questions.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File: clevr_questions.py
# Author: anon
# Email: <EMAIL>
# Created on: 2020-05-18
#
# This file is part of MGN
# Distributed under terms of the MIT License
import logging
import os
import os.path as osp
import sys
from itertools import zip_longest
# logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(message)s")
from rsmlkit.logging import get_logger, set_default_level
logger = get_logger(__file__)
set_default_level(logging.INFO)
## quick HACK
PROJECT_PATH = '..'
CLEVR_PARSER_PATH = f'{PROJECT_PATH}/vendors/clevr-parser'
print(f"CLEVR_PARSER_PATH={CLEVR_PARSER_PATH}")
if PROJECT_PATH not in sys.path:
sys.path.insert(0, PROJECT_PATH)
if CLEVR_PARSER_PATH not in sys.path:
sys.path.insert(0, CLEVR_PARSER_PATH)
import clevr_parser
from .data import PairData, ClevrData
graph_parser = clevr_parser.Parser(backend='spacy', model='en_core_web_sm',
has_spatial=True,
has_matching=True).get_backend(identifier='spacy')
embedder = clevr_parser.Embedder(backend='torch', parser=graph_parser).get_backend(identifier='torch')
import utils.utils as utils
from utils.mgn_preproc_utils import get_question_file, get_img_scenes
import torch
from torch.utils.data.dataloader import default_collate
# noinspection PyProtectedMember
from torch._six import container_abcs, string_classes, int_classes
import torch_geometric
from torch_geometric.data import Data, Batch
from torch_geometric.debug import set_debug_enabled
set_debug_enabled(True)
import traceback
class ModifiedBatch(Batch):
def __init__(self, **kwargs):
super(ModifiedBatch, self).__init__(**kwargs)
@staticmethod
def from_data_list(data_list, follow_batch=[]):
r"""Constructs a batch object from a python list holding
:class:`torch_geometric.data.Data` objects.
The assignment vector :obj:`batch` is created on the fly.
Additionally, creates assignment batch vectors for each key in
:obj:`follow_batch`."""
keys = [set(data.keys) for data in data_list]
keys = list(set.union(*keys))
assert 'batch' not in keys
batch = Batch()
batch.__data_class__ = data_list[0].__class__
batch.__slices__ = {key: [0] for key in keys}
for key in keys:
batch[key] = []
for key in follow_batch:
batch['{}_batch'.format(key)] = []
cumsum = {key: 0 for key in keys}
batch.batch = []
for i, data in enumerate(data_list):
for key in data.keys:
# logger.info(f"key={key}")
item = data[key]
if torch.is_tensor(item) and item.dtype != torch.bool:
item = item + cumsum[key]
if torch.is_tensor(item):
size = item.size(data.__cat_dim__(key, data[key]))
else:
size = 1
batch.__slices__[key].append(size + batch.__slices__[key][-1])
cumsum[key] = cumsum[key] + data.__inc__(key, item)
batch[key].append(item)
if key in follow_batch:
item = torch.full((size,), i, dtype=torch.long)
batch['{}_batch'.format(key)].append(item)
num_nodes = data.num_nodes
if num_nodes is not None:
item = torch.full((num_nodes,), i, dtype=torch.long)
batch.batch.append(item)
if num_nodes is None:
batch.batch = None
for key in batch.keys:
item = batch[key][0]
logger.debug(f"key = {key}")
if torch.is_tensor(item):
logger.debug(f"batch[{key}]")
logger.debug(f"item.shape = {item.shape}")
elem = data_list[0] # type(elem) = Data or ClevrData
dim_ = elem.__cat_dim__(key, item) # basically, which dim we want to concat
batch[key] = torch.cat(batch[key], dim=dim_)
# batch[key] = torch.cat(batch[key],
# dim=data_list[0].__cat_dim__(key, item))
elif isinstance(item, int) or isinstance(item, float):
batch[key] = torch.tensor(batch[key])
if torch_geometric.is_debug_enabled():
batch.debug()
return batch.contiguous()
class ClevrQuestionDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, collate_fn=None, follow_batch=[], **kwargs):
def collate_wrapper(batch):
return collate_fn(batch) if collate_fn else collate(batch)
def collate(batch):
elem = batch[0]
elem_type = type(elem)
is_tensor = isinstance(elem, torch.Tensor)
is_numpy = elem_type.__module__ == 'numpy' \
and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_'
is_regular_elem = (is_tensor or is_numpy) \
or isinstance(elem, int_classes) \
or isinstance(elem, float)
if is_regular_elem:
# Collate question, program, answer, image_idx #
return default_collate(batch)
else:
# Collate graph data #
if isinstance(elem, PairData):
return Batch.from_data_list(batch, follow_batch=['x_s', 'x_t'])
elif isinstance(elem, Data):
try:
#return Batch.from_data_list(batch, follow_batch)
return ModifiedBatch.from_data_list(batch, follow_batch)
except RuntimeError as rte:
logger.error(f"{rte}")
logger.debug(f"traceback.format_exc(): {traceback.format_exc()}")
#logger.debug(f"traceback.print_stack(): {traceback.print_stack()}")
return batch
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, container_abcs.Mapping):
return {key: collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'):
return type(elem)(*(collate(s) for s in zip(*batch)))
elif isinstance(elem, container_abcs.Sequence):
return [collate(s) for s in zip(*batch)]
raise TypeError('DataLoader found invalid type: {}'.format(
type(elem)))
super(ClevrQuestionDataLoader, self).__init__(dataset, batch_size, shuffle,
collate_fn=lambda batch: collate_wrapper(batch), **kwargs)
class ClevrQuestionDataset(torch.utils.data.Dataset):
def _init_graph_data(self, graph_data_dir_path=None):
"""
Used for driving the flow using preprocessed graph data (*.pt) file. Tries to find and return
a corresponding, complementary {question_h5}.pt file in the graph_data_dir (if path provided),
o.w. in the same folder as the question_h5_path.
"""
question_h5_path = self.question_h5_path
logger.debug(f"Getting graph data from question_h5_path: {question_h5_path}")
fdir = osp.dirname(question_h5_path)
fnp = osp.basename(question_h5_path).split('.')[0]
logger.debug(f"fnp = {fnp}")
## Load PairData from {fp}_pairdata.pt ##
try:
if graph_data_dir_path is None:
graph_data_dir_path = fdir # same as the question_h5 dir
preprocessed_graph_data_fn = f"{fnp}_directed_pairdata.pt" if self.is_directed_graph \
else f"{fnp}_pairdata.pt"
data_fp = f"{graph_data_dir_path}/{preprocessed_graph_data_fn}"
if not os.path.exists(data_fp):
logger.info(f"Couln't find preprocessed graph data {preprocessed_graph_data_fn}. "
f"Falling back to dynamic processing flow")
return None
logger.debug(f"Loading preprocessed pairdata from: {data_fp} ")
data_file = torch.load(data_fp)
data_s_list = data_file['data_s_list']
data_t_list = data_file['data_t_list']
return tuple([data_s_list, data_t_list])
except FileNotFoundError as fne:
logger.error(f"{fnp}_[directed]_pairdata.pt file not found")
return None
def __init__(self, opt, split, *args, **kwargs):
self.max_samples = opt.max_train_samples if split == 'train' \
else opt.max_val_samples
self.question_h5_path = opt.clevr_train_question_path if split == 'train' \
else opt.clevr_val_question_path
vocab_json = opt.clevr_vocab_path
self.vocab = utils.load_vocab(vocab_json)
self.is_directed_graph = opt.is_directed_graph
#### Init Questions.h5 Data - Invariant same data as in baseline (ques, progs, ans, img_idx) ####
questions, programs, answers, image_idxs, orig_idxs, question_families = \
utils.load_data_from_h5(self.question_h5_path)
self.questions = questions
self.programs = programs
self.answers = answers
self.image_idxs = image_idxs
self.orig_idxs = orig_idxs
self.question_families = question_families
#### Init Graph Data: START ####
self.graph_data = None
# Uncomment the below line to activate preprocessed embedding flow
data_list = self._init_graph_data() # Load graph_data from preprocessed embeddings
if data_list:
logger.info(f"Found preprocessed graph data: self.__init_graph_data(..)")
data_s_list, data_t_list = data_list
self.graph_data = list(zip_longest(data_s_list, data_t_list))
else:
# Dynamically load graph_data embeddings (skips the preprocessing requirement)
# N.b Just remove the corresponding *_pairdata.pt file
logger.debug(f"Preprocessed graph data *_pairdata.pt not found, dynammicall generate g_data")
logger.info(f"Dynamic Graph Data Gen Flow")
# raise ValueError if any of the following are None, required for Dynamic Flow
self.graph_parser = kwargs.get('graph_parser')
self.embedder = kwargs.get('embedder')
self.raw_question_path = opt.clevr_train_raw_question_path if split=='train' \
else opt.clevr_val_raw_question_path
self.parsed_img_scene_path = opt.clevr_train_parsed_scene_path if split=='train' \
else opt.clevr_val_parsed_scene_path
logger.debug(f"split: {split}, raw_question_path: {self.raw_question_path}, "
f" parsed_img_scene_path: {self.parsed_img_scene_path}")
try:
self.raw_questions = get_question_file(self.raw_question_path)
self.img_scenes = get_img_scenes(self.parsed_img_scene_path)
except FileNotFoundError as fne:
logger.error(f"Raw questions.json or parsed image scenes not found: {fne}")
#### Init Graph Data: END ####
def __len__(self):
if self.max_samples:
return min(self.max_samples, len(self.questions))
else:
return len(self.questions)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('index %d out of range (%d)' % (idx, len(self)))
question = self.questions[idx]
image_idx = self.image_idxs[idx]
program = -1
answer = -1
if self.programs is not None:
program = self.programs[idx]
if self.answers is not None:
answer = self.answers[idx]
orig_idx = self.orig_idxs[idx]
if self.question_families is not None:
question_family = self.question_families[idx]
# ---- Get Graph Data Item -------#
data_s, data_t = None, None
if self.graph_data:
g_data = self.graph_data[idx]
data_s, data_t = g_data
else:
# Dynamically generate graph data item #
logger.info(f"Dynamic Graph Data Gen for img_idx: {image_idx}")
def get_question_from_token_seq(q_seq):
q = []
for i in q_seq.tolist():
q_token = self.vocab['question_idx_to_token'][i]
q.append(q_token)
return ' '.join(q)
img_scene = list(filter(lambda x: x['image_index'] == image_idx, self.img_scenes))[0]
s = list(filter(lambda x: x['question_index'] == orig_idx, self.raw_questions))[0]
assert s['image_index'] == image_idx
s = s['question']
Gs, s_doc = graph_parser.parse(s, return_doc=True, is_directed_graph=self.is_directed_graph)
X_t, ei_t, e_attr_t = embedder.embed_t(image_idx, self.parsed_img_scene_path,
img_scene=img_scene)
X_s, ei_s, e_attr_s = embedder.embed_s(s, Gs=Gs, s_doc=s_doc)
# Using ClevrData allows us a debug extension to Data
data_s = ClevrData(x=X_s, edge_index=ei_s, edge_attr=e_attr_s)
data_t = ClevrData(x=X_t, edge_index=ei_t, edge_attr=e_attr_t)
return question, program, answer, image_idx, (data_s, data_t)
| 1.9375
| 2
|
DiffTRe/custom_space.py
|
moradza/difftre
| 10
|
12775901
|
<reponame>moradza/difftre<filename>DiffTRe/custom_space.py
from jax_md import space
from jax import ops
import jax.numpy as jnp
def rectangular_boxtensor(box, spacial_dim):
return ops.index_update(jnp.eye(spacial_dim), jnp.diag_indices(spacial_dim), box)
def scale_to_fractional_coordinates(R_init, box):
spacial_dim = R_init.shape[1]
box_tensor = rectangular_boxtensor(box, spacial_dim)
inv_box_tensor = space.inverse(box_tensor)
R_init = jnp.dot(R_init, inv_box_tensor) # scale down to hypercube
return R_init, box_tensor
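# Minimal usage sketch (values are illustrative): positions inside a rectangular
# box with side lengths (10., 20., 30.) are mapped to fractional coordinates.
# R = jnp.array([[1.0, 5.0, 15.0], [9.0, 19.0, 29.0]])
# R_frac, box_tensor = scale_to_fractional_coordinates(R, jnp.array([10., 20., 30.]))
# R_frac -> [[0.1, 0.25, 0.5], [0.9, 0.95, ~0.967]]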
| 2.21875
| 2
|
process.py
|
GBLin5566/An-Automated-Traditional-Chinese-Dialogue-Generating-System
| 4
|
12775902
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import range
import utils
import argparse
import time
import os
import sys
import random
import math
import json
import codecs
import numpy as np
from utils import check_cuda_for_var, check_directory
parser = argparse.ArgumentParser(description=\
"Dialog2Vec Generator")
parser.add_argument('--data', type=str,\
help='location of the data corpus(json file)')
parser.add_argument('--validation_p', type=float, default=0.2,
help='percentage of validation data / all data')
parser.add_argument('--seed', type=int, default=55665566,
help='random seed')
parser.add_argument('--only_stat', type=bool, default=False,
help='only do statistic or not')
args = parser.parse_args()
random.seed(args.seed)
my_lang, document_list = utils.build_lang(args.data, dump_torch_variable=False)
# Statistic
dialog_len_count = {}
sentence_count = 0
total_word_count = 0
word_count = {}
for dialog in document_list:
dialog_len = len(dialog)
sentence_count += dialog_len
for sentence in dialog:
total_word_count += len(sentence)
for index in sentence:
word = my_lang.index2word[index]
word_count[word] = word_count.setdefault(word, 0) + 1
dialog_len_count[dialog_len] = dialog_len_count.setdefault(dialog_len, 0) + 1
print("total_word_count ", total_word_count)
print("sentence_count ", sentence_count)
print("dialog_len_count ", dialog_len_count)
print("word_count ", word_count)
if args.only_stat:
sys.exit(0)
#
random.shuffle(document_list)
cut = int(len(document_list) * args.validation_p)
training_data, validation_data = \
document_list[cut:], document_list[:cut]
# Training data for doc2vec
print("Training data for doc2vec")
gensim_train = []
for train_dialog in training_data:
doc = []
for sentence in train_dialog[:-1]:
doc += sentence
gensim_train.append(doc)
np.save("label/gensim_train.npy", gensim_train)
print("Label data for training")
label = []
dialog2vec = []
doc2vec = []
for train_dialog in training_data:
doc = []
dialog = []
for sentence in train_dialog:
if not sentence == train_dialog[-1]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(1)
doc = []
dialog = []
for sentence in train_dialog[:random.randint(1, len(train_dialog)-2)]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(0)
np.save("label/gensim_train_test.npy", doc2vec)
np.save("label/train_label.npy", label)
with codecs.open("label/dialog2vec_train.json", "w+", encoding="utf-8") as outfile:
json.dump(dialog2vec, outfile, indent=4, ensure_ascii=False)
print("Label data for testing")
label = []
dialog2vec = []
doc2vec = []
for validate_dialog in validation_data:
doc = []
dialog = []
for sentence in validate_dialog:
        if not sentence == validate_dialog[-1]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(1)
doc = []
dialog = []
for sentence in validate_dialog[:random.randint(1, len(validate_dialog)-2)]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(0)
np.save("label/gensim_test_test.npy", doc2vec)
np.save("label/test_label.npy", label)
with codecs.open("label/dialog2vec_test.json", "w+", encoding="utf-8") as outfile:
json.dump(dialog2vec, outfile, indent=4, ensure_ascii=False)
| 2.328125
| 2
|
improved_DNS_lookup.py
|
ThanosGkara/improved_DNS_lookup
| 0
|
12775903
|
#!/usr/bin/python
"""
Author: <NAME>
email: <EMAIL>
The script is written on python >=2.6
Script to resolve hostnames and ips from DNS
Depends on python-dns " yum install python-dns "
"""
import sys
import argparse
import ipaddress
from pprint import pprint
try:
import dns.resolver
import dns.reversename
except ImportError:
print "Plase install python-dns rpm: 'yum install python-dns' "
sys.exit(1)
# create a new instance named 'my_resolver'
hostname_resolver = dns.resolver.Resolver()
def aliases(hostname):
cnames = []
try:
cname = hostname_resolver.query(hostname, "CNAME")
# for attr in dir(cnames):
# print "obj.%s = %s" % (attr, getattr(cnames, attr))
# # pprint(vars(cnames))
# # print ' query qname:', cnames.qname, ' num ans.', len(cnames)
for rdata in cname:
# print ' cname target address:', rdata.target
tmp_name = rdata.target
cnames.append(str(tmp_name)[:-1])
except:
pass
return cnames
# def hostname_dns_resolver(hostname, iptype):
def hostname_dns_resolver(hostname, v=False):
"""
This function receives a host name and tries to resolve it via DNS and get the IPv4/IPv6 address/es
:param hostname: Hostname
:param v: verbose
:return: IP addresses found ( IPv4 or IPv6 or both )
"""
# if iptype not in ['ipv4', 'ipv6', 'ip']:
# sys.stderr("Not given ip type ", ' ipv4', ' ipv6', ' ip')
# sys.exit(1)
iplist = []
hostname = hostname.lower()
if v:
alias = aliases(hostname)
if len(alias) > 0:
hostname_tmp = hostname + ' ( ' + ' '.join(alias) + ' )'
else:
hostname_tmp = hostname
iplist.append(str(hostname_tmp))
else:
iplist.append(str(hostname))
try:
ipv4 = hostname_resolver.query(hostname, "A")
for ip in ipv4:
iplist.append(str(ip))
except dns.resolver.NoAnswer:
iplist.append("IPv4NOTFOUND")
except dns.resolver.NXDOMAIN:
iplist.append("IPv4NOTFOUND")
try:
ipv6 = hostname_resolver.query(hostname, "AAAA")
for ip in ipv6:
iplist.append(str(ip))
except dns.resolver.NoAnswer:
iplist.append("IPv6NOTFOUND")
except dns.resolver.NXDOMAIN:
iplist.append("IPv6NOTFOUND")
return iplist
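# Illustrative return shape when called without verbose output (addresses are
# placeholders from the documentation ranges):
# hostname_dns_resolver('myhost.example.com')
# -> ['myhost.example.com', '192.0.2.10', 'IPv6NOTFOUND']
# i.e. the hostname first, then any A records, then AAAA records or the
# corresponding NOTFOUND marker.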
def ip_dns_resolver(ip):
"""
Asks the DNS for the pointer to the hostname of an ip address
:param ip: IPv4 or IPv6
:return: string
"""
ip_rev = dns.reversename.from_address(ip)
try:
hostname = str(hostname_resolver.query(ip_rev, "PTR")[0])
except:
hostname = "NO_HOSTNAME"
hostname = hostname[:-1]
return hostname
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', nargs='+', help='Define the hostnames you want to resolve ')
parser.add_argument('--ips', nargs='+', help='Define the ips you want to resolve ')
parser.add_argument('-v', action='store_true', help='More rich output regarding each machine')
args = parser.parse_args()
if args.hostname:
for hostname in args.hostname:
if args.v:
# del _ips_[0]
_ips_ = hostname_dns_resolver(hostname, args.v)
tmp = []
for i in xrange(1, len(_ips_)):
if 'NOTFOUND' not in _ips_[i]:
tmp.append(ip_dns_resolver(_ips_[i]))
for i in xrange(len(tmp)):
_ips_[i+1] = _ips_[i+1] + ' ---> ' + tmp[i]
del tmp
else:
_ips_ = hostname_dns_resolver(hostname)
print '\n'.join(map(str, _ips_))
print ''
elif args.ips:
for ip in args.ips:
print 'IP:',ip
try:
_ip_ = ipaddress.ip_address(unicode(ip))
print ip_dns_resolver(ip)
except:
print "Please provide IPv4 or IPv6.\nInput invalid!"
sys.exit(1)
else:
print parser.print_usage()
sys.exit(1)
if __name__ == '__main__':
main()
| 3.25
| 3
|
LeetCodeSolutions/LeetCode_0286.py
|
lih627/python-algorithm-templates
| 24
|
12775904
|
from typing import List


class Solution:
def wallsAndGates(self, rooms: List[List[int]]) -> None:
"""
Do not return anything, modify rooms in-place instead.
"""
if not rooms:
return
INF = 2 ** 31 - 1
m, n = len(rooms), len(rooms[0])
from collections import deque
que = deque()
dirs = [[1, 0], [-1, 0], [0, 1], [0, -1]]
visited = set()
for i in range(m):
for j in range(n):
if rooms[i][j] == 0:
visited.add((i, j))
que.append((i, j, 0))
while que:
x, y, cnt = que.popleft()
for dx, dy in dirs:
xx = x + dx
yy = y + dy
if -1 < xx < m and -1 < yy < n and (xx, yy) not in visited and rooms[xx][yy] == INF:
rooms[xx][yy] = cnt + 1
visited.add((xx, yy))
que.append((xx, yy, cnt + 1))
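# Illustrative run on a small 4x4 grid (values use the usual encoding:
# -1 is a wall, 0 a gate, 2147483647 an empty room):
# rooms = [[2147483647, -1, 0, 2147483647],
#          [2147483647, 2147483647, 2147483647, -1],
#          [2147483647, -1, 2147483647, -1],
#          [0, -1, 2147483647, 2147483647]]
# Solution().wallsAndGates(rooms)
# rooms is filled in place with the distance to the nearest gate:
# [[3, -1, 0, 1], [2, 2, 1, -1], [1, -1, 2, -1], [0, -1, 3, 4]]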
| 3.078125
| 3
|
tests/test_core.py
|
astromancer/pyshoc
| 0
|
12775905
|
import more_itertools as mit
import functools as ftl
from recipes.testing import Expect
from astropy.io.fits.hdu.base import _BaseHDU
from pathlib import Path
from pySHOC import shocCampaign, shocHDU, shocNewHDU, shocBiasHDU, shocFlatHDU
import pytest
import numpy as np
import os
import tempfile as tmp
# TODO: old + new data all modes!!!
# TODO: all combinations of science, bias, dark, flats (+ masters)
# TODO:
# pylint: disable=C0111 # Missing %s docstring
# pylint: disable=R0201 # Method could be a function
# pretty sample images here:
DATA = Path(__file__).parent / 'data'
EX1 = DATA / 'AT2020hat'
CAL = DATA / 'calibration'
#
np.random.seed(12345)
# ---------------------------------- Helpers --------------------------------- #
def list_of_files():
# create text file with list of filenames for test load
fp, filename = tmp.mkstemp('.txt')
for name in EX1.glob('*.fits'):
os.write(fp, f'{name}{os.linesep}'.encode())
os.close(fp)
return filename
# --------------------------------- Fixtures --------------------------------- #
@pytest.fixture
def run():
return shocCampaign.load(EX1)
# run = shocCampaign.load(EX1)
# ----------------------------------- Tests ---------------------------------- #
class TestCampaign:
@pytest.mark.parametrize(
'pointer',
( # single file as a str
f'{EX1}/SHA_20200731.0001.fits',
# single file as a Path object
EX1 / 'SHA_20200731.0001.fits',
# file list
[f'{EX1}/SHA_20200731.0001.fits',
f'{EX1}/SHA_20200731.0002.fits'],
# globbing patterns
f'{EX1}/SHA_20200731.000[12].fits',
f'{EX1}/SHA_20200731.000*.fits',
# directory
EX1, str(EX1),
# pointer to text file with list of filenames
f'@{list_of_files()}'
)
)
def test_load(self, pointer):
run = shocCampaign.load(pointer)
def test_file_helper(self, run):
run.files
run.files.names
run.files.stems
run.files.nrs
@pytest.mark.parametrize(
'index',
( # simple indexing
0,
-1,
# by filename
'SHA_20200731.0007.fits',
'SHA_20200731.0007', # both should work
)
)
def test_single_index(self, run, index):
print(run[index].file.name)
assert isinstance(run[index], shocHDU)
@pytest.mark.parametrize(
'index,expected',
[ # slice
(slice(0, 4, 2),
['SHA_20200731.0001.fits', 'SHA_20200731.0003.fits']),
# sequences of ints
([0, 1, 3, -1],
['SHA_20200731.0001.fits', 'SHA_20200731.0002.fits',
'SHA_20200731.0004.fits', 'SHA_20200731.0022.fits']),
# array of ints
(np.arange(3),
['SHA_20200731.0001.fits', 'SHA_20200731.0002.fits',
'SHA_20200731.0003.fits']),
# boolean array
(np.random.randint(0, 2, 22).astype(bool),
['SHA_20200731.0002.fits', 'SHA_20200731.0003.fits',
'SHA_20200731.0004.fits', 'SHA_20200731.0006.fits',
'SHA_20200731.0009.fits', 'SHA_20200731.0011.fits',
'SHA_20200731.0012.fits', 'SHA_20200731.0014.fits',
'SHA_20200731.0015.fits', 'SHA_20200731.0017.fits',
'SHA_20200731.0018.fits', 'SHA_20200731.0019.fits']),
# by list of filenames
(('SHA_20200731.0007.fits', 'SHA_20200731.0008.fits'),
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits']),
# by globbing pattern
('SHA*[78].fits',
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits',
'SHA_20200731.0017.fits', 'SHA_20200731.0018.fits']),
# by brace expansion
('SHA*{7,8}.fits',
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits',
'SHA_20200731.0017.fits', 'SHA_20200731.0018.fits']),
# by filename sequence slice
('*0731.00[10:22].*',
['SHA_20200731.0010.fits', 'SHA_20200731.0011.fits',
'SHA_20200731.0012.fits', 'SHA_20200731.0013.fits',
'SHA_20200731.0014.fits', 'SHA_20200731.0015.fits',
'SHA_20200731.0016.fits', 'SHA_20200731.0017.fits',
'SHA_20200731.0018.fits', 'SHA_20200731.0019.fits',
'SHA_20200731.0020.fits', 'SHA_20200731.0021.fits'])
]
)
def test_multi_index(self, run, index, expected):
sub = run[index]
assert isinstance(sub, shocCampaign)
assert sub.files.names == expected
def test_pprint(self, run):
print(run, run.table(run), sep='\n\n')
# @pytest.mark.parametrize(
# 'filename,expected',
# [(CAL/'SHA_20200822.0005.fits', shocBiasHDU),
# (CAL/'SHA_20200801.0001.fits', shocFlatHDU),
# (EX1/'SHA_20200731.0022.fits', shocNewHDU)]
# )
# def test_hdu_type(filename, expected):
# obj = _BaseHDU.readfr
# @expected(
# (CAL/'SHA_20200822.0005.fits', shocBiasHDU,
# CAL/'SHA_20200801.0001.fits', shocFlatHDU,
# EX1/'SHA_20200731.0022.fits', shocNewHDU)
# )
def hdu_type(filename):
return _BaseHDU.readfrom(filename).__class__
# print('....', filename)
# print(obj)
# return obj
Expect(hdu_type)(
{CAL/'SHA_20200822.0005.fits': shocBiasHDU,
CAL/'SHA_20200801.0001.fits': shocFlatHDU,
EX1/'SHA_20200731.0022.fits': shocNewHDU},
globals())
# TODO: shocOldHDU, shocMasterBias, shocMasterFlat
# TODO
# def test_select
| 1.859375
| 2
|
list_files/rootpath.py
|
alepuzio/listfiles
| 0
|
12775906
|
import sys
import os
from tests.test_single_file import PhysicalData
from tests.test_single_file import SingleFile
import pytest
class Rootpath:
"""
    @overview: class representing the absolute path of the root directory
"""
def __init__(self, opts):
self.rootpath = opts[1] #TODO study how to resolve the constants in Python
def data(self):
return str(self.rootpath )
def exists(self):
return os.path.exists(self.data())
def files ( self ): #TODO move in class Rootpath
"""
        It reads a directory recursively
"""
readfiles = []
try:
self.subdir(self.data(), readfiles)
except:
print ( sys.exc_info() )
print ( "The total number of the read files is {0}".format ( str( len ( readfiles ) ) ) )
return readfiles;
def subdir(self, root_path, readfiles ):
"""
It traverses root directory, and list directories as dirs and files as files
----------
root_path: string root of the path
readfiles: list list of read files inside path
"""
        for root, dirs, files in os.walk(root_path):
            path = root.split(os.sep)
            for fileTmp in files:
                readfiles.append(SingleFile(PhysicalData(fileTmp, os.sep.join(path))))
            # os.walk already descends into every sub-directory, so no explicit
            # recursion over `dirs` is needed here (recursing again only duplicated
            # work and passed bare directory names instead of full paths).
def __repr__(self):
return "Rootpath.repr:{0}".format( str ( self.rootpath) )
def __str__(self):
#return "{0}".format( str ( self.rootpath) )
return "Rootpath:{0}".format( str ( self.rootpath) )
class OnlyVisible(Rootpath):
"""
It reads only visible directory
"""
def __init__(self, new_rootpath):
self.rootpath = new_rootpath
def data(self):
return self.rootpath.data()
def exists(self):
return super().exists()
def files ( self ): #TODO move in class Rootpath
readfiles = []
try:
if ( self.exists() ):
readfiles = self.rootpath.files()
else:
print ( "The directory [{0}] doesn'nt exists".format ( self.data() ) )
except:
print ( sys.exc_info() )
return readfiles;
def subdir(self, root_path, readfiles ):
if "\\." in root_path :
print ("Directory with dot (.), then it's hidden: {0}".format ( directory ))
else:
return self.rootpath.subdir(root_path, readfiles)
def __repr__(self):
return "OnlyVisible.repr:{0}".format( str ( self.rootpath) )
def __str__(self):
return "OnlyVisible:{0}".format( str ( self.rootpath) )
def test_dot_in_path():
path = "C:\\Users\\apuzielli\\Documents\\personale\\mio-github\\.metadata\\.plugins\\org.jboss.tools.central\\proxyWizards\\1596464026525\\.rcache\\.orphans"
result = ( "." in path)
assert True == result
| 2.96875
| 3
|
Experiments/Aljazeera/web_scrap/wlog.py
|
Ahmad-Fahad/Web-Scraping
| 0
|
12775907
|
import logging
def set_custom_log_info(file):
    logging.basicConfig(filename=file, level=logging.INFO)
def report(e:Exception):
logging.exception(str(e))
| 2.5625
| 3
|
build/SCA/script/x_developer/developer_tools.py
|
oliverpatrick/python-screen_click_ai
| 26
|
12775908
|
<filename>build/SCA/script/x_developer/developer_tools.py
import threading
import random
import os
import time
import smtplib
import socket
from os import walk
from pynput.keyboard import Controller,Key
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from script.x_modules import send_mail
import data.config
import interface
import opencv2
from PIL import ImageGrab
import control
import keyboard
import pyautogui
import pynput
import numpy as np
import cv2
import mss
import numpy
from pynput.keyboard import Controller, Key
from script.x_modules import camera_view
from script.x_modules import server_choose
from script.x_modules import object_detection
from script.x_modules import gps
import getpass
from data import config as cf
from data.script.x_modules import object_detection_config as config
from data.script.x_modules import gps_config as gps_config
from script.x_modules import init
class developer_tools(threading.Thread):
init = None;
camera = None;
engine = None;
input = None;
config = None;
console = None;
gps = None;
detector = None;
xy = None;
SCRIPT_NAME = "DEVELOPER_TOOLS 0.2v";
STATUS = True;
FIRST_LOOP = True;
COUNT = 0;
INV_COUNT = 0;
OUTPUT = None;
position = None;
DEV = [
];
def __init__(self):
threading.Thread.__init__(self);
self.init = init.init();
self.camera = camera_view.camera_view;
self.engine = opencv2.opencv2;
self.input = control.control;
self.console = interface.interface;
self.gps = gps.gps;
self.detector = object_detection.object_detection;
self.config = config.object_detection_config;
def run(self):
#self.getPosition();
self.testModule();
def testModule(self):
pass;
#self.init.startWait();
#self.camera.cameraUp(self.camera);
#self.gps.gpsInit(self.gps,gps_config.gps_config.SAPPHIRE_RING_CRAFTING);
def getPosition(self):
self.engine.getPosition(self.engine);
def createBigArray(self):
for x in range(25):
if x < 10:
print("\".\\\\resources\\\\interface\\\\x_modules\\\\server_choose\\\\server\\\\50"+ str(x) +".png\",");
elif x >= 10:
print("\".\\\\resources\\\\interface\\\\x_modules\\\\server_choose\\\\server\\\\5"+ str(x) +".png\",");
def getFileNames(self):
for (dirpath, dirnames, filenames) in walk(".\\resources\\interface\\x_modules\\server_choose\\server\\"):
string = str(filenames);
clearText = string.replace(".png","");
print(clearText+"\n");
break;
def getStatus(self):
print(self.console.TERMINAL_INTERFACE[0]+self.SCRIPT_NAME+self.console.TERMINAL_INTERFACE[4]);
self.start();
| 2.046875
| 2
|
python/caffe/test/test_draw.py
|
Julian-He/caffe
| 0
|
12775909
|
<reponame>Julian-He/caffe
#-*- coding: utf-8 -*-
"""
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import unittest
from google.protobuf import text_format
import caffe.draw
from caffe.proto import caffe_pb2
def getFilenames():
"""Yields files in the source tree which are Net prototxts."""
result = []
root_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
assert os.path.exists(root_dir)
for dirname in ('models', 'examples'):
dirname = os.path.join(root_dir, dirname)
assert os.path.exists(dirname)
for cwd, _, filenames in os.walk(dirname):
for filename in filenames:
filename = os.path.join(cwd, filename)
if filename.endswith('.prototxt') and 'solver' not in filename:
yield os.path.join(dirname, filename)
class TestDraw(unittest.TestCase):
def test_draw_net(self):
for filename in getFilenames():
net = caffe_pb2.NetParameter()
with open(filename) as infile:
text_format.Merge(infile.read(), net)
caffe.draw.draw_net(net, 'LR')
if __name__ == "__main__":
unittest.main()
| 1.460938
| 1
|
api/lastfm.py
|
notnola/pinybot
| 0
|
12775910
|
<filename>api/lastfm.py<gh_stars>0
import logging
import web_request
import youtube
log = logging.getLogger(__name__)
def get_lastfm_chart(chart_items=5):
"""
Finds the currently most played tunes on last.fm and turns them in to a youtube list of tracks.
:param chart_items: int the amount of tracks we want.
:return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
"""
url = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/charts?nr=%s&type=track&format=json' % chart_items
lastfm = web_request.get_request(url, json=True)
log.debug(lastfm)
if lastfm is not None:
if 'results' in lastfm['content']:
if 'track' in lastfm['content']['results']:
                if len(lastfm['content']['results']['track']) != 0:
yt_tracks = []
for track in lastfm['content']['results']['track']:
search_str = '%s - %s' % (track['artist'], track['name'])
yt = youtube.youtube_search(search_str)
log.info(yt)
if yt is not None:
yt_tracks.append(yt)
return yt_tracks
return None
def search_lastfm_by_tag(search_str, by_id=True, max_tunes=40):
"""
Search last.fm for tunes matching the search term and turns them in to a youtube list of tracks.
:param search_str: str the search term to search for.
:param by_id: bool if set to True, only tunes that have a youtube id will be added(recommended)
:param max_tunes: int the max amount of tunes to return.
:return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
"""
url = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/charts?nr=%s&type=track&f=tag:%s&format=json' % \
(max_tunes, search_str)
lastfm = web_request.get_request(url, json=True)
log.debug(lastfm)
if lastfm is not None:
if 'track' in lastfm['content']['results']:
            if len(lastfm['content']['results']['track']) != 0:
yt_tracks = []
for track in lastfm['content']['results']['track']:
search_str = '%s - %s' % (track['artist'], track['name'])
if 'playlink' in track:
if 'data-youtube-id' in track['playlink']:
youtube_id = track['playlink']['data-youtube-id']
yt = youtube.youtube_time(youtube_id)
log.debug(yt)
if yt is not None:
yt_tracks.append(yt)
else:
if not by_id:
yt = youtube.youtube_search(search_str)
log.debug('search by search string: %s result: %s' % (search_str, yt))
if yt is not None:
yt_tracks.append(yt)
else:
if not by_id:
yt = youtube.youtube_search(search_str)
log.debug('search by search string: %s result: %s' % (search_str, yt))
if yt is not None:
yt_tracks.append(yt)
return yt_tracks
return None
def lastfm_listening_now(max_tunes, by_id=True):
"""
Gets a list of tunes other people using last.fm are listening to, and turns them in to a youtube list of tracks.
:param max_tunes: int the amount of tracks we want.
:param by_id: bool if set to True, only tunes that have a youtube id will be added(recommended)
:return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
"""
url = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/listeningnow?limit=%s&format=json' % max_tunes
lastfm = web_request.get_request(url, json=True)
log.debug(lastfm)
if lastfm is not None:
        if len(lastfm['content']['Users']) != 0:
yt_tracks = []
for user in lastfm['content']['Users']:
if 'playlink' in user:
if 'data-youtube-id' in user['playlink']:
youtube_id = user['playlink']['data-youtube-id']
yt = youtube.youtube_time(youtube_id)
log.debug(yt)
if yt is not None:
yt_tracks.append(yt)
else:
if 'Track' in user:
search_str = '%s - %s' % (user['Track']['Artist'], user['Track']['Name'])
if not by_id:
yt = youtube.youtube_search(search_str)
log.debug('search by search string: %s result: %s' % (search_str, yt))
if yt is not None:
yt_tracks.append(yt)
return yt_tracks
return None
| 3.171875
| 3
|
test_libs/pyspec/eth2spec/test/helpers/block_header.py
|
prestonvanloon/eth2.0-specs
| 1
|
12775911
|
<reponame>prestonvanloon/eth2.0-specs
from eth2spec.utils.bls import bls_sign
from eth2spec.utils.ssz.ssz_impl import signing_root
def sign_block_header(spec, state, header, privkey):
domain = spec.get_domain(
state=state,
domain_type=spec.DOMAIN_BEACON_PROPOSER,
)
header.signature = bls_sign(
message_hash=signing_root(header),
privkey=privkey,
domain=domain,
)
| 2
| 2
|
sahara_dashboard/content/data_processing/data_plugins/tabs.py
|
hejunli-s/sahara-dashboard
| 33
|
12775912
|
<reponame>hejunli-s/sahara-dashboard
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from sahara_dashboard.api import sahara as saharaclient
from sahara_dashboard.content.data_processing.data_plugins \
import tables as plugin_tables
from sahara_dashboard.content.data_processing \
import tabs as sahara_tabs
LOG = logging.getLogger(__name__)
class PluginsTab(sahara_tabs.SaharaTableTab):
table_classes = (plugin_tables.PluginsTable, )
name = _("Plugins")
slug = "plugins_tab"
template_name = "horizon/common/_detail_table.html"
def get_plugins_data(self):
try:
plugins = saharaclient.plugin_list(self.request)
except Exception:
plugins = []
exceptions.handle(self.request,
_("Unable to fetch plugin list"))
return plugins
class DetailsTab(tabs.Tab):
name = _("Details")
slug = "plugin_details_tab"
template_name = "data_plugins/_details.html"
def _generate_context(self, plugin):
if not plugin:
return {'plugin': plugin}
def get_context_data(self, request):
plugin_id = self.tab_group.kwargs['plugin_id']
plugin = None
try:
plugin = saharaclient.plugin_get(request, plugin_id)
except Exception as e:
LOG.error("Unable to get plugin with plugin_id %s (%s)" %
(plugin_id, str(e)))
exceptions.handle(self.tab_group.request,
_('Unable to retrieve plugin.'))
return {"plugin": plugin}
class LabelsTab(tabs.Tab):
name = _("Label details")
slug = "label_details_tab"
template_name = "data_plugins/_label_details.html"
def _label_color(self, label):
color = 'info'
if label == 'deprecated':
color = 'danger'
elif label == 'stable':
color = 'success'
return color
def get_context_data(self, request, **kwargs):
plugin_id = self.tab_group.kwargs['plugin_id']
plugin = None
try:
plugin = saharaclient.plugin_get(request, plugin_id)
except Exception as e:
LOG.error("Unable to get plugin with plugin_id %s (%s)" %
(plugin_id, str(e)))
exceptions.handle(self.tab_group.request,
_('Unable to retrieve plugin.'))
labels = []
for label, data in plugin.plugin_labels.items():
labels.append(
{'name': label,
'color': self._label_color(label),
'description': data.get('description', _("No description")),
'scope': _("Plugin"), 'status': data.get('status', False)})
for version, version_data in plugin.version_labels.items():
for label, data in version_data.items():
labels.append(
{'name': label,
'color': self._label_color(label),
'description': data.get('description',
_("No description")),
'scope': _("Plugin version %s") % version,
'status': data.get('status', False)})
return {"labels": labels}
class PluginDetailsTabs(tabs.TabGroup):
slug = "cluster_details"
tabs = (DetailsTab, LabelsTab)
sticky = True
| 1.78125
| 2
|
regex_tester.py
|
ewhalan/IFB104
| 1
|
12775913
|
#-----Description----------------------------------------------------#
#
# REGULAR EXPRESSION TESTER
#
# This program provides a simple Graphical User Interface that
# helps you develop regular expressions. It allows you to enter a
# block of text and a regular expression and see what matches
# are found. (Similar web browser-based tools can be found online,
# but the advantage of this one is that it's written in Python, so
# we know that it obeys Python's regular expression syntax.)
#
#--------------------------------------------------------------------#
#-----Useful constants-----------------------------------------------#
#
# These constants control the text widgets in the GUI. Change them
# if you want to modify the widgets in which text is displayed.
FontSize = 14 # Size of the font used for all widgets
InputWidgetWidth = 60 # Width of the search text and regex widgets (chars)
SearchTextDepth = 15 # Depth of the search text widget (lines)
MatchesWidth = 25 # Width of the matches found widget (chars)
MatchesDepth = 20 # Depth of the matches found widget (lines)
#
#--------------------------------------------------------------------#
#-----Main program---------------------------------------------------#
#
#
# Import the necessary regular expression function
from re import findall, MULTILINE
# Import the Tkinter functions
from Tkinter import *
# Create a window
regex_window = Tk()
# Give the window a title
regex_window.title('Regular Expression Tester')
# Create some instructions
search_text_instruction = "Enter the text you want to search here"
regex_instruction = "Enter your regular expression here"
results_instruction = '''All matches found are displayed here
Quotation marks are used to indicate the beginning and end of each match
Matches are displayed in the order found
Newline or tab characters in the match are shown as \\n and \\t
Carriage returns (\\r) are deleted from the search text before searching
If 'multiline' is enabled the beginning and end of lines can be matched as ^ and $
If the pattern contains more than one group, each match shows all groups'''
# Define the fonts we want to use, including a
# fixed-width one which makes all characters easy to see
fixed_font = ('Courier', FontSize)
label_font = ('Calisto', FontSize, 'bold')
# Create a text editing widget for the text to be searched
search_text = Text(regex_window, width = InputWidgetWidth,
height = SearchTextDepth, wrap = WORD,
bg = 'light grey', font = fixed_font,
borderwidth = 2, relief = 'groove',
takefocus = False)
search_text.insert(END, search_text_instruction)
search_text.grid(row = 1, column = 0, padx = 5)
# Create label widgets to describe the boxes
matches_found = Label(regex_window, text = 'Matches found:',
font = label_font)
matches_found.grid(row = 0, column = 1, sticky = W, padx = 5)
enter_regex = Label(regex_window, text = 'Regular expression:',
font = label_font)
enter_regex.grid(row = 2, column = 0, sticky = W, padx = 5)
text_to_search = Label(regex_window, text = 'Text to be searched:',
font = label_font)
text_to_search.grid(row = 0, column = 0, sticky = W, padx = 5)
# Create a text widget to display the matches found
results_text = Text(regex_window, font = fixed_font,
width = MatchesWidth, height = MatchesDepth,
wrap = WORD, bg = 'light green',
borderwidth = 2, relief = 'groove',
takefocus = False)
results_text.insert(END, results_instruction)
results_text.grid(row = 1, column = 1, rowspan = 4, padx = 5, sticky = N)
# Create a frame to hold the controls
controls = Frame(regex_window)
controls.grid(row = 4, column = 0, padx = 5, pady = 5)
# Create a checkbutton to allow the user to enable multiline mode
multiline_on = BooleanVar()
multi_button = Checkbutton(controls, text = "Multiline", font = label_font,
variable = multiline_on, takefocus = False)
multi_button.grid(row = 0, column = 1, padx = 5)
# Create a text editing widget for the regular expression
reg_exp = Entry(regex_window, font = fixed_font,
width = InputWidgetWidth, bg = 'light yellow')
reg_exp.insert(END, regex_instruction)
reg_exp.grid(row = 3, column = 0, sticky = E, padx = 5)
reg_exp.selection_range(0, END) # select all text if we "tab" into the widget
# Function to format a single match. This is made more complicated
# than we'd like because Python's findall function usually returns a list
# of matching strings, but if the regular expression contains more than
# one group then it returns a list of tuples where each tuple contains
# the individual matches for each group.
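# For example (illustrative): findall(r'(\w+)=(\d+)', 'a=1 b=2') returns
# [('a', '1'), ('b', '2')], whereas a pattern with no groups such as r'\d+'
# simply returns ['1', '2'].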
def format_match(result):
if type(result) is tuple:
formatted = ()
for match in result:
# make the match a "normal" string (not unicode)
match = match.encode('utf8')
# make newline and tab characters in the match visible
match = match.replace('\n', '\\n')
match = match.replace('\t', '\\t')
# put it in the resulting tuple
formatted = formatted + (match,)
else:
# get rid of any unicode characters in the result
result = result.encode('utf8')
# make newline and tab characters in the result visible
formatted = result.replace('\n', '\\n')
formatted = formatted.replace('\t', '\\t')
        # put quotes around the result, to help us see empty
# results or results containing spaces at either end
formatted = "'" + formatted + "'"
# return either form as a printable string
return str(formatted)
# Function to find and display results. This version has
# been made robust to user error, through the use of
# exception handling (a topic we'll cover later).
# The optional 'event' parameter allows this function to be
# the target of a key binding.
def find_matches(event = None):
# Clear the results box
results_text.delete(0.0, END)
# Delete any carriage returns (\r) in the search text,
# leaving just newlines (\n), to allow for text pasted from
# an environment with different end-of-line conventions
text_to_search = search_text.get(0.0, END)
text_to_search = text_to_search.replace('\r', '')
search_text.delete(0.0, END)
search_text.insert(0.0, text_to_search)
# Attempt to find the pattern and display the results
try:
# Do a single string or multiline search,
# depending on whether or not the user has
# enabled multiline mode
if multiline_on.get():
results = findall(reg_exp.get(), text_to_search,
flags = MULTILINE)
else:
results = findall(reg_exp.get(), text_to_search)
# Display the outcome
if len(results) == 0:
results_text['bg'] = 'khaki'
results_text.insert(END, 'No matches found\n')
else:
results_text['bg'] = 'light green'
for result in results:
results_text.insert(END, format_match(result) + '\n')
except: # assume the failure was due to a malformed regular expression
results_text['bg'] = 'coral'
results_text.insert(END, 'Invalid regular expression\n')
# Create a button widget to start the search
search_button = Button(controls, text = 'Show matches',
takefocus = False, command = find_matches,
font = label_font)
search_button.grid(row = 0, column = 0)
# Also allow users to start the search by typing a carriage return
# in the regular expression field
reg_exp.bind('<Return>', find_matches)
# Start the event loop
regex_window.mainloop()
#
#--------------------------------------------------------------------#
| 3.65625
| 4
|
cloth_segmentation.py
|
Ericcsr/ClothFromDepth
| 0
|
12775914
|
import numpy as np
import open3d as o3d
import os
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--red", type = float, default = 0.5)
parser.add_argument("--blue", type = float, default = 0.4)
parser.add_argument("--green", type = float, default = 0.4)
parser.add_argument("--source_dir", type = str, default = "./scatters")
parser.add_argument("--render", action = "store_true", default = False)
args = parser.parse_args()
# Note: in some cases noise/disturbance may remain in the point cloud
def segment_cloth(pcd):
color = np.array(pcd.colors)
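    # Keep points whose colour is predominantly red (red channel above the --red
    # threshold, green and blue below theirs); this isolates the cloth points.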
mask = (color[:,0] > args.red) * (color[:, 1] < args.green) * (color[:,2] < args.blue)
points = np.asarray(pcd.points)
truncated_pcd = o3d.geometry.PointCloud()
truncated_pcd.points = o3d.utility.Vector3dVector(points[mask])
truncated_pcd.colors = o3d.utility.Vector3dVector(color[mask])
    # remove_statistical_outlier returns a filtered copy (plus the kept indices)
    # rather than filtering in place, so reassign the result.
    truncated_pcd, _ = truncated_pcd.remove_statistical_outlier(nb_neighbors = 20, std_ratio = 0.04)
return truncated_pcd
# Source directory is identical to target directory
files = os.listdir(f"./pointcloud_transformed/{args.source_dir}/")
for f in files:
filename = f"./pointcloud_transformed/{args.source_dir}/{f}"
pcd = o3d.io.read_point_cloud(filename)
cloth_pcd = segment_cloth(pcd)
o3d.io.write_point_cloud(f"./pointcloud_cloth/{args.source_dir}/{f}", cloth_pcd)
if args.render:
o3d.visualization.draw_geometries([cloth_pcd])
| 2.515625
| 3
|
workflow_demo.py
|
KhunWasut/chempython
| 0
|
12775915
|
<reponame>KhunWasut/chempython<filename>workflow_demo.py<gh_stars>0
### necessary imports ###
import kpython_path as kp
import os, re
import numpy as np
# Read necessary data from our workspace scheme
# These arrays need to be sorted!
x_snapshot_filelist = os.listdir('./x-snapshots')
f_snapshot_filelist = os.listdir('./f-snapshots')
x_snapshot_filelist.sort(key=kp.utils.natural_keys)
f_snapshot_filelist.sort(key=kp.utils.natural_keys)
# Assert equal length
assert(len(x_snapshot_filelist) == len(f_snapshot_filelist))
# Actually read the vectors
X = []
GRAD_V = []
for i in range(len(x_snapshot_filelist)):
path_x = os.path.join('./x-snapshots', x_snapshot_filelist[i])
path_f = os.path.join('./f-snapshots', f_snapshot_filelist[i])
vec_x = kp.chemmatrix.readvec(path_x)
vec_f = kp.chemmatrix.readvec(path_f)
X.append(vec_x)
GRAD_V.append(vec_f)
##### END OF PART 1 - CHECKED!! #####
atom_vec = kp.chemmatrix.readvec('./aux/atoms.vec')
masses = []
# Build a mass matrix
for atom in atom_vec:
if atom in kp.utils.masses:
masses.append(kp.utils.masses[atom])
# This mass vector is in a.m.u.
mass_vec = np.array(masses)
# convert to atomic unit
mass_vec_atomic = mass_vec*(1.66054e-27/9.10938e-31)
mu = np.diag(mass_vec)
##### END OF PART 2 - CHECKED!! #####
icf_atomic = []
CN = kp.colvars.coordnum.CNParams(numer_pow=10.0, denom_pow=26.0, r0=3.2)
R = kp.colvars.distance.RParams()
L = 11.093
L_atomic = L/0.529177
# Before calculations, make sure that distance c.v. is in atomic units!
# Convert X into atomic units (assumed original unit: Angstrom; 1 bohr = 0.529177 A)
X_atomic = [x / 0.529177 for x in X]
# Convert forces into atomic units (assumed original unit: kcal/(mol.A); 1 hartree = 627.509 kcal/mol)
GRAD_V_atomic = [grad_v * 0.529177 / 627.509 for grad_v in GRAD_V]
# NOTE: kT (thermal energy in atomic units) must still be defined before this loop runs.
for xi, grad_vi in zip(X_atomic, GRAD_V_atomic):
    icf_i = kp.icf.icf_construct(xi, mu, grad_vi, kT, R, CN, L_atomic)
    for icf_val in icf_i:
        icf_atomic.append(icf_val)
icf_atomic = np.array(icf_atomic)
icf_namd_unit = icf_atomic ### Convert back to NAMD units (either kcal/(mol.A) or kcal/(mol.A^2) for r_ab C.V.
##### END OF PART 3 - MAKE SURE THIS PART WORKS BEFORE PROCEEDING! #####
icf_var = kp.icf.var(icf_namd_unit, num_cv=2)
### Actually calculate CN and construct r_vec and cn_vec vectors
### Using what we have
##### END OF PART 4 - MAKE SURE THIS PART WORKS BEFORE PROCEEDING! #####
r_min = 1.5
r_max = 7.0
cn_min = 2.5
cn_max = 5.5
theta_r = 6.0
theta_cn = 4.0
chi = 10.0 # kcal/mol
r_space = np.linspace(r_min, r_max, 1000)
cn_space = np.linspace(cn_min, cn_max, 1000)
fe_coord = []
for r in r_space:
for cn in cn_space:
xi_star_vec = np.array([r, cn])
fe = kp.fe_construct.fe_fit(r_vec, cn_vec, theta_r, theta_cn, chi, icf, icf_var, xi_star_vec)
fe_coord.append(fe)
##### END OF PART 5 - MAKE SURE THIS PART WORKS BEFORE PROCEEDING! #####
# Use matplotlib here to plot fe_coord or save to a file with pandas
##### END OF PART 6 - MAKE SURE THIS PART WORKS BEFORE PROCEEDING! #####
##### END OF SCRIPT #####
| 2.359375
| 2
|
biliob_to_mysql/move_data.py
|
ProgramRipper/biliob-spider
| 2
|
12775916
|
from db import cursor
from db import db as mongodb
from pymongo import ASCENDING
import bson
import datetime
mongo_user = mongodb['user']
mongo_video = mongodb['video']
mongo_author = mongodb['author']
# 用户相关
INSERT_USER_SQL = """
INSERT INTO `user` (`name`, `password`, `credit`, `exp`, `gmt_create`, `role`)
VALUES (%(name)s, %(password)s, %(credit)s, %(exp)s, %(gen_time)s, %(role)s)
ON DUPLICATE KEY UPDATE `name` = VALUES(`name`), `exp` = VALUES(`exp`), `credit` = VALUES(`credit`), `password` = VALUES(`password`), `role` = VALUES(`role`);
"""
GET_USER_ID_SQL = """
SELECT `user_id` FROM `user` WHERE `name` = %s
"""
DELETE_USER_FOCUS_VIDEO_SQL = """
DELETE FROM biliob.user_focus_video
WHERE
`user_id` = %s;
"""
DELETE_USER_FOCUS_AUTHOR_SQL = """
DELETE FROM biliob.user_focus_author
WHERE
`user_id` = %s;
"""
INSERT_USER_FOCUS_VIDEO_SQL = """
INSERT INTO `user_focus_video` (`user_id`, `video_id`)
VALUES (%(user_id)s, %(video_id)s);
"""
INSERT_USER_FOCUS_AUTHOR_SQL = """
INSERT INTO `user_focus_author` (`user_id`, `author_id`)
VALUES (%(user_id)s, %(author_id)s)
"""
def translate_int64(item):
for each_key in item:
if type(item[each_key]) is bson.int64.Int64:
item[each_key] = int(item[each_key])
def move_user():
for each_doc in mongo_user.find().sort('_id', direction=ASCENDING):
item = dict()
item['gen_time'] = each_doc.pop('_id').generation_time
item['name'] = each_doc['name']
item['credit'] = each_doc['credit'] if 'credit' in each_doc else 0
item['password'] = each_doc['password'] if 'password' in each_doc else 0
item['exp'] = each_doc['exp'] if 'exp' in each_doc else 0
item['role'] = each_doc['role'] if 'role' in each_doc else 0
if len(item['name']) > 45:
print(item['name'])
continue
cursor.execute(INSERT_USER_SQL, item)
cursor.execute(GET_USER_ID_SQL, (each_doc['name']))
user_id = cursor.fetchone()['user_id']
cursor.execute(DELETE_USER_FOCUS_VIDEO_SQL, (user_id))
cursor.execute(DELETE_USER_FOCUS_AUTHOR_SQL, (user_id))
if 'favoriteAid' in each_doc:
for each_aid in each_doc['favoriteAid']:
if each_aid == None or each_aid > 4294967295:
continue
item = {}
item['user_id'] = int(user_id)
item['video_id'] = int(each_aid)
cursor.execute(INSERT_USER_FOCUS_VIDEO_SQL, item)
if 'favoriteMid' in each_doc:
for each_mid in each_doc['favoriteMid']:
if each_mid == None or each_mid > 4294967295:
continue
item = {}
item['user_id'] = int(user_id)
item['author_id'] = int(each_mid)
cursor.execute(INSERT_USER_FOCUS_AUTHOR_SQL, item)
# 视频相关
INSERT_VIDEO_SQL = """
INSERT INTO `video` (`video_id`, `author_id`, `title`, `pic`, `is_observe`, `gmt_create`, `channel`, `subchannel`, `pub_datetime`)
VALUES (%(video_id)s, %(author_id)s, %(title)s, %(pic)s, %(is_observe)s, %(gen_time)s, %(channel)s, %(subchannel)s, %(pub_datetime)s)
ON DUPLICATE KEY UPDATE `title` = VALUES(`title`), `pic` = VALUES(`pic`), `is_observe` = VALUES(`is_observe`), `channel` = VALUES(`channel`), `subchannel` = VALUES(`subchannel`), `pub_datetime` = VALUES(`pub_datetime`);
"""
INSERT_VIDEO_RECORD_SQL = """
INSERT INTO `video_record` (`video_id`, `view`, `danmaku`, `favorite`, `coin`, `share`, `like`, `dislike`, `gmt_create`)
VALUES (%(video_id)s, %(view)s, %(danmaku)s, %(favorite)s, %(coin)s, %(share)s, %(like)s, %(dislike)s, %(gmt_create)s)
ON DUPLICATE KEY UPDATE
`video_id` = VALUES(`video_id`),
`view` = VALUES(`view`),
`danmaku` = VALUES(`danmaku`),
`favorite` = VALUES(`favorite`),
`coin` = VALUES(`coin`),
    `share` = VALUES(`share`),
    `like` = VALUES(`like`),
    `dislike` = VALUES(`dislike`);
"""
def move_video():
for each_doc in mongo_video.find().batch_size(8):
translate_int64(each_doc)
item = {}
item['video_id'] = each_doc['aid'] if 'aid' in each_doc else None
print(item['video_id'])
item['author_id'] = each_doc['mid'] if 'mid' in each_doc else None
item['title'] = each_doc['title'] if 'title' in each_doc else None
item['pic'] = each_doc['pic'] if 'pic' in each_doc else None
item['is_observe'] = each_doc['focus'] if 'focus' in each_doc else 1
item['channel'] = each_doc['channel'] if 'channel' in each_doc else None
item['subchannel'] = each_doc['subChannel'] if 'subChannel' in each_doc else None
item['gen_time'] = each_doc.pop('_id').generation_time
item['pub_datetime'] = each_doc['datetime'] if 'datetime' in each_doc else None
cursor.execute(INSERT_VIDEO_SQL, item)
if 'data' in each_doc:
item_list = []
for each_record in each_doc['data']:
translate_int64(each_record)
item = {}
item['video_id'] = each_doc['aid'] if 'aid' in each_doc else None
item['view'] = each_record['view'] if 'view' in each_record else None
item['danmaku'] = each_record['danmaku'] if 'danmaku' in each_record else None
item['favorite'] = each_record['favorite'] if 'favorite' in each_record else None
item['coin'] = each_record['coin'] if 'coin' in each_record else None
item['share'] = each_record['share'] if 'share' in each_record else None
item['like'] = each_record['like'] if 'like' in each_record else None
item['dislike'] = each_record['dislike'] if 'dislike' in each_record else None
item['gmt_create'] = each_record['datetime'] if 'datetime' in each_record else None
item_list.append(item)
cursor.executemany(INSERT_VIDEO_RECORD_SQL, item_list)
| 2.9375
| 3
|
world-1/desafio-011.py
|
udanielnogueira/Python.CursoEmVideo
| 0
|
12775917
|
<filename>world-1/desafio-011.py
'''
Write a program that reads the width and height of a wall
in meters, calculates its area and the amount of paint
needed to paint it, knowing that each liter of
paint covers an area of 2 square meters.
'''
l = float(input('Enter the width value: '))
h = float(input('Enter the height value: '))
a = l * h
t = a / 2
print(f'Area: {a}m²')
print(f'Paint needed: {t:.2f}l')
| 3.578125
| 4
|
library/rainbowhat/touch.py
|
Corteil/rainbow-hat
| 72
|
12775918
|
"""Rainbow HAT GPIO Touch Driver."""
try:
import RPi.GPIO as GPIO
except ImportError:
raise ImportError("""This library requires the RPi.GPIO module.
Install with: sudo pip install RPi.GPIO""")
PIN_A = 21
PIN_B = 20
PIN_C = 16
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Button(object):
"""Represent GPIO Button."""
def __init__(self, index, gpio_pin):
"""Initialise GPIO Button."""
object.__init__(self)
self.pressed = False
self._on_press_handler = None
self._on_release_handler = None
self._gpio_pin = gpio_pin
self._index = index
self._is_setup = False
def setup(self):
"""Set up the GPIO button."""
if self._is_setup:
return
GPIO.setup(self._gpio_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(self._gpio_pin, GPIO.BOTH, bouncetime=1, callback=self._handle_button)
self._is_setup = True
def _handle_button(self, pin):
self.pressed = GPIO.input(pin) != GPIO.HIGH
if self.pressed and callable(self._on_press_handler):
try:
self._on_press_handler(self._index, self._gpio_pin)
except TypeError:
self._on_press_handler(self._index)
elif callable(self._on_release_handler):
try:
self._on_release_handler(self._index, self._gpio_pin)
except TypeError:
self._on_release_handler(self._index)
def press(self, handler=None):
"""Bind a function to handle touch press."""
self.setup()
if handler is None:
def decorate(handler):
self._on_press_handler = handler
return decorate
self._on_press_handler = handler
def release(self, handler=None):
"""Bind a funciton to handle touch release."""
self.setup()
if handler is None:
def decorate(handler):
self._on_release_handler = handler
return decorate
self._on_release_handler = handler
class Buttons(object):
"""Represent A, B and C GPIO Buttons."""
A = Button(0, PIN_A)
B = Button(1, PIN_B)
C = Button(2, PIN_C)
_all = [A, B, C]
def __getitem__(self, key):
return self._all[key]
def press(self, handler=None):
"""Bind a function to handle touch press."""
if handler is None:
def decorate(handler):
self.A.press(handler)
self.B.press(handler)
self.C.press(handler)
return decorate
self.A.press(handler)
self.B.press(handler)
self.C.press(handler)
def release(self, handler=None):
"""Bind a function to handle touch release."""
if handler is None:
def decorate(handler):
self.A.release(handler)
self.B.release(handler)
self.C.release(handler)
return decorate
self.A.release(handler)
self.B.release(handler)
self.C.release(handler)
Buttons = Buttons()
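# Example usage (illustrative, not part of the driver):
#
#   @Buttons.A.press()
#   def on_press(index):
#       print('Button', index, 'pressed')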
| 2.796875
| 3
|
flow2ml/Data_Augumentation.py
|
yvkrishna/Flow2ML
| 1
|
12775919
|
<reponame>yvkrishna/Flow2ML
import cv2
import os
import imutils
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform as tf
from matplotlib.transforms import Affine2D
import random
class Data_Augumentation:
'''
Class containing methods to apply Data Augumentation operations
to images in the data folder
'''
def __init__(self,operations):
'''
Initializes various attributes regarding to the object.
Args :
operations : (dictionary) python dictionary containing key value pairs
of operations (flip, shear, zoom etc.) and values (integer) to be applied to the image data.
'''
self.operations = operations
def applyFlip(self,classPath):
'''
Applies flipping augmentation to all the images in the given folder. Flip: 1 flips it on y axis, 0 flips it on x axis and -1 flips it on both axis
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/FlippedImages")
except:
raise Exception("Unable to create directory for flipped images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if self.operations['flip'] not in ['horizontal', 'vertical', 'cross']:
raise Exception("Invalid flip operation.")
else:
if self.operations['flip'] == 'horizontal':
operation = 1
elif self.operations['flip'] == 'vertical':
operation = 0
elif self.operations['flip'] == 'cross':
operation = -1
if img is not None:
try:
# applies Flip augmentation to the image.
Flipped = cv2.flip(img, operation)
# saving the image by
plt.imsave(classPath+"/FlippedImages/Flipped"+image, cv2.cvtColor(Flipped, cv2.COLOR_RGB2BGR))
except Exception as e:
print(f"Flip operation failed due to {e}")
def applyRotate(self,classPath):
'''
Applies rotation augmentation to all the images in the given folder. It rotates the images by the given angle (in degrees) in the counter clockwise direction
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/RotatedImages")
except:
raise Exception("Unable to create directory for rotated images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if isinstance(self.operations['rotate'], str):
raise Exception("Rotation angle cannot be a string.")
else:
angle = round(self.operations['rotate']) % 360
if img is not None:
try:
# applies Rotate augmentation to the image.
Rotated = imutils.rotate(img, angle)
# saving the image by
plt.imsave(classPath+"/RotatedImages/Rotated"+image, cv2.cvtColor(Rotated, cv2.COLOR_RGB2BGR))
except Exception as e:
print(f"Rotation operation failed due to {e}")
def applyShear(self,classPath):
'''
Applies shear augmentation to all the images in the given folder. It shears the images by the given angles (in degrees) along the given axes
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/ShearedImages")
except:
raise Exception("Unable to create directory for sheared images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if isinstance(self.operations['shear']['x_axis'], str) or isinstance(self.operations['shear']['y_axis'], str):
raise Exception("Shearing angle cannot be a string.")
else:
angle_x = np.deg2rad(self.operations['shear']['x_axis'])
angle_y = np.deg2rad(self.operations['shear']['y_axis'])
if img is not None:
try:
                    # applies Shear augmentation to the image (warp by the inverse of the affine skew matrix).
                    Sheared = tf.warp(img, inverse_map = np.linalg.inv(Affine2D().skew(xShear = angle_x, yShear = angle_y).get_matrix()))
Sheared = (Sheared * 255).astype(np.uint8)
# saving the image by
plt.imsave(classPath+"/ShearedImages/Sheared"+image, cv2.cvtColor(Sheared, cv2.COLOR_RGB2BGR))
except Exception as e:
print(f"Shearing operation failed due to {e}")
def applyInvert(self,classPath):
'''
Applies invert augmentation to all the images in the given folder. It negates the images by reversing the pixel value.
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/InvertedImages")
except:
raise Exception("Unable to create directory for inverted images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if (self.operations['invert']==True):
if img is not None:
try:
# applies Invert augmentation to the image.
Inverted=abs(255-img)
# saving the image by
plt.imsave(classPath+"/InvertedImages/Inverted"+image, Inverted)
except Exception as e:
print(f"Inverting operation failed due to {e}")
def applyCLAHE(self,classPath):
'''
Applies contrast limited adaptive histogram equalization to all the images in the given folder.
Args:
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/CLAHEedImages")
except:
raise Exception("Unable to create directory for CLAHEed images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if self.operations['CLAHE']==True:
if img is not None:
try:
#convert BGR to GRAYSCALE
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# applies CLAHE augmentation to the image.
clahe=cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
CLAHEed=clahe.apply(gray)
# saving the image by
plt.imsave(classPath+"/CLAHEedImages/CLAHEed"+image, cv2.cvtColor(CLAHEed, cv2.COLOR_GRAY2BGR))
except Exception as e:
print(f"CLAHE operation failed due to {e}")
def HistogramEqualisation(self,classPath):
'''
        Applies histogram equalisation to all the images in the given folder. It adjusts the contrast of each image using its histogram.
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/HistogramEqualisedImages")
except:
raise Exception("Unable to create directory for Histogram Equalised images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if(self.operations['Hist_Equal']==True):
if img is not None:
try:
#convert image from BGR to GRAYSCALE.
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
                    # applies histogram equalisation to the greyscale image.
                    equalised_img = cv2.equalizeHist(gray)
                    # stack the greyscale original and the equalised image side by side
                    # (both are single-channel, so they can be hstacked directly)
                    HistogramEqualised = np.hstack((gray, equalised_img))
                    # saving the image
                    plt.imsave(classPath+"/HistogramEqualisedImages/HistogramEqualised"+image, HistogramEqualised, cmap='gray')
except Exception as e:
print(f"Histogram Equalisation operation failed due to {e}")
def applyCrop(self,classPath):
'''
Applies cropping augmentation to all the images in the given folder. Either the images are cropped randomly or cropped with fixed coordinates (y1, y2, x1, x2) given by the user
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/CroppedImages")
except:
raise Exception("Unable to create directory for cropped images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if img is not None:
try:
if isinstance(self.operations['crop'], str):
if self.operations['crop'] == 'random':
y1, y2, x1, x2 = random.randint(1, img.shape[0]), random.randint(1, img.shape[0]), random.randint(1, img.shape[1]), random.randint(1, img.shape[1]),
Cropped = img[min(y1, y2):max(y1, y2), min(x1, x2):max(x1, x2), :]
plt.imsave(classPath+"/CroppedImages/Cropped"+image, cv2.cvtColor(Cropped, cv2.COLOR_RGB2BGR))
elif isinstance(self.operations['crop'], list):
if len(self.operations['crop']) == 4:
Cropped = img[self.operations['crop'][0]:self.operations['crop'][1], self.operations['crop'][2]:self.operations['crop'][3], :]
plt.imsave(classPath+"/CroppedImages/Cropped"+image, cv2.cvtColor(Cropped, cv2.COLOR_RGB2BGR))
else:
raise Exception("Cropping needs exactly 4 coordinates for y1, y2, x1, x2.")
else:
raise Exception("Cropping needs random parameter or list of coordinates.")
except Exception as e:
print(f"Crop operation failed due to {e}")
def applyScale(self,classPath):
'''
Applies scaling augmentation to all the images in the given folder. Scales the image by the given ratio.
Args :
classPath : (string) directory containing images for a particular class.
'''
ratio = self.operations['scale']
try:
os.mkdir(classPath+"/ScaledImages")
except:
raise Exception("Unable to create directory for scaled images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if isinstance(self.operations['scale'], str):
raise Exception("Scaling ratio cannot be a string.")
else:
if img is not None:
try:
if ratio < 0:
raise Exception("Scale ratio cannot be negative.")
else:
# applies scale augmentation to the image.
Scaled = cv2.resize(img, (round(img.shape[0] * ratio), round(img.shape[1] * ratio)))
# # saving the image by
plt.imsave(classPath+"/ScaledImages/Scaled"+image, cv2.cvtColor(Scaled, cv2.COLOR_RGB2BGR))
except Exception as e:
print(f"Scale operation failed due to {e}")
def applyZoom(self,classPath):
'''
Applies zooming augmentation to all the images in the given folder. It zooms the images by the given ratio
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/ZoomedImages")
except:
raise Exception("Unable to create directory for zoomed images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if isinstance(self.operations['zoom'], str):
raise Exception("Zoom factor cannot be a string.")
else:
factor = self.operations['zoom']
if factor < 1:
raise Exception("Zoom factor cannot be lesser than 1.")
else:
if img is not None:
try:
# applies zooming augmentation to the image.
h, w = img.shape[0], img.shape[1]
Zoomed = cv2.resize(img, (round(img.shape[1] * factor), round(img.shape[0] * factor)))
w_zoomed, h_zoomed = Zoomed.shape[1], Zoomed.shape[0]
x1 = round((float(w_zoomed) / 2) - (float(w) / 2))
x2 = round((float(w_zoomed) / 2) + (float(w) / 2))
y1 = round((float(h_zoomed) / 2) - (float(h) / 2))
y2 = round((float(h_zoomed) / 2) + (float(h) / 2))
Zoomed = Zoomed[y1:y2, x1:x2]
# saving the image by
plt.imsave(classPath+"/ZoomedImages/Zoomed"+image, cv2.cvtColor(Zoomed, cv2.COLOR_RGB2BGR))
except Exception as e:
print(f"Zooming operation failed due to {e}")
def applyGreyscale(self,classPath):
'''
Applies greyscale augmentation to all the images in the given folder.
Args :
classPath : (string) directory containing images for a particular class.
'''
try:
os.mkdir(classPath+"/GreyscaleImages")
except:
raise Exception("Unable to create directory for greyscale images.")
for image in list(os.listdir(classPath)):
# Read image
img = cv2.imread(classPath+"/"+image)
if not isinstance(self.operations['greyscale'], bool):
raise Exception("Greyscale parameter must be a boolean value.")
else:
if self.operations['greyscale']:
if img is not None:
try:
# applies greyscale augmentation to the image.
Greyscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# saving the image by
cv2.imwrite(classPath+"/GreyscaleImages/Greyscale"+image, Greyscale)
except Exception as e:
print(f"Greyscale operation failed due to {e}")
| 3.09375
| 3
|
phase1/parser_indexer.py
|
mallika2011/Maze-Search-Engine
| 1
|
12775920
|
#!/usr/bin/python
import xml.sax
import sys
import os
import nltk
from nltk import sent_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import TreebankWordTokenizer,ToktokTokenizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import re
import json
import time
import threading
import Stemmer
# GLOBAL VARIABLES
total_tokens = 0
indexed_tokens = 0
start_time = time.time()
threads = []
end_time = 0
CHUNK = 1000
stem_words = {}
all_stopwords = stopwords.words('english')
# ss = SnowballStemmer("english")
stemmer = Stemmer.Stemmer('english')
output_folder = ""
stat_path = ""
STAT_FILE = ""
INDEX_FILE_PATH = ""
'''
Function to create new directories
'''
#function to create directories to store results
def create_directory(folder_path):
my_path = os.getcwd()
my_path = my_path + '/' +folder_path
if not os.path.exists(my_path):
os.makedirs(my_path)
return my_path
def dummy(n):
print("thread ", n)
time.sleep(10)
print("slept 10 for",n)
'''
Class handler to manage and parse
the XML wiki data accordingly.
'''
class WikiHandler(xml.sax.ContentHandler):
def __init__(self):
self.CurrentData = ""
self.data = ""
self.page_count = 0
self.all_titles = []
self.title = ''
self.text = ''
self.index = {}
self.id = ''
self.id_capture = False
self.page_titles = []
self.page_texts = []
self.page_nos = []
# Call when an element starts
def startElement(self, tag, attributes):
self.CurrentData = tag
if tag == "page":
self.data = ''
if tag == "text":
self.data = ''
if tag == 'id':
self.data = ''
# Call when an elements ends
def endElement(self, tag):
if tag == "page":
self.page_titles.append(self.title)
self.page_texts.append(self.text)
self.page_nos.append(self.id)
self.page_count+=1
self.id_capture = False
#create a new thread for every CHUNK pages
if(self.page_count%CHUNK == 0):
print("new thread for ", self.page_count, "...")
t = threading.Thread(target=process_chunk_pages, args=(self.page_titles, self.page_texts, self.page_nos, self.index,self.page_count,))
threads.append(t)
t.start()
#reset 1000 page arrays
self.page_titles = []
self.page_texts = []
self.page_nos = []
elif tag == "title":
self.title = self.data
self.all_titles.append(self.title)
self.data = ''
elif tag == "text":
self.text = self.data
self.data = ''
elif tag == 'id':
if not self.id_capture:
self.id = self.data
self.data = ''
self.id_capture = True
elif tag == 'mediawiki':
print("new thread for ", self.page_count, "...")
t = threading.Thread(target=process_chunk_pages, args=(self.page_titles, self.page_texts, self.page_nos, self.index,self.page_count,))
threads.append(t)
t.start()
#reset 1000 page arrays
self.page_titles = []
self.page_texts = []
self.page_nos = []
#collect all threads
for t in threads:
t.join()
print("Time to index = ", time.time() - start_time)
write_to_file(self.index, self.all_titles)
self.index = {}
self.all_titles = []
print("Done")
print("Total required Time = ", time.time() - start_time)
# Call when a character is read
def characters(self, content):
self.data += content
'''
Function to process CHUNK sized pages at a time
Each CHUNK will be processed by an individual thread.
'''
def process_chunk_pages(title, text, number, index,num):
t0 = time.time()
for i in range(len(title)):
create_index(title[i],text[i],number[i], index)
print("Finished processing for ---", num, "in : ", time.time()-t0)
'''
Function to process text for further use
Includes : case folding, tokenization, stop
words removal, and stemming.
'''
def process_text(text,count_tokens=False):
processed = []
    # case folding: convert to lower case
text = text.lower()
# tokenize by splitting text
tokenized_text = re.split(r'[^A-Za-z0-9]+', text)
tokenized_text = ' '.join(tokenized_text).split()
#stop words removal
tokens_without_sw = [token for token in tokenized_text if not token in all_stopwords]
#stemming : check if the word already exists
# in the stem_words set. if does, then use, else stem
for token in tokens_without_sw:
if token in stem_words:
stemmed = stem_words[token]
else:
# stemmed = ss.stem(token)
stemmed = stemmer.stemWord(token)
stem_words[token]=stemmed
processed.append(stemmed)
#add to total tokens in the corpus
if count_tokens:
global total_tokens
total_tokens+=len(tokenized_text)
return(processed)
'''
Function to extract the infobox from the
pages of the wikipedia dump
'''
def get_infobox(text):
ind = [m.start() for m in re.finditer(r'{{Infobox|{{infobox|{{ Infobox| {{ infobox', text)]
ans = []
for i in ind:
close = False
counter = 0
end = -1
for j in range(i, len(text)-1):
if text[j]=='}' and text[j+1] =='}':
counter-=1
elif text[j]=='{' and text[j+1] =='{':
counter+=1
if counter == 0:
end=j+1
break
ans+= process_text(text[i:end+1])
return ans
'''
Function to extract the categoris, external links,
and the references from the body of the page and
process them individually as well.
'''
def split_components(text):
lis = re.split(r"\[\[Category|\[\[ Category", text,1)
#storing the value for cateogories
if len(lis)==1:
category=''
else:
category = lis[1]
lis = re.split(r"==External links==|== External links ==", lis[0],1)
#storing the value for external links
if len(lis)==1:
links = ''
else:
links = lis[1]
lis = re.split(r"==References==|== References ==|== references ==|==references==", lis[0],1)
#storing the value of references
if len(lis)==1:
references = ''
else:
references = lis[1]
return category, links, references
'''
Function to create the inverted index
'''
def create_index(title, text, doc_no, index):
c,r,l = split_components(text)
processed_components = []
processed_components.append(process_text(title,True))
try:
processed_components.append(process_text(text,True))
except:
pass
processed_components.append(process_text(c))
processed_components.append(get_infobox(text))
processed_components.append(process_text(r))
processed_components.append(process_text(l))
add_to_index(doc_no,processed_components, index)
'''
Function to append an entry to the index object.
'''
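# Each index entry maps token -> {doc_no: freq_values}, where freq_values holds the
# counts [total, title, body, category, infobox, references, external-links],
# matching the field letters f, t, b, c, i, r, e used when the index is written out.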
def add_to_index(doc_no,processed_components,index):
for i in range(len(processed_components)):
processed_tokens = processed_components[i]
field = i+1
for token in processed_tokens:
if(token == ""):
continue
freq_values = [0, 0, 0, 0, 0, 0, 0]
if token not in index:
freq_values[field] += 1
freq_values[0] += 1
index[token] = {}
index[token][doc_no] = freq_values
else:
if doc_no not in index[token]:
freq_values[field] += 1
freq_values[0] += 1
index[token][doc_no] = freq_values
else:
index[token][doc_no][field]+=1
index[token][doc_no][0]+=1
def write_to_file(index, titles):
#write statistics into file
statistics = str(total_tokens)+"\n"+str(len(index))
with open(STAT_FILE, "w") as file:
file.write(statistics)
#write inverted index into file
print("writing to file ...")
ftype = ['f','t', 'b', 'c', 'i', 'r', 'e']
with open(INDEX_FILE_PATH,'w') as f:
data = ""
for key, docs in sorted(index.items()):
#to reduce index size
if (len(key))>27 or len(index[key])<=1:
continue
data += str(key)+":"
for doc,values in index[key].items():
data+="d"+str(doc)
for i in range(len(values)):
if values[i]>0:
data+=str(ftype[i]) + str(values[i])
data+="\n"
f.write(data)
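# --- Illustrative sketch (not part of the original indexer) ---
# Reading back one posting line written by write_to_file above. A line looks
# roughly like "appl:d12f4t1b3d47f1b1": the term, then per-document blocks
# that start with 'd<doc_no>' followed by the non-zero field counts keyed by
# f/t/b/c/i/r/e (overall, title, body, category, infobox, references, links).
import re

def parse_posting_line(line):
    term, postings = line.rstrip('\n').split(':', 1)
    docs = {}
    for chunk in postings.split('d')[1:]:
        match = re.match(r'(\d+)(.*)', chunk)
        doc_no, fields = match.group(1), match.group(2)
        docs[doc_no] = {f: int(c) for f, c in re.findall(r'([ftbcire])(\d+)', fields)}
    return term, docs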
if ( __name__ == "__main__"):
xml_file = sys.argv[1]
output_folder = sys.argv[2]
STAT_FILE = sys.argv[3]
#create stat directory
    stat_dir = STAT_FILE.rsplit('/',1)
if len(stat_dir)>1:
create_directory(stat_dir[0])
INDEX_FILE_PATH = output_folder+'index.txt'
# create an XMLReader
parser = xml.sax.make_parser()
    # turn off namespaces
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
# override the default ContextHandler
Handler = WikiHandler()
parser.setContentHandler( Handler )
parser.parse(xml_file)
| 2.734375
| 3
|
wouso/games/challenge/urls.py
|
AlexandruGhergut/wouso
| 117
|
12775921
|
<gh_stars>100-1000
from django.conf.urls.defaults import *
urlpatterns = patterns('wouso.games.challenge.views',
url(r'^$', 'index', name='challenge_index_view'),
url(r'^(?P<id>\d+)/$', 'challenge', name='view_challenge'),
url(r'^launch/(?P<to_id>\d+)/$', 'launch', name='challenge_launch'),
url(r'^refuse/(?P<id>\d+)/$', 'refuse', name='challenge_refuse'),
url(r'^accept/(?P<id>\d+)/$', 'accept', name='challenge_accept'),
url(r'^cancel/(?P<id>\d+)/$', 'cancel', name='challenge_cancel'),
url(r'^setplayed/(?P<id>\d+)/$', 'setplayed', name='setplayed'),
url(r'^use_artifact/$', 'use_one_more', name='challenge_onemore'),
url(r'^history/(?P<playerid>\d+)/$', 'history', name='challenge_history'),
url(r'^playerchallenge/$', 'challenge_player', name='challenge_player'),
url(r'^randomchallenge/$', 'challenge_random', name='challenge_random'),
url(r'^stats/$', 'challenge_stats', name='challenge_stats'),
url(r'^stats/player=(?P<player_id>\d+)/$', 'challenge_stats', name='challenge_stats'),
url(r'^stats/target=(?P<target_id>\d+)/', 'detailed_challenge_stats',
name='detailed_challenge_stats'),
url(r'^stats/player=(?P<player_id>\d+)/target=(?P<target_id>\d+)/', 'detailed_challenge_stats',
name='detailed_challenge_stats'),
)
| 1.5625
| 2
|
talk_like/_nbdev.py
|
devacto/talk_like
| 0
|
12775922
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Scraper": "00_scraper.ipynb",
"Scraper.get_facebook_posts": "00_scraper.ipynb",
"print_something": "00_scraper.ipynb"}
modules = ["scraper.py"]
doc_url = "https://devacto.github.io/talk_like/"
git_url = "https://github.com/devacto/talk_like/tree/master/"
def custom_doc_links(name): return None
| 1.757813
| 2
|
iota/commands/extended/send_trytes.py
|
plenarius/iota.lib.py
| 62
|
12775923
|
<filename>iota/commands/extended/send_trytes.py
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from typing import List
import filters as f
from iota import TransactionTrytes, TryteString
from iota.commands import FilterCommand, RequestFilter
from iota.commands.core.attach_to_tangle import AttachToTangleCommand
from iota.commands.core.get_transactions_to_approve import \
GetTransactionsToApproveCommand
from iota.commands.extended.broadcast_and_store import BroadcastAndStoreCommand
from iota.filters import Trytes
__all__ = [
'SendTrytesCommand',
]
class SendTrytesCommand(FilterCommand):
"""
Executes `sendTrytes` extended API command.
See :py:meth:`iota.api.IotaApi.send_trytes` for more info.
"""
command = 'sendTrytes'
def get_request_filter(self):
return SendTrytesRequestFilter()
def get_response_filter(self):
pass
def _execute(self, request):
depth = request['depth'] # type: int
min_weight_magnitude = request['minWeightMagnitude'] # type: int
trytes = request['trytes'] # type: List[TryteString]
# Call ``getTransactionsToApprove`` to locate trunk and branch
# transactions so that we can attach the bundle to the Tangle.
gta_response = GetTransactionsToApproveCommand(self.adapter)(depth=depth)
att_response = AttachToTangleCommand(self.adapter)(
branchTransaction = gta_response.get('branchTransaction'),
trunkTransaction = gta_response.get('trunkTransaction'),
minWeightMagnitude = min_weight_magnitude,
trytes = trytes,
)
# ``trytes`` now have POW!
trytes = att_response['trytes']
BroadcastAndStoreCommand(self.adapter)(trytes=trytes)
return {
'trytes': trytes,
}
class SendTrytesRequestFilter(RequestFilter):
def __init__(self):
super(SendTrytesRequestFilter, self).__init__({
'depth': f.Required | f.Type(int) | f.Min(1),
'trytes':
f.Required
| f.Array
| f.FilterRepeater(f.Required | Trytes(result_type=TransactionTrytes)),
# Loosely-validated; testnet nodes require a different value than
# mainnet.
'minWeightMagnitude': f.Required | f.Type(int) | f.Min(1),
})
| 2.03125
| 2
|
project3/films/admin.py
|
Codetype/Django-application
| 0
|
12775924
|
from django.contrib import admin
from .models import Category, Movie, Comment
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'slug']
prepopulated_fields = {'slug': ('name',)}
admin.site.register(Category, CategoryAdmin)
class FilmAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'year', 'film_director', 'trailer', 'after_premiere', 'created_at', 'updated_at']
list_filter = ['after_premiere', 'created_at', 'updated_at']
list_editable = ['year', 'film_director', 'after_premiere']
prepopulated_fields = {'slug': ('name',)}
admin.site.register(Movie, FilmAdmin)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'comment')
admin.site.register(Comment, CommentAdmin)
| 2.140625
| 2
|
etl-scripts/slice/convert_to_parquet/parquet_writer.py
|
aculich/openmappr
| 19
|
12775925
|
<gh_stars>10-100
# Databricks notebook source exported at Sat, 7 May 2016 16:46:36 UTC
# MAGIC %md
# MAGIC Sanitized CSV file to Parquet file writer
# MAGIC #NOTES
# MAGIC here, I read the temp data and split out parquet data for the given file.
# MAGIC
# MAGIC Also, if the file has already been processed, we don't process it again. For this, we store a marker file along with the written Parquet file.
# MAGIC
# MAGIC ## steps
# MAGIC 1. load file with correct options.
# MAGIC 2. fix column names
# MAGIC 3. mark empty cells as nulls.
# MAGIC 4. write out the Parquet file.
# MAGIC
# MAGIC ## inputs
# MAGIC - source file path
# MAGIC - dest file path
# MAGIC - schema structure
# MAGIC - delimiter
# MAGIC - whether it contains header or not.
# COMMAND ----------
import datetime
import time
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.window import Window
from pyspark.sql.types import *
import pyspark.sql.functions as F
# COMMAND ----------
# Arguments
# delimiter = '\t'
# srcFilePath = raw_data[dataName]
# destParquetFilePath = parquet_data[dataName]
# creation date of this file is checked. if it is newer than srcFilePath, then import process is skipped
# fileMarkerPath = parquet_data[dataName] +'-marker'
sqlContext.clearCache()
# COMMAND ----------
dataSchema = StructType([
StructField('merchant',StringType(), False),
StructField('userID', LongType(), False),
StructField('orderDate', StringType(), True),
StructField('orderID', IntegerType(), False),
StructField('itemID', IntegerType(), False),
StructField('quantity', DoubleType(), True),
StructField('spend', DoubleType(), True),
StructField('projWt', DoubleType(), True),
StructField('brand', StringType(), True),
StructField('category', StringType(), True),
StructField('state', StringType(), True),
StructField('zipCode', IntegerType(), True),
StructField('gender', StringType(), True),
StructField('birthYrMo', IntegerType(), True),
StructField('ethnicity', StringType(), True),
StructField('education', StringType(), True),
StructField('hhIncome', StringType(), True),
StructField('NumAdults', IntegerType(), True),
StructField('NumPPL', IntegerType(), True),
StructField('Marital', StringType(), True)])
# COMMAND ----------
def set_schema(df, dataSchema):
old_new_name_tuple = zip(df.columns, [(f.name,f.dataType) for f in dataSchema.fields])
df = df.select([F.col(x).cast(new_type).alias(new_name) for x,(new_name, new_type) in old_new_name_tuple])
return df
# COMMAND ----------
def fix_null(x):
return F.when(F.col(x).isNotNull() & (F.lower(F.col(x)) != "null") & (F.ltrim(F.col(x)) != ""), F.col(x)).otherwise(None)
def sanitize_data(df, dataSchema):
# fix nulls because CSV parser isn't doing it
# convert "NULL" and "" string values for all StringType columns into None
columns_to_fix = set(filter(None, [x.name if x.dataType == StringType() else None for x in dataSchema.fields]))
df = df.select([fix_null(x).alias(x) if x in columns_to_fix else x for x in df.columns])
return df
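# COMMAND ----------
# Illustrative sketch (not part of the original notebook): the same
# "treat empty / 'NULL' strings as missing" rule as fix_null above, written
# in plain Python for clarity; the notebook applies it column-wise with
# pyspark functions instead.
def is_effectively_null(value):
    return value is None or value.strip() == "" or value.lower() == "null"

# is_effectively_null("NULL") -> True; is_effectively_null("Acme") -> False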
# COMMAND ----------
def import_data(src_url, dest_url, delimiter='\t', header=False):
print "sanitizing file: %s and writing to: %s" % (src_url, dest_url)
    #sanitize data and store as parquet file
if delimiter:
df = sqlContext.read.format('com.databricks.spark.csv').options(header=header, delimiter=delimiter).load(src_url)
else:
df = sqlContext.read.format('com.databricks.spark.csv').options(header=header).load(src_url)
# set schema
df = set_schema(df, dataSchema)
df = sanitize_data(df, dataSchema)
df.write.mode("overwrite").parquet(dest_url, mode="overwrite")
print "transaction data written to: %s" % dest_url
return dest_url
# COMMAND ----------
import_data(srcFilePath, destParquetFilePath, delimiter, False)
| 2.84375
| 3
|
study/chainer_study/chainer_study-4.py
|
strawsyz/straw
| 2
|
12775926
|
# Initial setup following http://docs.chainer.org/en/stable/tutorial/basic.html
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
import matplotlib.pyplot as plt
# Defining your own neural networks using `Chain` class
class MyChain(Chain):
def __init__(self):
super(MyChain, self).__init__(
            # Setting the first argument to None lets the layer infer its input size from the first forward pass
l1=L.Linear(None, 30),
l2=L.Linear(None, 30),
l3=L.Linear(None, 1)
)
def __call__(self, x):
h = self.l1(x)
h = self.l2(F.sigmoid(h))
return self.l3(F.sigmoid(h))
# Setup a model
model = MyChain()
model_save_path = 'mlp.model'
print('Loading model')
# --- use NPZ format ---
serializers.load_npz(model_save_path, model)
# --- use HDF5 format (need h5py library) ---
# %timeit serializers.load_hdf5(model_save_path, model)
# define target function
def target_func(x):
"""Target function to be predicted"""
return x ** 3 - x ** 2 + x ** -1 + x
# create efficient function to calculate target_func of numpy array in element wise
target_func_elementwise = np.frompyfunc(target_func, 1, 1)
# define data domain [xmin, xmax]
xmin = -3
xmax = 3
# number of training data
sample_num = 20
# calculate new data from model (predict value)
x_test_data = np.array(np.random.rand(sample_num) * (xmax - xmin) + xmin) # create 20
x_test = Variable(x_test_data.reshape(-1, 1).astype(np.float32))
y_test_data = model(x_test).data # this is predicted value
# calculate target function (true value)
x_detail_data = np.array(np.arange(xmin, xmax, 0.1))
y_detail_data = target_func_elementwise(x_detail_data)
plt.clf()
# plot model predict data
plt.scatter(x_test_data, y_test_data, color='k', label='Model predict value')
# plot target function
plt.plot(x_detail_data, y_detail_data, label='True value')
plt.legend(loc='lower right')
plt.show()
| 2.90625
| 3
|
Easy/Flipping_an_Image/Flipping_an_Image.py
|
nitin3685/LeetCode_Solutions
| 0
|
12775927
|
<gh_stars>0
class Solution:
def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:
FI_A = list()
for row in A:
row = [0 if i else 1 for i in row[::-1]]
FI_A.append(row)
return FI_A
| 2.9375
| 3
|
contacts/forms.py
|
intherenzone/CRM
| 2
|
12775928
|
from django import forms
from contacts.models import Contact
from common.models import Comment
class ContactForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
assigned_users = kwargs.pop('assigned_to', [])
contact_org = kwargs.pop('organization', [])
super(ContactForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({
'rows': '6'})
self.fields['assigned_to'].queryset = assigned_users
self.fields['organization'].queryset = contact_org
self.fields['organization'].required = False
self.fields['assigned_to'].required = False
self.fields['teams'].required = False
self.fields['title'].required = False
class Meta:
model = Contact
fields = (
'assigned_to', 'organization', 'title','teams', 'first_name', 'last_name', 'email', 'phone', 'address', 'description'
)
def format_phone(phone):
phone_length = len(phone)
if phone_length == 11:
new_phone = phone[:1] + ' (' + phone[1:4] + ') ' + phone[4:7] + '-' + phone[7:]
elif phone_length == 12:
new_phone = phone[:2] + ' (' + phone[2:5] + ') ' + phone[5:8] + '-' + phone[8:]
elif phone_length == 13:
new_phone = phone[:3] + ' (' + phone[3:6] + ') ' + phone[6:9] + '-' + phone[9:]
else:
new_phone = '(' + phone[0:3] + ') ' + phone[3:6] + '-' + phone[6:]
        return new_phone
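        # Illustrative example of the formatting rule above (hypothetical input):
        # a 10-digit number such as '7035011932' becomes '(703) 501-1932'.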
def clean_phone(self):
client_phone = self.cleaned_data.get('phone', None)
try:
if int(client_phone) and not client_phone.isalpha():
ph_length = str(client_phone)
if len(ph_length) < 10 or len(ph_length) > 13:
raise forms.ValidationError('Phone number must be minimum 10 Digits and maximum of 13 Digits')
except (ValueError):
raise forms.ValidationError('Phone Number should contain only Numbers')
# COMMENTED OUT BECAUSE FILTER WON'T FIND NUMBERS I.E. 7035011932 -> (703) 501-1932, FILTER WON'T FIND IF USER ENTERS 703501...
# phone_length = len(client_phone)
# if phone_length == 11:
# new_phone = client_phone[:1] + ' (' + client_phone[1:4] + ') ' + client_phone[4:7] + '-' + client_phone[7:]
# elif phone_length == 12:
# new_phone = client_phone[:2] + ' (' + client_phone[2:5] + ') ' + client_phone[5:8] + '-' + client_phone[8:]
# elif phone_length == 13:
# new_phone = client_phone[:3] + ' (' + client_phone[3:6] + ') ' + client_phone[6:9] + '-' + client_phone[9:]
# else:
# new_phone = '(' + client_phone[0:3] + ') ' + client_phone[3:6] + '-' + client_phone[6:]
#
# client_phone = new_phone
return client_phone
class ContactCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=64, required=True)
class Meta:
model = Comment
fields = ('comment', 'contact', 'commented_by')
| 2.453125
| 2
|
normalize_data.py
|
aubreychen9012/cAAE
| 19
|
12775929
|
import nibabel as nib
import glob
import os
import numpy as np
import tensorlayer as tl
'''
Before normalization, run N4 bias correction (https://www.ncbi.nlm.nih.gov/pubmed/20378467),
then save the data under folder ./CamCAN_unbiased/CamCAN
'''
modalities = ['T1w', 'T2w']
BraTS_modalities = ['T1w']
folders = ['HGG', 'LGG']
wd = './Data/CamCAN_unbiased/CamCAN'
thumbnail_idx = [60, 70, 80, 90]
for mod in modalities:
wd_mod = os.path.join(wd, str(mod))
os.chdir(wd_mod)
img_files = [i for i in glob.glob("*") if "_unbiased" in i]
for img in img_files:
print(img)
img_data = nib.load(img)
img_data = img_data.get_data()
mask = img.split("_unbiased")[0] + "_brain_mask.nii.gz"
mask_data = nib.load(mask).get_data()
img_data = np.transpose(img_data, [2, 0, 1])
mask_data = np.transpose(mask_data, [2, 0, 1])
idx = [s for s in range(img_data.shape[0]) if mask_data[s].sum() > 1]
img_data = img_data[idx, :, 17:215]
mask_data = mask_data[idx, :, 17:215]
img_data = np.pad(img_data, ((0, 0), (1, 2), (1, 1)), mode='edge')
mask_data = np.pad(mask_data, ((0, 0), (1, 2), (1, 1)), mode='edge')
img_data = np.rot90(img_data, 1, (2, 1))
mask_data = np.rot90(mask_data, 1, (2, 1))
ref_mean = np.mean(img_data[mask_data == 1])
ref_std = np.std(img_data[mask_data == 1])
normed_img = (img_data - ref_mean) / ref_std
normed_img[normed_img == normed_img.min()] = -3.5
x_nif = nib.Nifti1Image(normed_img, np.eye(4))
nib.save(x_nif, os.path.join(img.split("_unbiased")[0] + "_normalized_cropped_mask.nii.gz"))
x_nif = nib.Nifti1Image(mask_data, np.eye(4))
nib.save(x_nif, os.path.join(img.split("_unbiased")[0] + "_mask_cropped_mask.nii.gz"))
tl.visualize.save_images(normed_img[thumbnail_idx, :, :, np.newaxis], [2, 2],
"/scratch_net/bmicdl01/Data/CamCAN_unbiased/preview/" + str(mod)
+ "/" + img.split("_unbiased")[0] + "_normed_img.png")
print("---")
| 2.1875
| 2
|
multi-process.py
|
lidongyv/PSSM
| 0
|
12775930
|
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-08-30 16:47:51
# @Last Modified by: yulidong
# @Last Modified time: 2018-08-30 21:13:04
import torch
import torch.multiprocessing as mp
import time
def add(a,b,c):
start=time.time()
d=a+b
c+=d
print(time.time()-start)
def selfadd(a):
print('a')
a+=2
print(a)
if __name__=='__main__':
mp.set_start_method('forkserver')
c=torch.zeros(1,1,5).float().cuda().share_memory_()
a=torch.arange(5).float().cuda().view_as(c).share_memory_()
b=torch.arange(5).float().cuda().view_as(c).share_memory_()
#c=torch.ones(1).share_memory_()
process=[]
start=time.time()
for i in range(5):
p=mp.Process(target=add,args=[a[:,:,i],b[:,:,i],c[:,:,i],])
#p=mp.Process(target=selfadd,args=(c))
p.daemon=True
p.start()
process.append(p)
for p in process:
p.join()
# p=mp.Process(target=add,args=[a,b,c,])
# p.start()
# p.join()
print('running')
print(a,b,c)
print(time.time()-start)
| 2.71875
| 3
|
autodraft/draftHost/models.py
|
gnmerritt/autodraft
| 0
|
12775931
|
from django.db import models
from django.utils import timezone
class NflConference(models.Model):
name = models.TextField()
abbreviation = models.TextField(max_length=5)
def __unicode__(self):
return self.name
class NflDivision(models.Model):
name = models.TextField()
conference = models.ForeignKey(NflConference)
def __unicode__(self):
return self.name
class NflTeam(models.Model):
"""NFL Team"""
name = models.TextField()
abbreviation = models.TextField(max_length=5)
city = models.TextField()
division = models.ForeignKey(NflDivision)
def __unicode__(self):
return u"{a} - {c} {n}" \
.format(c=self.city, n=self.name, a=self.abbreviation)
class NflPosition(models.Model):
"""Football position e.g. RB, QB, S"""
description = models.TextField()
abbreviation = models.TextField(max_length=4)
def __unicode__(self):
return self.description
class FantasyPosition(models.Model):
"""Fantasy position - a simple subset of NflPositions"""
position = models.ForeignKey(NflPosition)
def __unicode__(self):
return unicode(self.position)
class College(models.Model):
"""A NCAA College"""
name = models.TextField(max_length=30)
def __unicode__(self):
return self.name
class NflPlayer(models.Model):
"""Draft-eligible NFL player"""
first_name = models.TextField()
last_name = models.TextField()
draft_year = models.PositiveIntegerField(default=1)
team = models.ForeignKey(NflTeam)
school = models.ForeignKey(College)
position = models.ForeignKey(NflPosition)
fantasy_position = models.ForeignKey(FantasyPosition)
def __unicode__(self):
return u"{f} {l}".format(f=self.first_name, l=self.last_name)
class ExternalDatabase(models.Model):
"""An external player DB ie ESPN or Yahoo"""
name = models.TextField(max_length=20)
description = models.TextField(max_length=200)
homepage = models.URLField()
def __unicode__(self):
return self.name
class ExternalNflPlayer(models.Model):
"""Link to an external database's player info"""
player = models.ForeignKey(NflPlayer)
db = models.ForeignKey(ExternalDatabase)
external_id = models.IntegerField()
url = models.URLField()
picture = models.URLField()
class ExternalNflTeam(models.Model):
"""Link to an external database's team info"""
team = models.ForeignKey(NflTeam)
db = models.ForeignKey(ExternalDatabase)
external_id = models.IntegerField()
url = models.URLField()
class FantasyRoster(models.Model):
description = models.TextField() ## TODO: this should be more than a text field?
slots = models.PositiveIntegerField()
def __unicode__(self):
return self.description
class FantasyDraft(models.Model):
name = models.TextField(max_length=20)
admin = models.EmailField()
draft_start = models.DateTimeField()
time_per_pick = models.PositiveIntegerField()
team_limit = models.PositiveIntegerField()
roster = models.ForeignKey(FantasyRoster)
password = models.TextField(max_length=32, null=True, blank=True)
def __unicode__(self):
return self.name
def picks(self):
return FantasyPick.objects.filter(fantasy_team__draft=self)
def is_active(self, time):
"""A draft is active if any picks are active"""
for p in self.picks():
if p.is_active(time):
return True
return False
class FantasyTeam(models.Model):
draft = models.ForeignKey(FantasyDraft)
name = models.TextField(max_length=80)
email = models.EmailField()
auth_key = models.TextField(max_length=40) # len(uuid.uuid4) == 36
def __unicode__(self):
return self.name
def picks(self):
return FantasyPick.objects.filter(fantasy_team=self)
def remove_picks(self):
self.picks().delete()
class FantasySeason(models.Model):
year = models.TextField(max_length=10) # 2013-2014
def __unicode__(self):
return u"{} Fantasy Season".format(self.year)
class MockDraft(models.Model):
"""Ties together an existing fantasy team & a separate draft"""
owner = models.ForeignKey(FantasyTeam)
draft = models.ForeignKey(FantasyDraft)
class MockDraftBot(models.Model):
season = models.ForeignKey(FantasySeason)
draft = models.ForeignKey(FantasyDraft)
team = models.ForeignKey(FantasyTeam)
brain = models.TextField(max_length=12)
def __unicode__(self):
return u"{} bot for the {}".format(self.brain, self.season)
class FantasyPick(models.Model):
"""An upcoming pick"""
fantasy_team = models.ForeignKey(FantasyTeam)
starts = models.DateTimeField('starts at')
expires = models.DateTimeField('expires at')
pick_number = models.PositiveIntegerField()
class Meta:
ordering = ('pick_number',)
def __unicode__(self):
return u"{d} - Pick {n} - {t}" \
.format(d=self.fantasy_team.draft.name, n=self.pick_number,
t=self.fantasy_team.name)
def is_active(self, time):
"""Returns whether the time is between start & expire"""
return self.starts <= time and \
self.expires >= time
class FantasySelection(models.Model):
"""A pick that's been made"""
    when = models.DateTimeField(default=timezone.now)
draft_pick = models.ForeignKey(FantasyPick)
player = models.ForeignKey(NflPlayer)
| 2.46875
| 2
|
pkt/pkt_gsheet.py
|
queeniekwan/Seaquake
| 0
|
12775932
|
<reponame>queeniekwan/Seaquake
from googleapiclient.discovery import build
from google.oauth2 import service_account
from pool_metrics import metrics_comparison
from balance import get_seaquake_balance, get_steward_stats
from explorer_webscrap import get_pkt_metrics
from datetime import datetime
# define the scope
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
# get credentials
SERVICE_ACCOUNT_FILE = 'pkt/credentials.json'
creds = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
# create service and call the sheets API
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()
# Seaquake Mining gsheet info
SheetID = '1wgWRJN6yo-TcpwfulkcENCR7XmCn41k7TnnlEzsAc6E'
def write(id, range, data):
"""write values into the spreadsheet given the id (str), range (str), and data (list of list)"""
data_body = {}
data_body['range'] = range
data_body['majorDimension'] = 'COLUMNS' # 'ROWS' or 'COLUMNS'
data_body['values'] = data
value_input_option = 'USER_ENTERED' # 'USER_ENTERED' or 'RAW'
result = sheet.values().update(spreadsheetId=id, range=range, valueInputOption=value_input_option, body=data_body).execute()
if not result:
print('Error when writing values')
else:
        print('New values written')
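# Illustrative usage (values are hypothetical): write two columns of data
# starting at cell A1 of the 'Data' sheet; with majorDimension 'COLUMNS',
# each inner list is one column.
# write(SheetID, 'Data!A1:B3', [['2021-01-01', '2021-01-02', '2021-01-03'], [1, 2, 3]])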
def append(id, range, data):
"""append data to a current table"""
value_input_option = 'USER_ENTERED' # 'USER_ENTERED' or 'RAW'
insert_data_option = 'INSERT_ROWS' # 'INSERT_ROWS' or 'OVERWRITE'
data_body = {}
data_body['range'] = range
data_body['majorDimension'] = 'ROWS' # 'ROWS' or 'COLUMNS'
data_body['values'] = data
result = sheet.values().append(spreadsheetId=id, range=range, valueInputOption=value_input_option, insertDataOption=insert_data_option, body=data_body).execute()
if not result:
print('Error when appending values')
else:
print('New values appended')
def batch_update(id, ranges, values):
data_body = {}
value_input_option = 'USER_ENTERED' # 'USER_ENTERED' or 'RAW'
data_body['valueInputOption'] = value_input_option
data = []
for i in range(len(ranges)):
update={}
update['range'] = ranges[i]
update['majorDimension'] = 'COLUMNS' # 'ROWS' or 'COLUMNS'
update['values'] = values[i]
data.append(update)
data_body['data'] = data
result = sheet.values().batchUpdate(spreadsheetId=id, body=data_body).execute()
if not result:
print('Error when batch updating values')
else:
print('New values batch updated')
def update_sq_mining():
ranges = []
values = []
t_range = 'Data!B1'
t = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
ranges.append(t_range)
values.append([[t]])
pool_metrics_range = 'Data!B4:E7'
pool_metrics = metrics_comparison()
ranges.append(pool_metrics_range)
values.append(pool_metrics)
coins_metrics_range = 'Data!B15:B23'
pkt = get_pkt_metrics()
steward = get_steward_stats()
sq = get_seaquake_balance()
cic = [pkt[0] - steward[0]]
coins_metrics = [pkt + [''] + steward + [''] + cic + [''] + sq]
ranges.append(coins_metrics_range)
values.append(coins_metrics)
batch_update(SheetID, ranges, values)
backlog_range = 'Total PKT Mined to date!A1:B1'
backlog = [[t, pkt[0]]]
append(SheetID, backlog_range, backlog)
def main():
update_sq_mining()
if __name__ == '__main__':
main()
| 2.703125
| 3
|
visualization/prepare_intervals.py
|
icelu/GI_Cluster
| 3
|
12775933
|
<reponame>icelu/GI_Cluster
#!/usr/bin/env python
# Create interval files for visualization in Circos
#
# Author: <NAME>
# Affiliation : National University of Singapore
# E-mail : <EMAIL>
#
import os
import optparse
def getGenomeSize(genomefile):
firstLine = open(genomefile).readline()
assert ('>' in firstLine), "This is not a standard FASTA file!"
genomesize = 0
with open(genomefile, 'r') as fin:
# skip the first line
fin.readline()
for line in fin:
genomesize += len(line.strip())
return genomesize
def createGIFile(input_file, output_file, genomesize):
'''
    Given an input file with a list of start and end positions,
    create a file where intervals not present in the input get an additional column 0,
    while intervals present in the input get an additional column 1.
eg.
Input:
270001 275000
560001 565000
565001 570000
Output:
hs1 0 270000 0
hs1 270001 275000 1
hs1 275001 560000 0
hs1 560001 565000 1
'''
gene_dict = {}
product_dict = {}
with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
# next(infile)
last_end = 0
for line in infile:
# should always strip to trim the trailing special characters
fields = line.strip().split('\t')
start = int(fields[0])
end = int(fields[1])
# if start begins at 1, no line,
# TODO: if end smaller than the total length, one 0 line to make genome connected
            if start != 1:
nomark_line = "hs1\t%s\t%s\t0\n" % (last_end + 1, start - 1)
outfile.write(nomark_line)
marked_line = "hs1\t%s\t%s\t1\n" % (start, end)
outfile.write(marked_line)
# remember the last position of previous line
last_end = end
# if last_end < genome_len, should output additional line to complete the circle
# print 'last_end', last_end
if last_end < genomesize:
nomark_line = "hs1\t%s\t%s\t0\n" % (last_end + 1, genomesize)
outfile.write(nomark_line)
def createHightFile(input_file, output_file):
gene_dict = {}
product_dict = {}
with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
# next(infile)
last_end = 0
for line in infile:
# should always strip to trim the trailing special characters
fields = line.strip().split('\t')
start = int(fields[0])
end = int(fields[1])
marked_line = "hs1\t%s\t%s\n" % (start, end)
outfile.write(marked_line)
def createGenomeFile(outfile, gname, genomesize):
'''
Output format:
type parent name start end color
chr - hs1 CT18 0 48090376 black
'''
with open(outfile, 'w') as fout:
line = 'chr - hs1 %s 0 %d black\n' % (gname, genomesize)
fout.write(line)
if __name__ == '__main__':
parser = optparse.OptionParser()
# parser.add_option("-l", "--length", dest="length", type="int", default="-1", help="the size of the microbial genome")
parser.add_option("-g", "--gfile", dest="gfile", help="input file containing the genome sequence")
parser.add_option("-i", "--input", dest="input", help="input file containing GIs")
parser.add_option("-o", "--output", dest="output", help="output file for visualizing genomic islands")
parser.add_option("-c", "--cfile", dest="cfile", help="output file for visualing the whole genome")
parser.add_option("-f", "--hfile", dest="hfile", help="output file for highlighting the genome islands")
options, args = parser.parse_args()
genomesize = getGenomeSize(options.gfile)
createGIFile(options.input, options.output, genomesize)
if options.cfile:
gname = os.path.basename(options.gfile)
createGenomeFile(options.cfile, gname, genomesize)
if options.hfile:
createHightFile(options.input, options.hfile)
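# Example invocation (illustrative; the file names are hypothetical):
#   python prepare_intervals.py -g genome.fna -i predicted_GIs.txt \
#       -o gi_intervals.txt -c genome_karyotype.txt -f gi_highlight.txt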
| 3.171875
| 3
|
templates/compiler/BUILD.tmpl.bzl
|
iocat/rules_rescript
| 1
|
12775934
|
<filename>templates/compiler/BUILD.tmpl.bzl
{{AUTO_GENERATED_NOTICE}}
load("@{{REPO_NAME}}//:rules.bzl", "rescript_compiler")
rescript_compiler(
name = "darwin",
bsc = ":darwin/bsc.exe",
bsb_helper = ":darwin/bsb_helper.exe",
visibility = ["//visibility:public"],
)
rescript_compiler(
name = "linux",
bsc = ":linux/bsc.exe",
bsb_helper = ":linux/bsb_helper.exe",
visibility = ["//visibility:public"],
)
rescript_compiler(
name = "windows",
bsc = ":win32/bsc.exe",
bsb_helper = ":win32/bsb_helper.exe",
visibility = ["//visibility:public"],
)
| 1.171875
| 1
|
qutip/core/data/constant.py
|
jakelishman/qutip
| 0
|
12775935
|
<reponame>jakelishman/qutip<gh_stars>0
# This module exists to supply a couple of very standard constant matrices
# which are used in the data layer, and within `Qobj` itself. Other matrices
# (e.g. `create`) should not be here, but should be defined within the
# higher-level components of QuTiP instead.
from . import csr, dense
from .csr import CSR
from .dense import Dense
from .dispatch import Dispatcher as _Dispatcher
import inspect as _inspect
__all__ = ['zeros', 'identity']
zeros = _Dispatcher(
_inspect.Signature([
_inspect.Parameter('rows', _inspect.Parameter.POSITIONAL_OR_KEYWORD),
_inspect.Parameter('cols', _inspect.Parameter.POSITIONAL_OR_KEYWORD),
]),
name='zeros',
module=__name__,
inputs=(),
out=True,
)
zeros.__doc__ =\
"""
Create matrix representation of 0 with the given dimensions.
Depending on the selected output type, this may or may not actually
    contain explicit values; sparse matrices will typically contain nothing
(which is their representation of 0), and dense matrices will still be
filled.
Arguments
---------
rows, cols : int
The number of rows and columns in the output matrix.
"""
zeros.add_specialisations([
(CSR, csr.zeros),
(Dense, dense.zeros),
], _defer=True)
identity = _Dispatcher(
_inspect.Signature([
_inspect.Parameter('dimension',
_inspect.Parameter.POSITIONAL_OR_KEYWORD),
_inspect.Parameter('scale', _inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=1),
]),
name='identity',
module=__name__,
inputs=(),
out=True,
)
identity.__doc__ =\
"""
Create a square identity matrix of the given dimension. Optionally, the
`scale` can be given, where all the diagonal elements will be that instead
of 1.
Arguments
---------
dimension : int
The dimension of the square output identity matrix.
scale : complex, optional
The element which should be placed on the diagonal.
"""
identity.add_specialisations([
(CSR, csr.identity),
(Dense, dense.identity),
], _defer=True)
del _Dispatcher, _inspect
| 2.59375
| 3
|
tests/example_handlers.py
|
svenhartmann/mediatr_py
| 0
|
12775936
|
from tests.example_queries import GetArrayQuery, GetArrayQuery1
async def get_array_handler(request: GetArrayQuery):
items = list()
for i in range(0, request.items_count):
items.append(i)
return items
def get_array_handler_sync(request: GetArrayQuery):
items = list()
for i in range(0, request.items_count):
items.append(i)
return items
def get_array_query_behavior(request: GetArrayQuery, next):
request.items_count = 4
return next()
async def get_array_query_behavior_3(request: GetArrayQuery, next):
request.items_count = 3
return await next()
async def get_array_query_behavior_6(request: GetArrayQuery, next):
array1 = await next()
array1.append(0)
array1.append(0)
array1.append(0)
return array1
class GetArrayQueryHandler():
def handle(self, request: GetArrayQuery1):
items = list()
for i in range(0, request.items_count):
items.append(i)
return items
class GetArrayQueryBehavior():
def handle(self, request: GetArrayQuery1, next):
request.items_count = 4
return next()
def common_log_behavior(request: object, next):
request.updated_at = '123'
return next()
| 2.4375
| 2
|
jp.atcoder/abc090/abc090_b/11471132.py
|
kagemeka/atcoder-submissions
| 1
|
12775937
|
import sys
def cnt(n):
m = str(n)
l = len(m)
if l == 1:
return n + 1
tot = 0
tot += pow(10, (l - 1) // 2) * (int(m[0]) - 1)
tot += pow(10, l // 2) - 1 - pow(10, l // 2 - 1) * (l & 1 ^ 1)
while l >= 2:
l -= 2
if l == 0:
tot += m[1] >= m[0]
elif l == 1:
tot += int(m[1]) + 1 - (m[-1] < m[0])
else:
m = str(int(m[1:-1]) - (m[-1] < m[0]))
m = "0" * (l - len(m)) + m
tot += int(m[0]) * pow(10, (l - 1) // 2)
return tot
a, b = map(int, sys.stdin.readline().split())
def main():
print(cnt(b) - cnt(a - 1))
if __name__ == "__main__":
main()
| 2.921875
| 3
|
UnitTest/MNIST_Test.py
|
Mostafa-ashraf19/TourchPIP
| 0
|
12775938
|
from DLFrameWork.forward import NetWork
from DLFrameWork.dataset import FashionMNIST,DataLoader
if __name__ == '__main__':
FMNIST = FashionMNIST(path='MNIST_Data',download=False,train=True)
dLoader = DataLoader(FMNIST,batchsize=100,shuffling=False,normalization={'Transform':True})
# (784,256),(256,128),(128,64),(64,10)
net = NetWork((784,256,128,64,10),('ReLU','ReLU','ReLU','SoftMax'),optimType={'Momeuntum':True})
print(net)
costs = []
print_cost = True
epochs = 10
for i in range(epochs):
cost = 0.0
for j,(images,labels) in enumerate(dLoader):
ourimages = images.T
ourlabel = labels.T
innercost = net.fit(ourimages,ourlabel,learning_rate =0.02)
cost += innercost
# print('iteration num {},inner cost is {}'.format(j, innercost))
if print_cost:# and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, cost/600))
print('-'*10)
images, labels = next(dLoader)
net.Prediction(images.T,labels.T,net.Parameters())
| 3.203125
| 3
|
data-science-master/Section-2-Basics-of-Python-Programming/Lec-2.15-Creating-Python-Modules-and-Packages/module-files/myscript.py
|
Hamid-Ali-99/Python_Just_Python
| 0
|
12775939
|
def myFunction():
print('The value of __name__ is ' + __name__)
def main():
myFunction()
if __name__ == '__main__':
main()
| 3.046875
| 3
|
build/setenv.py
|
simonaoliver/metageta
| 0
|
12775940
|
<reponame>simonaoliver/metageta
import os,sys
#General vars
CURDIR=os.path.dirname(os.path.abspath(__file__))
TOPDIR=os.path.dirname(CURDIR)
DOWNLOAD_DIR=TOPDIR+'\\downloads'
#Default vars
PY_VER='Python27'
BIN_DIR=TOPDIR+'\\bin'
PY_DIR=BIN_DIR+'\\'+PY_VER #Don't mess with PYTHONHOME
############################################################
#Check environment settings in case they've been overridden
env=os.environ
CURDIR=env.get('CURDIR',CURDIR)
TOPDIR=env.get('TOPDIR',os.path.dirname(CURDIR))
DOWNLOAD_DIR=env.get('DOWNLOAD_DIR',DOWNLOAD_DIR)
PY_VER=env.get('PY_VER',PY_VER)
BIN_DIR=env.get('BIN_DIR',BIN_DIR)
PY_DIR=env.get('PY_DIR',PY_DIR)
#Hide from autocomplete IDEs
del os
del sys
del env
| 2.328125
| 2
|
__init__.py
|
challenger-zpp/dataflow
| 0
|
12775941
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 2 10:39:47 2019
@author: ldh
"""
# __init__.py
| 1.078125
| 1
|
log_decorator/log.py
|
Pavel-Egorov/log_decorator
| 0
|
12775942
|
<gh_stars>0
import inspect
import json
import logging
import re
import time
from copy import deepcopy
from uuid import uuid1
from wrapt import decorator
HIDE_ANNOTATION = 'hide'
HIDDEN_VALUE = 'hidden'
SECONDS_TO_MS = 1000
LOGS_COUNTER = {}
def get_logger(logger_name='service_logger'):
logger = logging.getLogger(logger_name)
logger.propagate = False
return logger
def log(
logger_inst=get_logger(),
lvl: int = logging.INFO,
*,
hide_output=False,
hidden_params=(),
exceptions_only=False,
track_exec_time=False,
frequency=None,
exception_hook=None,
):
# noinspection DuplicatedCode
@decorator
def _log(wrapped, instance, args, kwargs):
func_name = f'{wrapped.__module__}.{wrapped.__qualname__}'
extra = {'call_id': uuid1().hex, 'function': func_name}
send_log = True
if frequency is not None:
log_counter = LOGS_COUNTER.setdefault(func_name, 0) + 1
LOGS_COUNTER[func_name] = log_counter
if log_counter % frequency != 0:
send_log = False
try:
params = inspect.getfullargspec(wrapped)
extra['input_data'] = get_logged_args(
params,
[instance] + list(args) if instance else args,
kwargs,
hidden_params,
)
if send_log and not exceptions_only:
logger_inst.log(level=lvl, msg=f'call {func_name}', extra=extra)
start_time = time.time()
result = wrapped(*args, **kwargs)
if track_exec_time:
extra['execution_time_ms'] = int((time.time() - start_time) * SECONDS_TO_MS)
extra['result'] = normalize_for_log(result) if not hide_output else HIDDEN_VALUE
if send_log and not exceptions_only:
logger_inst.log(level=lvl, msg=f'return {func_name}', extra=extra)
return result
except Exception as exc: # noqa
error_msg = f'error in {func_name}'
if send_log:
logger_inst.exception(msg=error_msg, extra=extra if extra is not None else {})
if exception_hook is not None:
exception_hook(logger_inst, exc, extra)
if hasattr(exc, 'return_value'):
return exc.return_value
raise
return _log
def get_logged_args(params, args, kwargs, hidden_params):
result = {}
annotations = params.annotations
for i, v in enumerate(args[:len(params.args)]):
arg_name = params.args[i]
arg_value = _hide_items(v, arg_name, annotations, hidden_params)
result[arg_name] = normalize_for_log(arg_value)
varargs = params.varargs
if varargs:
if _hide_items(args[len(params.args):], varargs, annotations, hidden_params) == HIDDEN_VALUE:
result['*args'] = f'hidden {len(args) - len(params.args)} args'
else:
result['*args'] = tuple(normalize_for_log(i) for i in args[len(params.args):])
for k, v in kwargs.items():
if params.varkw and k not in params.kwonlyargs and k not in params.args:
result[k] = HIDDEN_VALUE
continue
kwarg = _hide_items(v, k, annotations, hidden_params)
result[k] = normalize_for_log(kwarg)
return result
def normalize_for_log(value):
if isinstance(value, bool) or value is None:
return str(value)
elif isinstance(value, dict):
return {k: normalize_for_log(v) for k, v in value.items()}
elif isinstance(value, (list, set, frozenset, tuple)):
return type(value)(normalize_for_log(i) for i in value)
else:
return _get_log_repr(value)
def _get_log_repr(value):
has_log_id = hasattr(value, 'get_log_id')
if has_log_id:
return value.get_log_id()
try:
json.dumps(value)
return value
except TypeError:
return str(value)
def _hide_items(item, item_name, annotations, hidden_params):
if item_name in hidden_params:
return HIDDEN_VALUE
item_annotation = annotations.get(item_name)
if item_annotation is None or isinstance(item_annotation, type):
hide_annotation = []
elif isinstance(item_annotation, str):
hide_annotation = [item_annotation]
else:
hide_annotation = item_annotation
hide_pointers = []
for i in hide_annotation:
if i == HIDE_ANNOTATION:
return HIDDEN_VALUE
if re.match(HIDE_ANNOTATION, str(i)):
hide_pointers.append(i.split('__')[1:])
for i in hidden_params:
if re.match(item_name, i):
pointer = i.split('__')[1:]
if pointer not in hide_pointers:
hide_pointers.append(pointer)
if not hide_pointers:
return item
result = deepcopy(item)
for i in hide_pointers:
try:
result = _hide_items_impl(result, i)
except (KeyError, IndexError):
continue
return result
def _hide_items_impl(item, pointers):
pointer = pointers[0]
if isinstance(item, list):
pointer = int(pointer)
if (isinstance(item[pointer], dict) or isinstance(item[pointer], list)) and len(pointers) > 1:
item[pointer] = _hide_items_impl(item[pointer], pointers[1:])
else:
item[pointer] = HIDDEN_VALUE
return item
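# --- Illustrative usage sketch (not part of the original module) ---
# `log` wraps a callable, logs its call and return (or exceptions only),
# hides named parameters, and can record execution time. The function below
# is hypothetical and exists only to demonstrate the decorator.
@log(track_exec_time=True, hidden_params=('password',))
def _example_authenticate(username, password):
    # the 'password' argument is reported as 'hidden' in the structured log
    return bool(username) and bool(password)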
| 2.25
| 2
|
extraPackages/matplotlib-3.0.3/examples/lines_bars_and_markers/vline_hline_demo.py
|
dolboBobo/python3_ios
| 130
|
12775943
|
<reponame>dolboBobo/python3_ios
"""
=================
hlines and vlines
=================
This example showcases the functions hlines and vlines.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0.0, 5.0, 0.1)
s = np.exp(-t) + np.sin(2 * np.pi * t) + 1
nse = np.random.normal(0.0, 0.3, t.shape) * s
fig, (vax, hax) = plt.subplots(1, 2, figsize=(12, 6))
vax.plot(t, s + nse, '^')
vax.vlines(t, [0], s)
# By using ``transform=vax.get_xaxis_transform()`` the y coordinates are scaled
# such that 0 maps to the bottom of the axes and 1 to the top.
vax.vlines([1, 2], 0, 1, transform=vax.get_xaxis_transform(), colors='r')
vax.set_xlabel('time (s)')
vax.set_title('Vertical lines demo')
hax.plot(s + nse, t, '^')
hax.hlines(t, [0], s, lw=2)
hax.set_xlabel('time (s)')
hax.set_title('Horizontal lines demo')
plt.show()
| 3.515625
| 4
|
cbotami.py
|
sciutrux/cbotami
| 0
|
12775944
|
<reponame>sciutrux/cbotami<filename>cbotami.py<gh_stars>0
from chatterbot import ChatBot
from chatterbot.comparisons import LevenshteinDistance, JaccardSimilarity, SpacySimilarity
from chatterbot.response_selection import get_first_response, get_most_frequent_response, get_random_response
from chatterbot.filters import get_recent_repeated_responses
from chatterbot.conversation import Statement
# from comparisons import CustomLevenshteinDistance
# override Chatterbot PosLemmaTagger
from tagging import CustomPosLemmaTagger
# add "JustMyCode": false into launch.json to debug Chatterbot code
# under \Lib\site-packages\chatterbot
class CBotAmI():
"""
Conversational dialog chat bot.
"""
def __init__(self, config_data):
# TODO - in specific_response.py can_process() expects to compare to a statement,
# not text str?
# does not work with Statement either - bug?
input_text_statement = Statement(text=config_data.specificResponseAdapter.input_text)
read_only = False
if config_data.read_only:
read_only = True
self.bot = ChatBot(
config_data.bot_name,
storage_adapter = 'chatterbot.storage.' + config_data.storage_adapter,
database_uri = config_data.database_uri,
preprocessors = [
'chatterbot.preprocessors.clean_whitespace'
],
logic_adapters = [
'chatterbot.logic.MathematicalEvaluation',
# UnitConversion needs pint installed
# 'chatterbot.logic.UnitConversion',
# TimeLogic needs nltk installed
# 'chatterbot.logic.TimeLogicAdapter',
# 'chatterbot.logic.BestMatch',
{
'import_path': 'chatterbot.logic.SpecificResponseAdapter',
'input_text': input_text_statement,
'output_text': config_data.specificResponseAdapter.output_text
},
# {
# 'import_path': 'chatterbot.logic.BestMatch',
# 'default_response': config_data.bestMatch.default_response,
# 'maximum_similarity_threshold': config_data.bestMatch.maximum_similarity_threshold
# },
{
'import_path': 'best_match.CustomBestMatch',
'default_response': config_data.bestMatch.default_response,
'maximum_similarity_threshold': config_data.bestMatch.maximum_similarity_threshold
}
],
filters = [
get_recent_repeated_responses
],
# options for statement comparison:
# - LevenshteinDistance
# - SpacySimilarity
# - JaccardSimilarity
statement_comparison_function = SpacySimilarity,
# options for response selection:
# - get_first_response
# - get_most_frequent_response
# - get_random_response
response_selection_method = get_most_frequent_response,
read_only = read_only
)
# override Chatterbot PosLemmaTagger get_bigram_pair_string function
# POS tags were not suitable for how conversations are processed
custom_tagger = CustomPosLemmaTagger()
self.bot.storage.tagger.get_bigram_pair_string = custom_tagger.get_bigram_pair_string
def get_response(self, sentence):
return self.bot.get_response(sentence)
def learn_response(self, response, sentence):
# create statement pair similarly to corpus trainer
# chatterbot method creates single line with no tagging
statements_to_create = []
statement_search_text = self.bot.storage.tagger.get_bigram_pair_string(sentence)
statement = Statement(
text=sentence,
search_text=statement_search_text,
in_response_to=None,
search_in_response_to='',
conversation='training'
)
# statement.add_tags(*categories)
# statement = get_preprocessed_statement(statement)
statements_to_create.append(statement)
response_search_text = self.bot.storage.tagger.get_bigram_pair_string(response.text)
response_statement = Statement(
text=response.text,
search_text=response_search_text,
in_response_to=statement.text,
search_in_response_to=statement_search_text,
conversation='training'
)
# statement.add_tags(*categories)
# statement = get_preprocessed_statement(statement)
statements_to_create.append(response_statement)
self.bot.storage.create_many(statements_to_create)
# return self.bot.learn_response(response, sentence)
| 2.484375
| 2
|
process_metrics/metrics/tsv_metric.py
|
tmooney/qc-metric-aggregator
| 1
|
12775945
|
<reponame>tmooney/qc-metric-aggregator
import re
import csv
import glob
import os.path
from typing import Dict
from abc import ABC, abstractmethod
class TSVMetric:
@abstractmethod
def metric_file_pattern(self) -> str:
pass
@abstractmethod
def metric_column_name(self) -> str:
pass
def extract_metric(self, metrics_dir: str) -> str:
metrics_file = self.identify_metrics_file(metrics_dir)
return self.read_metric(metrics_file)
def delimiter(self) -> str:
return "\t"
def identify_metrics_file(self, metrics_dir: str) -> str:
regex = re.compile(
self.metric_file_pattern(),
re.IGNORECASE)
glob_path = os.path.join(metrics_dir, '**')
all_files = glob.glob(glob_path, recursive = True)
candidate_files = [f for f in all_files if regex.match(f)]
if len(candidate_files) > 1:
raise Exception(f"Ambiguous metrics file. Matcher {self.metric_file_pattern()} matches multiple files in {metrics_dir}.")
if len(candidate_files) == 0:
raise Exception(f"Metrics file not found. Matcher {self.metric_file_pattern()} matches no files in {metrics_dir}.")
return candidate_files[0]
def read_metric(self, filename: str) -> str:
values = []
with open(filename) as file:
if self.skip_commented_lines():
file_without_comments = filter(lambda row: row.strip() and not row.startswith('#'), file)
reader = csv.DictReader(file_without_comments, delimiter=self.delimiter())
else:
reader = csv.DictReader(file, delimiter=self.delimiter())
for row in reader:
if self.custom_filter(row):
values.append(row[self.metric_column_name()])
if self.take_first_value():
break
if len(values) > 1 and not self.take_first_value():
raise Exception(f"Ambiguous metric value. Found {len(values)} values for {self.metric_column_name()}.")
if len(values) == 0:
raise Exception(f"Missing metric value. Found {len(values)} values for {self.metric_column_name()}.")
return values[0]
def skip_commented_lines(self) -> bool:
return True
def take_first_value(self) -> bool:
return False
def custom_filter(self, row: Dict[str,str]) -> bool:
return True
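# --- Illustrative sketch (not part of the original module) ---
# A hypothetical concrete metric: a subclass only declares which file holds
# the value and which column to read; TSVMetric does the file lookup and
# parsing. The pattern and column name below are assumptions.
class ExampleDuplicationMetric(TSVMetric):
    def metric_file_pattern(self) -> str:
        return r'.*duplication_metrics\.txt$'

    def metric_column_name(self) -> str:
        return 'PERCENT_DUPLICATION'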
| 2.578125
| 3
|
Entry Widget & Grid Layout In Tkinter/Code-1.py
|
Ranjan2104/Tkinter-GUI--Series
| 2
|
12775946
|
<gh_stars>1-10
from tkinter import *
def getvals():
print(f"The value of username is {uservalue.get()}")
print(f"The value of password is {passvalue.get()}")
root = Tk()
root.geometry("655x333")
user = Label(root, text="Username")
password = Label(root, text="Password")
user.grid()
password.grid(row=1)
# Variable classes in tkinter
# BooleanVar, DoubleVar, IntVar, StringVar
uservalue = StringVar()
passvalue = StringVar()
userentry = Entry(root, textvariable = uservalue)
passentry = Entry(root, textvariable = passvalue)
userentry.grid(row=0, column=1)
passentry.grid(row=1, column=1)
Button(text="Submit", command=getvals).grid()
root.mainloop()
| 3.25
| 3
|
dash_website/utils/graphs.py
|
SamuelDiai/Dash-Website
| 0
|
12775947
|
<filename>dash_website/utils/graphs.py
import numpy as np
import plotly.graph_objs as go
from plotly.figure_factory import create_dendrogram
from dash_website import GRAPH_SIZE
from dash_website.utils import BLUE_WHITE_RED, MAX_LENGTH_CATEGORY
def heatmap_by_clustering(table_correlations, hovertemplate, customdata, zmin=-1, zmax=1):
fig = create_dendrogram(table_correlations.replace(np.nan, 0), orientation="bottom", distfun=lambda df: 1 - df)
for scatter in fig["data"]:
scatter["yaxis"] = "y2"
order_dendrogram = list(map(int, fig["layout"]["xaxis"]["ticktext"]))
labels = table_correlations.columns[order_dendrogram]
fig.update_layout(xaxis={"ticktext": labels, "mirror": False})
fig.update_layout(yaxis2={"domain": [0.85, 1], "showticklabels": False, "showgrid": False, "zeroline": False})
heat_correlations = table_correlations.loc[labels, labels].values
if customdata is not None:
heat_customdata = customdata.loc[labels, labels].values
else:
heat_customdata = None
heatmap = go.Heatmap(
x=fig["layout"]["xaxis"]["tickvals"],
y=fig["layout"]["xaxis"]["tickvals"],
z=heat_correlations,
colorscale=BLUE_WHITE_RED,
customdata=heat_customdata,
hovertemplate=hovertemplate,
zmin=zmin,
zmax=zmax,
)
fig.update_layout(
yaxis={
"domain": [0, 0.85],
"mirror": False,
"showgrid": False,
"zeroline": False,
"ticktext": labels,
"tickvals": fig["layout"]["xaxis"]["tickvals"],
"showticklabels": True,
"ticks": "outside",
"tickfont": {"size": 15},
},
xaxis={"tickfont": {"size": 15}},
)
fig.add_trace(heatmap)
fig["layout"]["width"] = 1100
fig["layout"]["height"] = 1100
return fig
def heatmap_by_sorted_dimensions(sorted_table_correlations, hovertemplate, sorted_customdata, zmin=-1, zmax=1):
heatmap = go.Heatmap(
x=np.arange(5, 10 * sorted_table_correlations.shape[1] + 5, 10),
y=np.arange(5, 10 * sorted_table_correlations.shape[1] + 5, 10),
z=sorted_table_correlations,
colorscale=BLUE_WHITE_RED,
customdata=sorted_customdata,
hovertemplate=hovertemplate,
zmin=zmin,
zmax=zmax,
)
fig = go.Figure(heatmap)
fig.update_layout(
xaxis={
"tickvals": np.arange(5, 10 * sorted_table_correlations.shape[1] + 5, 10),
"ticktext": [" - ".join(elem) for elem in sorted_table_correlations.columns.values],
"tickfont": {"size": 15},
},
yaxis={
"tickvals": np.arange(5, 10 * sorted_table_correlations.shape[0] + 5, 10),
"ticktext": [" - ".join(elem) for elem in sorted_table_correlations.index.values],
"tickfont": {"size": 15},
},
)
return fig
def add_custom_legend_axis(
fig,
indexes,
outer_margin_level_1=-60,
inner_margin_level_1=-30,
margin_level_2=0,
size_level_1=11,
size_level_2=9,
horizontal=True,
):
name_level_1, name_level_2 = indexes.names[:2]
indexes_info = indexes.to_frame()[[name_level_1, name_level_2]].reset_index(drop=True)
if horizontal:
indexes_info["position"] = fig["layout"]["xaxis"]["tickvals"]
else:
indexes_info["position"] = fig["layout"]["yaxis"]["tickvals"]
indexes_info.set_index([name_level_1, name_level_2], inplace=True)
lines = []
annotations = []
for level_1 in indexes_info.index.get_level_values(name_level_1).drop_duplicates():
min_position = indexes_info.loc[level_1].min()
max_position = indexes_info.loc[level_1].max()
line, annotation = add_line_and_annotation(
level_1, min_position, max_position, inner_margin_level_1, outer_margin_level_1, size_level_1, horizontal
)
lines.append(line)
annotations.append(annotation)
for level_2 in indexes_info.loc[level_1].index.get_level_values(name_level_2).drop_duplicates():
submin_position = indexes_info.loc[(level_1, level_2)].min()
submax_position = indexes_info.loc[(level_1, level_2)].max()
line, annotation = add_line_and_annotation(
level_2,
submin_position,
submax_position,
margin_level_2,
inner_margin_level_1,
size_level_2,
horizontal,
)
lines.append(line)
annotations.append(annotation)
# The final top/right line
line, _ = add_line_and_annotation(
level_1,
min_position,
max_position,
margin_level_2,
outer_margin_level_1,
size_level_2,
horizontal,
final=True,
)
lines.append(line)
if fig["layout"]["shapes"] == ():
fig["layout"]["shapes"] = lines
fig["layout"]["annotations"] = annotations
else:
fig["layout"]["shapes"] = list(fig["layout"]["shapes"]) + lines
fig["layout"]["annotations"] = list(fig["layout"]["annotations"]) + annotations
fig.update_layout(yaxis={"showticklabels": False}, xaxis={"showticklabels": False})
return fig
def add_line_and_annotation(
text, min_position, max_position, inner_margin, outer_margin, size, horizontal, final=False
):
if horizontal:
textangle = 90
first_axis, second_axis = ["x", "y"]
else:
textangle = 0
first_axis, second_axis = ["y", "x"]
if not final:
to_match_heatmap = -10 / 2
position = min_position
else:
to_match_heatmap = +10 / 2
position = max_position
if len(text) > MAX_LENGTH_CATEGORY:
text = text[:MAX_LENGTH_CATEGORY] + "..."
return (
{
"type": "line",
"xref": "x",
"yref": "y",
f"{first_axis}0": float(position + to_match_heatmap),
f"{second_axis}0": inner_margin,
f"{first_axis}1": float(position + to_match_heatmap),
f"{second_axis}1": outer_margin,
"line": {"color": "Black", "width": 0.5},
},
{
"text": text,
"xref": "x",
"yref": "y",
first_axis: float((min_position + max_position) / 2),
second_axis: (inner_margin + outer_margin) / 2,
"showarrow": False,
"textangle": textangle,
"font": {"size": size},
},
)
def histogram_correlation(table_correlations):
correlations = table_correlations.values[np.triu_indices(table_correlations.shape[0], k=1)]
histogram = go.Histogram(x=correlations, histnorm="percent", xbins={"size": 0.01})
fig = go.Figure(histogram)
fig.update_layout(
height=500,
width=GRAPH_SIZE,
xaxis_title_text="Correlation",
xaxis_title_font={"size": 25},
yaxis_title_text="Count (in %)",
yaxis_title_font={"size": 25},
bargap=0.2,
bargroupgap=0.1,
margin={"l": 0, "r": 0, "b": 0, "t": 0},
)
return fig
| 2.78125
| 3
|
machine-learning/QiWei-Python-Chinese/class/class_04.py
|
yw-fang/MLreadingnotes
| 2
|
12775948
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__maintainer__ = "<NAME>"
__email__ = '<EMAIL>'
__license__ = 'Apache License 2.0'
__creation_date__= 'Dec. 26, 2018'
"""
single inheritance
"""
class Person:
"""
define a CLASS Person with three methods
"""
def speak(self):
print('How are you?')
def listheight(self):
print('Height is 170 cm')
def listweight(self, n):
print('weight is', n)
class Girl(Person):
"""
class Girl inherits the attributes and methods
in the class Person
"""
def listheight(self):
"""
        override the method listheight
"""
print('HEIGHT is 165 cm')
if __name__ == '__main__':
cang = Girl()
cang.listheight()
cang.speak()
cang.listweight(80)
| 3.78125
| 4
|
python/examples/kaitai/icc_4.py
|
carsonharmon/binaryninja-api
| 20
|
12775949
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Icc4(KaitaiStruct):
SEQ_FIELDS = ["header", "tag_table"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header']['start'] = self._io.pos()
self.header = self._root.ProfileHeader(self._io, self, self._root)
self.header._read()
self._debug['header']['end'] = self._io.pos()
self._debug['tag_table']['start'] = self._io.pos()
self.tag_table = self._root.TagTable(self._io, self, self._root)
self.tag_table._read()
self._debug['tag_table']['end'] = self._io.pos()
class U8Fixed8Number(KaitaiStruct):
SEQ_FIELDS = ["number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_bytes(2)
self._debug['number']['end'] = self._io.pos()
class U16Fixed16Number(KaitaiStruct):
SEQ_FIELDS = ["number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_bytes(4)
self._debug['number']['end'] = self._io.pos()
class StandardIlluminantEncoding(KaitaiStruct):
class StandardIlluminantEncodings(Enum):
unknown = 0
d50 = 1
d65 = 2
d93 = 3
f2 = 4
d55 = 5
a = 6
equi_power = 7
f8 = 8
SEQ_FIELDS = ["standard_illuminant_encoding"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['standard_illuminant_encoding']['start'] = self._io.pos()
self.standard_illuminant_encoding = KaitaiStream.resolve_enum(self._root.StandardIlluminantEncoding.StandardIlluminantEncodings, self._io.read_u4be())
self._debug['standard_illuminant_encoding']['end'] = self._io.pos()
class ProfileHeader(KaitaiStruct):
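        # Fixed 128-byte profile header that precedes the tag table in an ICC v4 file.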
class CmmSignatures(Enum):
the_imaging_factory_cmm = 858931796
agfa_cmm = 1094929747
adobe_cmm = 1094992453
color_gear_cmm = 1128484179
logosync_cmm = 1147629395
efi_cmm = 1162234144
exact_scan_cmm = 1163411779
fuji_film_cmm = 1179000864
harlequin_rip_cmm = 1212370253
heidelberg_cmm = 1212435744
kodak_cmm = 1262701907
konica_minolta_cmm = 1296256324
device_link_cmm = 1380404563
sample_icc_cmm = 1397310275
mutoh_cmm = 1397311310
toshiba_cmm = 1413696845
color_gear_cmm_lite = 1430471501
color_gear_cmm_c = 1430474067
windows_color_system_cmm = 1464029984
ware_to_go_cmm = 1465141024
apple_cmm = 1634758764
argyll_cms_cmm = 1634887532
little_cms_cmm = 1818455411
zoran_cmm = 2053320752
class PrimaryPlatforms(Enum):
apple_computer_inc = 1095782476
microsoft_corporation = 1297303124
silicon_graphics_inc = 1397180704
sun_microsystems = 1398099543
class ProfileClasses(Enum):
abstract_profile = 1633842036
device_link_profile = 1818848875
display_device_profile = 1835955314
named_color_profile = 1852662636
output_device_profile = 1886549106
input_device_profile = 1935896178
color_space_profile = 1936744803
class RenderingIntents(Enum):
perceptual = 0
media_relative_colorimetric = 1
saturation = 2
icc_absolute_colorimetric = 3
class DataColourSpaces(Enum):
two_colour = 843271250
three_colour = 860048466
four_colour = 876825682
five_colour = 893602898
six_colour = 910380114
seven_colour = 927157330
eight_colour = 943934546
nine_colour = 960711762
ten_colour = 1094929490
eleven_colour = 1111706706
twelve_colour = 1128483922
cmy = 1129142560
cmyk = 1129142603
thirteen_colour = 1145261138
fourteen_colour = 1162038354
fifteen_colour = 1178815570
gray = 1196573017
hls = 1212961568
hsv = 1213421088
cielab_or_pcslab = 1281450528
cieluv = 1282766368
rgb = 1380401696
nciexyz_or_pcsxyz = 1482250784
ycbcr = 1497588338
cieyxy = 1501067552
SEQ_FIELDS = ["size", "preferred_cmm_type", "version", "device_class", "color_space", "pcs", "creation_date_time", "file_signature", "primary_platform", "profile_flags", "device_manufacturer", "device_model", "device_attributes", "rendering_intent", "nciexyz_values_of_illuminant_of_pcs", "creator", "identifier", "reserved_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['size']['start'] = self._io.pos()
self.size = self._io.read_u4be()
self._debug['size']['end'] = self._io.pos()
self._debug['preferred_cmm_type']['start'] = self._io.pos()
self.preferred_cmm_type = KaitaiStream.resolve_enum(self._root.ProfileHeader.CmmSignatures, self._io.read_u4be())
self._debug['preferred_cmm_type']['end'] = self._io.pos()
self._debug['version']['start'] = self._io.pos()
self.version = self._root.ProfileHeader.VersionField(self._io, self, self._root)
self.version._read()
self._debug['version']['end'] = self._io.pos()
self._debug['device_class']['start'] = self._io.pos()
self.device_class = KaitaiStream.resolve_enum(self._root.ProfileHeader.ProfileClasses, self._io.read_u4be())
self._debug['device_class']['end'] = self._io.pos()
self._debug['color_space']['start'] = self._io.pos()
self.color_space = KaitaiStream.resolve_enum(self._root.ProfileHeader.DataColourSpaces, self._io.read_u4be())
self._debug['color_space']['end'] = self._io.pos()
self._debug['pcs']['start'] = self._io.pos()
self.pcs = (self._io.read_bytes(4)).decode(u"ASCII")
self._debug['pcs']['end'] = self._io.pos()
self._debug['creation_date_time']['start'] = self._io.pos()
self.creation_date_time = self._root.DateTimeNumber(self._io, self, self._root)
self.creation_date_time._read()
self._debug['creation_date_time']['end'] = self._io.pos()
self._debug['file_signature']['start'] = self._io.pos()
self.file_signature = self._io.ensure_fixed_contents(b"\x61\x63\x73\x70")
self._debug['file_signature']['end'] = self._io.pos()
self._debug['primary_platform']['start'] = self._io.pos()
self.primary_platform = KaitaiStream.resolve_enum(self._root.ProfileHeader.PrimaryPlatforms, self._io.read_u4be())
self._debug['primary_platform']['end'] = self._io.pos()
self._debug['profile_flags']['start'] = self._io.pos()
self.profile_flags = self._root.ProfileHeader.ProfileFlags(self._io, self, self._root)
self.profile_flags._read()
self._debug['profile_flags']['end'] = self._io.pos()
self._debug['device_manufacturer']['start'] = self._io.pos()
self.device_manufacturer = self._root.DeviceManufacturer(self._io, self, self._root)
self.device_manufacturer._read()
self._debug['device_manufacturer']['end'] = self._io.pos()
self._debug['device_model']['start'] = self._io.pos()
self.device_model = (self._io.read_bytes(4)).decode(u"ASCII")
self._debug['device_model']['end'] = self._io.pos()
self._debug['device_attributes']['start'] = self._io.pos()
self.device_attributes = self._root.DeviceAttributes(self._io, self, self._root)
self.device_attributes._read()
self._debug['device_attributes']['end'] = self._io.pos()
self._debug['rendering_intent']['start'] = self._io.pos()
self.rendering_intent = KaitaiStream.resolve_enum(self._root.ProfileHeader.RenderingIntents, self._io.read_u4be())
self._debug['rendering_intent']['end'] = self._io.pos()
self._debug['nciexyz_values_of_illuminant_of_pcs']['start'] = self._io.pos()
self.nciexyz_values_of_illuminant_of_pcs = self._root.XyzNumber(self._io, self, self._root)
self.nciexyz_values_of_illuminant_of_pcs._read()
self._debug['nciexyz_values_of_illuminant_of_pcs']['end'] = self._io.pos()
self._debug['creator']['start'] = self._io.pos()
self.creator = self._root.DeviceManufacturer(self._io, self, self._root)
self.creator._read()
self._debug['creator']['end'] = self._io.pos()
self._debug['identifier']['start'] = self._io.pos()
self.identifier = self._io.read_bytes(16)
self._debug['identifier']['end'] = self._io.pos()
self._debug['reserved_data']['start'] = self._io.pos()
self.reserved_data = self._io.read_bytes(28)
self._debug['reserved_data']['end'] = self._io.pos()
class VersionField(KaitaiStruct):
SEQ_FIELDS = ["major", "minor", "bug_fix_level", "reserved"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['major']['start'] = self._io.pos()
self.major = self._io.ensure_fixed_contents(b"\x04")
self._debug['major']['end'] = self._io.pos()
self._debug['minor']['start'] = self._io.pos()
self.minor = self._io.read_bits_int(4)
self._debug['minor']['end'] = self._io.pos()
self._debug['bug_fix_level']['start'] = self._io.pos()
self.bug_fix_level = self._io.read_bits_int(4)
self._debug['bug_fix_level']['end'] = self._io.pos()
self._io.align_to_byte()
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
class ProfileFlags(KaitaiStruct):
SEQ_FIELDS = ["embedded_profile", "profile_can_be_used_independently_of_embedded_colour_data", "other_flags"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['embedded_profile']['start'] = self._io.pos()
self.embedded_profile = self._io.read_bits_int(1) != 0
self._debug['embedded_profile']['end'] = self._io.pos()
self._debug['profile_can_be_used_independently_of_embedded_colour_data']['start'] = self._io.pos()
self.profile_can_be_used_independently_of_embedded_colour_data = self._io.read_bits_int(1) != 0
self._debug['profile_can_be_used_independently_of_embedded_colour_data']['end'] = self._io.pos()
self._debug['other_flags']['start'] = self._io.pos()
self.other_flags = self._io.read_bits_int(30)
self._debug['other_flags']['end'] = self._io.pos()
class XyzNumber(KaitaiStruct):
SEQ_FIELDS = ["x", "y", "z"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['x']['start'] = self._io.pos()
self.x = self._io.read_bytes(4)
self._debug['x']['end'] = self._io.pos()
self._debug['y']['start'] = self._io.pos()
self.y = self._io.read_bytes(4)
self._debug['y']['end'] = self._io.pos()
self._debug['z']['start'] = self._io.pos()
self.z = self._io.read_bytes(4)
self._debug['z']['end'] = self._io.pos()
class DateTimeNumber(KaitaiStruct):
SEQ_FIELDS = ["year", "month", "day", "hour", "minute", "second"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['year']['start'] = self._io.pos()
self.year = self._io.read_u2be()
self._debug['year']['end'] = self._io.pos()
self._debug['month']['start'] = self._io.pos()
self.month = self._io.read_u2be()
self._debug['month']['end'] = self._io.pos()
self._debug['day']['start'] = self._io.pos()
self.day = self._io.read_u2be()
self._debug['day']['end'] = self._io.pos()
self._debug['hour']['start'] = self._io.pos()
self.hour = self._io.read_u2be()
self._debug['hour']['end'] = self._io.pos()
self._debug['minute']['start'] = self._io.pos()
self.minute = self._io.read_u2be()
self._debug['minute']['end'] = self._io.pos()
self._debug['second']['start'] = self._io.pos()
self.second = self._io.read_u2be()
self._debug['second']['end'] = self._io.pos()
class Response16Number(KaitaiStruct):
SEQ_FIELDS = ["number", "reserved", "measurement_value"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_u4be()
self._debug['number']['end'] = self._io.pos()
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['measurement_value']['start'] = self._io.pos()
self.measurement_value = self._root.S15Fixed16Number(self._io, self, self._root)
self.measurement_value._read()
self._debug['measurement_value']['end'] = self._io.pos()
class U1Fixed15Number(KaitaiStruct):
SEQ_FIELDS = ["number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_bytes(2)
self._debug['number']['end'] = self._io.pos()
class TagTable(KaitaiStruct):
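        # Tag directory: a 4-byte tag count followed by one 12-byte entry (signature, offset, size) per tag.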
SEQ_FIELDS = ["tag_count", "tags"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_count']['start'] = self._io.pos()
self.tag_count = self._io.read_u4be()
self._debug['tag_count']['end'] = self._io.pos()
self._debug['tags']['start'] = self._io.pos()
self.tags = [None] * (self.tag_count)
for i in range(self.tag_count):
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = self._root.TagTable.TagDefinition(self._io, self, self._root)
_t_tags._read()
self.tags[i] = _t_tags
self._debug['tags']['arr'][i]['end'] = self._io.pos()
self._debug['tags']['end'] = self._io.pos()
class TagDefinition(KaitaiStruct):
class TagSignatures(Enum):
a_to_b_0 = 1093812784
a_to_b_1 = 1093812785
a_to_b_2 = 1093812786
b_to_a_0 = 1110589744
b_to_a_1 = 1110589745
b_to_a_2 = 1110589746
b_to_d_0 = 1110590512
b_to_d_1 = 1110590513
b_to_d_2 = 1110590514
b_to_d_3 = 1110590515
d_to_b_0 = 1144144432
d_to_b_1 = 1144144433
d_to_b_2 = 1144144434
d_to_b_3 = 1144144435
blue_trc = 1649693251
blue_matrix_column = 1649957210
calibration_date_time = 1667329140
chromatic_adaptation = 1667785060
chromaticity = 1667789421
colorimetric_intent_image_state = 1667852659
colorant_table_out = 1668050804
colorant_order = 1668051567
colorant_table = 1668051572
copyright = 1668313716
profile_description = 1684370275
device_model_desc = 1684890724
device_mfg_desc = 1684893284
green_trc = 1733579331
green_matrix_column = 1733843290
gamut = 1734438260
gray_trc = 1800688195
luminance = 1819635049
measurement = 1835360627
named_color_2 = 1852009522
preview_0 = 1886545200
preview_1 = 1886545201
preview_2 = 1886545202
profile_sequence = 1886610801
profile_sequence_identifier = 1886611812
red_trc = 1918128707
red_matrix_column = 1918392666
output_response = 1919251312
perceptual_rendering_intent_gamut = 1919510320
saturation_rendering_intent_gamut = 1919510322
char_target = 1952543335
technology = 1952801640
viewing_conditions = 1986618743
viewing_cond_desc = 1987405156
media_white_point = 2004119668
class TagTypeSignatures(Enum):
xyz_type = 1482250784
colorant_table_type = 1668051572
curve_type = 1668641398
data_type = 1684108385
date_time_type = 1685350765
multi_function_a_to_b_table_type = 1832993312
multi_function_b_to_a_table_type = 1833058592
measurement_type = 1835360627
multi_function_table_with_one_byte_precision_type = 1835430961
multi_function_table_with_two_byte_precision_type = 1835430962
multi_localized_unicode_type = 1835824483
multi_process_elements_type = 1836082548
named_color_2_type = 1852009522
parametric_curve_type = 1885434465
profile_sequence_desc_type = 1886610801
profile_sequence_identifier_type = 1886611812
response_curve_set_16_type = 1919120178
s_15_fixed_16_array_type = 1936077618
signature_type = 1936287520
text_type = 1952807028
u_16_fixed_16_array_type = 1969632050
u_int_8_array_type = 1969827896
u_int_16_array_type = 1969828150
u_int_32_array_type = 1969828658
u_int_64_array_type = 1969829428
viewing_conditions_type = 1986618743
class MultiProcessElementsTypes(Enum):
bacs_element_type = 1648444243
clut_element_type = 1668052340
one_dimensional_curves_type = 1668641382
eacs_element_type = 1698775891
matrix_element_type = 1835103334
curve_set_element_table_type = 1835428980
formula_curve_segments_type = 1885434470
sampled_curve_segment_type = 1935764838
SEQ_FIELDS = ["tag_signature", "offset_to_data_element", "size_of_data_element"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_signature']['start'] = self._io.pos()
self.tag_signature = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagSignatures, self._io.read_u4be())
self._debug['tag_signature']['end'] = self._io.pos()
self._debug['offset_to_data_element']['start'] = self._io.pos()
self.offset_to_data_element = self._io.read_u4be()
self._debug['offset_to_data_element']['end'] = self._io.pos()
self._debug['size_of_data_element']['start'] = self._io.pos()
self.size_of_data_element = self._io.read_u4be()
self._debug['size_of_data_element']['end'] = self._io.pos()
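            # The nested classes below model individual tag payloads: each XxxTag
            # wrapper reads a 4-byte type signature and dispatches to the matching
            # XxxType parser for that tag's data element.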
class BlueMatrixColumnTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class DeviceMfgDescTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class NamedColor2Type(KaitaiStruct):
SEQ_FIELDS = ["reserved", "vendor_specific_flag", "count_of_named_colours", "number_of_device_coordinates_for_each_named_colour", "prefix_for_each_colour_name", "prefix_for_each_colour_name_padding", "suffix_for_each_colour_name", "suffix_for_each_colour_name_padding", "named_colour_definitions"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['vendor_specific_flag']['start'] = self._io.pos()
self.vendor_specific_flag = self._io.read_u4be()
self._debug['vendor_specific_flag']['end'] = self._io.pos()
self._debug['count_of_named_colours']['start'] = self._io.pos()
self.count_of_named_colours = self._io.read_u4be()
self._debug['count_of_named_colours']['end'] = self._io.pos()
self._debug['number_of_device_coordinates_for_each_named_colour']['start'] = self._io.pos()
self.number_of_device_coordinates_for_each_named_colour = self._io.read_u4be()
self._debug['number_of_device_coordinates_for_each_named_colour']['end'] = self._io.pos()
self._debug['prefix_for_each_colour_name']['start'] = self._io.pos()
self.prefix_for_each_colour_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['prefix_for_each_colour_name']['end'] = self._io.pos()
self._debug['prefix_for_each_colour_name_padding']['start'] = self._io.pos()
self.prefix_for_each_colour_name_padding = [None] * ((32 - len(self.prefix_for_each_colour_name)))
for i in range((32 - len(self.prefix_for_each_colour_name))):
if not 'arr' in self._debug['prefix_for_each_colour_name_padding']:
self._debug['prefix_for_each_colour_name_padding']['arr'] = []
self._debug['prefix_for_each_colour_name_padding']['arr'].append({'start': self._io.pos()})
                        self.prefix_for_each_colour_name_padding[i] = self._io.ensure_fixed_contents(b"\x00")
self._debug['prefix_for_each_colour_name_padding']['arr'][i]['end'] = self._io.pos()
self._debug['prefix_for_each_colour_name_padding']['end'] = self._io.pos()
self._debug['suffix_for_each_colour_name']['start'] = self._io.pos()
self.suffix_for_each_colour_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['suffix_for_each_colour_name']['end'] = self._io.pos()
self._debug['suffix_for_each_colour_name_padding']['start'] = self._io.pos()
self.suffix_for_each_colour_name_padding = [None] * ((32 - len(self.suffix_for_each_colour_name)))
for i in range((32 - len(self.suffix_for_each_colour_name))):
if not 'arr' in self._debug['suffix_for_each_colour_name_padding']:
self._debug['suffix_for_each_colour_name_padding']['arr'] = []
self._debug['suffix_for_each_colour_name_padding']['arr'].append({'start': self._io.pos()})
                        self.suffix_for_each_colour_name_padding[i] = self._io.ensure_fixed_contents(b"\x00")
self._debug['suffix_for_each_colour_name_padding']['arr'][i]['end'] = self._io.pos()
self._debug['suffix_for_each_colour_name_padding']['end'] = self._io.pos()
self._debug['named_colour_definitions']['start'] = self._io.pos()
self.named_colour_definitions = [None] * (self.count_of_named_colours)
for i in range(self.count_of_named_colours):
if not 'arr' in self._debug['named_colour_definitions']:
self._debug['named_colour_definitions']['arr'] = []
self._debug['named_colour_definitions']['arr'].append({'start': self._io.pos()})
_t_named_colour_definitions = self._root.TagTable.TagDefinition.NamedColor2Type.NamedColourDefinition(self._io, self, self._root)
_t_named_colour_definitions._read()
self.named_colour_definitions[i] = _t_named_colour_definitions
self._debug['named_colour_definitions']['arr'][i]['end'] = self._io.pos()
self._debug['named_colour_definitions']['end'] = self._io.pos()
class NamedColourDefinition(KaitaiStruct):
SEQ_FIELDS = ["root_name", "root_name_padding", "pcs_coordinates", "device_coordinates"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['root_name']['start'] = self._io.pos()
self.root_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['root_name']['end'] = self._io.pos()
self._debug['root_name_padding']['start'] = self._io.pos()
self.root_name_padding = [None] * ((32 - len(self.root_name)))
for i in range((32 - len(self.root_name))):
if not 'arr' in self._debug['root_name_padding']:
self._debug['root_name_padding']['arr'] = []
self._debug['root_name_padding']['arr'].append({'start': self._io.pos()})
                            self.root_name_padding[i] = self._io.ensure_fixed_contents(b"\x00")
self._debug['root_name_padding']['arr'][i]['end'] = self._io.pos()
self._debug['root_name_padding']['end'] = self._io.pos()
self._debug['pcs_coordinates']['start'] = self._io.pos()
self.pcs_coordinates = self._io.read_bytes(6)
self._debug['pcs_coordinates']['end'] = self._io.pos()
if self._parent.number_of_device_coordinates_for_each_named_colour > 0:
self._debug['device_coordinates']['start'] = self._io.pos()
                            self.device_coordinates = [None] * (self._parent.number_of_device_coordinates_for_each_named_colour)
                            for i in range(self._parent.number_of_device_coordinates_for_each_named_colour):
if not 'arr' in self._debug['device_coordinates']:
self._debug['device_coordinates']['arr'] = []
self._debug['device_coordinates']['arr'].append({'start': self._io.pos()})
self.device_coordinates[i] = self._io.read_u2be()
self._debug['device_coordinates']['arr'][i]['end'] = self._io.pos()
self._debug['device_coordinates']['end'] = self._io.pos()
class ViewingConditionsTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.viewing_conditions_type:
self.tag_data = self._root.TagTable.TagDefinition.ViewingConditionsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class BlueTrcTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ResponseCurveSet16Type(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_channels", "count_of_measurement_types", "response_curve_structure_offsets", "response_curve_structures"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_channels']['start'] = self._io.pos()
self.number_of_channels = self._io.read_u2be()
self._debug['number_of_channels']['end'] = self._io.pos()
self._debug['count_of_measurement_types']['start'] = self._io.pos()
self.count_of_measurement_types = self._io.read_u2be()
self._debug['count_of_measurement_types']['end'] = self._io.pos()
self._debug['response_curve_structure_offsets']['start'] = self._io.pos()
self.response_curve_structure_offsets = [None] * (self.count_of_measurement_types)
for i in range(self.count_of_measurement_types):
if not 'arr' in self._debug['response_curve_structure_offsets']:
self._debug['response_curve_structure_offsets']['arr'] = []
self._debug['response_curve_structure_offsets']['arr'].append({'start': self._io.pos()})
self.response_curve_structure_offsets[i] = self._io.read_u4be()
self._debug['response_curve_structure_offsets']['arr'][i]['end'] = self._io.pos()
self._debug['response_curve_structure_offsets']['end'] = self._io.pos()
self._debug['response_curve_structures']['start'] = self._io.pos()
self.response_curve_structures = self._io.read_bytes_full()
self._debug['response_curve_structures']['end'] = self._io.pos()
class CurveType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_entries", "curve_values", "curve_value"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_entries']['start'] = self._io.pos()
self.number_of_entries = self._io.read_u4be()
self._debug['number_of_entries']['end'] = self._io.pos()
if self.number_of_entries > 1:
self._debug['curve_values']['start'] = self._io.pos()
self.curve_values = [None] * (self.number_of_entries)
for i in range(self.number_of_entries):
if not 'arr' in self._debug['curve_values']:
self._debug['curve_values']['arr'] = []
self._debug['curve_values']['arr'].append({'start': self._io.pos()})
self.curve_values[i] = self._io.read_u4be()
self._debug['curve_values']['arr'][i]['end'] = self._io.pos()
self._debug['curve_values']['end'] = self._io.pos()
if self.number_of_entries == 1:
self._debug['curve_value']['start'] = self._io.pos()
self.curve_value = self._io.read_u1()
self._debug['curve_value']['end'] = self._io.pos()
class SaturationRenderingIntentGamutTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class XyzType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
_t_values = self._root.XyzNumber(self._io, self, self._root)
_t_values._read()
self.values.append(_t_values)
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
class Lut8Type(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_clut_grid_points", "padding", "encoded_e_parameters", "number_of_input_table_entries", "number_of_output_table_entries", "input_tables", "clut_values", "output_tables"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_input_channels']['start'] = self._io.pos()
self.number_of_input_channels = self._io.read_u1()
self._debug['number_of_input_channels']['end'] = self._io.pos()
self._debug['number_of_output_channels']['start'] = self._io.pos()
self.number_of_output_channels = self._io.read_u1()
self._debug['number_of_output_channels']['end'] = self._io.pos()
self._debug['number_of_clut_grid_points']['start'] = self._io.pos()
self.number_of_clut_grid_points = self._io.read_u1()
self._debug['number_of_clut_grid_points']['end'] = self._io.pos()
self._debug['padding']['start'] = self._io.pos()
self.padding = self._io.ensure_fixed_contents(b"\x00")
self._debug['padding']['end'] = self._io.pos()
self._debug['encoded_e_parameters']['start'] = self._io.pos()
self.encoded_e_parameters = [None] * (9)
for i in range(9):
if not 'arr' in self._debug['encoded_e_parameters']:
self._debug['encoded_e_parameters']['arr'] = []
self._debug['encoded_e_parameters']['arr'].append({'start': self._io.pos()})
self.encoded_e_parameters[i] = self._io.read_s4be()
self._debug['encoded_e_parameters']['arr'][i]['end'] = self._io.pos()
self._debug['encoded_e_parameters']['end'] = self._io.pos()
self._debug['number_of_input_table_entries']['start'] = self._io.pos()
self.number_of_input_table_entries = self._io.read_u4be()
self._debug['number_of_input_table_entries']['end'] = self._io.pos()
self._debug['number_of_output_table_entries']['start'] = self._io.pos()
self.number_of_output_table_entries = self._io.read_u4be()
self._debug['number_of_output_table_entries']['end'] = self._io.pos()
self._debug['input_tables']['start'] = self._io.pos()
self.input_tables = self._io.read_bytes((256 * self.number_of_input_channels))
self._debug['input_tables']['end'] = self._io.pos()
self._debug['clut_values']['start'] = self._io.pos()
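                    # CLUT size follows the ICC lut8Type layout: grid_points ** input_channels entries, one byte per output channel.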
                    self.clut_values = self._io.read_bytes(((self.number_of_clut_grid_points ** self.number_of_input_channels) * self.number_of_output_channels))
self._debug['clut_values']['end'] = self._io.pos()
self._debug['output_tables']['start'] = self._io.pos()
self.output_tables = self._io.read_bytes((256 * self.number_of_output_channels))
self._debug['output_tables']['end'] = self._io.pos()
class BToA2Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class LutAToBType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "padding", "offset_to_first_b_curve", "offset_to_matrix", "offset_to_first_m_curve", "offset_to_clut", "offset_to_first_a_curve", "data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_input_channels']['start'] = self._io.pos()
self.number_of_input_channels = self._io.read_u1()
self._debug['number_of_input_channels']['end'] = self._io.pos()
self._debug['number_of_output_channels']['start'] = self._io.pos()
self.number_of_output_channels = self._io.read_u1()
self._debug['number_of_output_channels']['end'] = self._io.pos()
self._debug['padding']['start'] = self._io.pos()
self.padding = self._io.ensure_fixed_contents(b"\x00\x00")
self._debug['padding']['end'] = self._io.pos()
self._debug['offset_to_first_b_curve']['start'] = self._io.pos()
self.offset_to_first_b_curve = self._io.read_u4be()
self._debug['offset_to_first_b_curve']['end'] = self._io.pos()
self._debug['offset_to_matrix']['start'] = self._io.pos()
self.offset_to_matrix = self._io.read_u4be()
self._debug['offset_to_matrix']['end'] = self._io.pos()
self._debug['offset_to_first_m_curve']['start'] = self._io.pos()
self.offset_to_first_m_curve = self._io.read_u4be()
self._debug['offset_to_first_m_curve']['end'] = self._io.pos()
self._debug['offset_to_clut']['start'] = self._io.pos()
self.offset_to_clut = self._io.read_u4be()
self._debug['offset_to_clut']['end'] = self._io.pos()
self._debug['offset_to_first_a_curve']['start'] = self._io.pos()
self.offset_to_first_a_curve = self._io.read_u4be()
self._debug['offset_to_first_a_curve']['end'] = self._io.pos()
self._debug['data']['start'] = self._io.pos()
self.data = self._io.read_bytes_full()
self._debug['data']['end'] = self._io.pos()
class BToA0Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class MediaWhitePointTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class Lut16Type(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_clut_grid_points", "padding", "encoded_e_parameters", "number_of_input_table_entries", "number_of_output_table_entries", "input_tables", "clut_values", "output_tables"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_input_channels']['start'] = self._io.pos()
self.number_of_input_channels = self._io.read_u1()
self._debug['number_of_input_channels']['end'] = self._io.pos()
self._debug['number_of_output_channels']['start'] = self._io.pos()
self.number_of_output_channels = self._io.read_u1()
self._debug['number_of_output_channels']['end'] = self._io.pos()
self._debug['number_of_clut_grid_points']['start'] = self._io.pos()
self.number_of_clut_grid_points = self._io.read_u1()
self._debug['number_of_clut_grid_points']['end'] = self._io.pos()
self._debug['padding']['start'] = self._io.pos()
self.padding = self._io.ensure_fixed_contents(b"\x00")
self._debug['padding']['end'] = self._io.pos()
self._debug['encoded_e_parameters']['start'] = self._io.pos()
self.encoded_e_parameters = [None] * (9)
for i in range(9):
if not 'arr' in self._debug['encoded_e_parameters']:
self._debug['encoded_e_parameters']['arr'] = []
self._debug['encoded_e_parameters']['arr'].append({'start': self._io.pos()})
self.encoded_e_parameters[i] = self._io.read_s4be()
self._debug['encoded_e_parameters']['arr'][i]['end'] = self._io.pos()
self._debug['encoded_e_parameters']['end'] = self._io.pos()
self._debug['number_of_input_table_entries']['start'] = self._io.pos()
self.number_of_input_table_entries = self._io.read_u4be()
self._debug['number_of_input_table_entries']['end'] = self._io.pos()
self._debug['number_of_output_table_entries']['start'] = self._io.pos()
self.number_of_output_table_entries = self._io.read_u4be()
self._debug['number_of_output_table_entries']['end'] = self._io.pos()
self._debug['input_tables']['start'] = self._io.pos()
self.input_tables = self._io.read_bytes(((2 * self.number_of_input_channels) * self.number_of_input_table_entries))
self._debug['input_tables']['end'] = self._io.pos()
self._debug['clut_values']['start'] = self._io.pos()
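                    # CLUT size follows the ICC lut16Type layout: grid_points ** input_channels entries, two bytes per output channel.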
                    self.clut_values = self._io.read_bytes(((2 * (self.number_of_clut_grid_points ** self.number_of_input_channels)) * self.number_of_output_channels))
self._debug['clut_values']['end'] = self._io.pos()
self._debug['output_tables']['start'] = self._io.pos()
self.output_tables = self._io.read_bytes(((2 * self.number_of_output_channels) * self.number_of_output_table_entries))
self._debug['output_tables']['end'] = self._io.pos()
class PerceptualRenderingIntentGamutTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class U16Fixed16ArrayType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
_t_values = self._root.U16Fixed16Number(self._io, self, self._root)
_t_values._read()
self.values.append(_t_values)
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
class ColorantTableOutTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_table_type:
self.tag_data = self._root.TagTable.TagDefinition.ColorantTableType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class MeasurementTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.measurement_type:
self.tag_data = self._root.TagTable.TagDefinition.MeasurementType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ProfileSequenceTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.profile_sequence_desc_type:
self.tag_data = self._root.TagTable.TagDefinition.ProfileSequenceDescType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class TechnologyTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class AToB0Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class DToB0Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class OutputResponseTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.response_curve_set_16_type:
self.tag_data = self._root.TagTable.TagDefinition.ResponseCurveSet16Type(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class GreenMatrixColumnTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ProfileDescriptionTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class Preview1Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class RedTrcTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class BToD0Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class DToB1Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class BToA1Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ParametricCurveType(KaitaiStruct):
class ParametricCurveTypeFunctions(Enum):
y_equals_x_to_power_of_g = 0
cie_122_1996 = 1
iec_61966_3 = 2
iec_61966_2_1 = 3
y_equals_ob_ax_plus_b_cb_to_power_of_g_plus_c = 4
SEQ_FIELDS = ["reserved", "function_type", "reserved_2", "parameters"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['function_type']['start'] = self._io.pos()
self.function_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions, self._io.read_u2be())
self._debug['function_type']['end'] = self._io.pos()
self._debug['reserved_2']['start'] = self._io.pos()
self.reserved_2 = self._io.ensure_fixed_contents(b"\x00\x00")
self._debug['reserved_2']['end'] = self._io.pos()
self._debug['parameters']['start'] = self._io.pos()
_on = self.function_type
if _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.cie_122_1996:
self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsCie1221996(self._io, self, self._root)
self.parameters._read()
elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.iec_61966_3:
self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsIec619663(self._io, self, self._root)
self.parameters._read()
elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.iec_61966_2_1:
self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsIec6196621(self._io, self, self._root)
self.parameters._read()
elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.y_equals_ob_ax_plus_b_cb_to_power_of_g_plus_c:
self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsYEqualsObAxPlusBCbToPowerOfGPlusC(self._io, self, self._root)
self.parameters._read()
elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.y_equals_x_to_power_of_g:
self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsYEqualsXToPowerOfG(self._io, self, self._root)
self.parameters._read()
self._debug['parameters']['end'] = self._io.pos()
class ParamsIec619663(KaitaiStruct):
SEQ_FIELDS = ["g", "a", "b", "c"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['g']['start'] = self._io.pos()
self.g = self._io.read_s4be()
self._debug['g']['end'] = self._io.pos()
self._debug['a']['start'] = self._io.pos()
self.a = self._io.read_s4be()
self._debug['a']['end'] = self._io.pos()
self._debug['b']['start'] = self._io.pos()
self.b = self._io.read_s4be()
self._debug['b']['end'] = self._io.pos()
self._debug['c']['start'] = self._io.pos()
self.c = self._io.read_s4be()
self._debug['c']['end'] = self._io.pos()
class ParamsIec6196621(KaitaiStruct):
SEQ_FIELDS = ["g", "a", "b", "c", "d"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['g']['start'] = self._io.pos()
self.g = self._io.read_s4be()
self._debug['g']['end'] = self._io.pos()
self._debug['a']['start'] = self._io.pos()
self.a = self._io.read_s4be()
self._debug['a']['end'] = self._io.pos()
self._debug['b']['start'] = self._io.pos()
self.b = self._io.read_s4be()
self._debug['b']['end'] = self._io.pos()
self._debug['c']['start'] = self._io.pos()
self.c = self._io.read_s4be()
self._debug['c']['end'] = self._io.pos()
self._debug['d']['start'] = self._io.pos()
self.d = self._io.read_s4be()
self._debug['d']['end'] = self._io.pos()
class ParamsYEqualsXToPowerOfG(KaitaiStruct):
SEQ_FIELDS = ["g"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['g']['start'] = self._io.pos()
self.g = self._io.read_s4be()
self._debug['g']['end'] = self._io.pos()
class ParamsYEqualsObAxPlusBCbToPowerOfGPlusC(KaitaiStruct):
SEQ_FIELDS = ["g", "a", "b", "c", "d", "e", "f"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['g']['start'] = self._io.pos()
self.g = self._io.read_s4be()
self._debug['g']['end'] = self._io.pos()
self._debug['a']['start'] = self._io.pos()
self.a = self._io.read_s4be()
self._debug['a']['end'] = self._io.pos()
self._debug['b']['start'] = self._io.pos()
self.b = self._io.read_s4be()
self._debug['b']['end'] = self._io.pos()
self._debug['c']['start'] = self._io.pos()
self.c = self._io.read_s4be()
self._debug['c']['end'] = self._io.pos()
self._debug['d']['start'] = self._io.pos()
self.d = self._io.read_s4be()
self._debug['d']['end'] = self._io.pos()
self._debug['e']['start'] = self._io.pos()
self.e = self._io.read_s4be()
self._debug['e']['end'] = self._io.pos()
self._debug['f']['start'] = self._io.pos()
self.f = self._io.read_s4be()
self._debug['f']['end'] = self._io.pos()
class ParamsCie1221996(KaitaiStruct):
SEQ_FIELDS = ["g", "a", "b"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['g']['start'] = self._io.pos()
self.g = self._io.read_s4be()
self._debug['g']['end'] = self._io.pos()
self._debug['a']['start'] = self._io.pos()
self.a = self._io.read_s4be()
self._debug['a']['end'] = self._io.pos()
self._debug['b']['start'] = self._io.pos()
self.b = self._io.read_s4be()
self._debug['b']['end'] = self._io.pos()
class ChromaticityTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.chromaticity_type:
self.tag_data = self._root.TagTable.TagDefinition.ChromaticityType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
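# The chromaticAdaptationTag ('chad') is stored as an s15Fixed16ArrayType;
# per the ICC specification it normally holds nine values forming a 3x3
# chromatic adaptation matrix in row-major order.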
class ChromaticAdaptationTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.s_15_fixed_16_array_type:
self.tag_data = self._root.TagTable.TagDefinition.S15Fixed16ArrayType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
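# ICC 'meas' (measurementType) element: standard observer, XYZ of the
# measurement backing, measurement geometry, flare and standard illuminant.
# The flare encoding is a u16Fixed16 value, so 0x00010000 (65536) means 100%.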
class MeasurementType(KaitaiStruct):
class StandardObserverEncodings(Enum):
unknown = 0
cie_1931_standard_colorimetric_observer = 1
cie_1964_standard_colorimetric_observer = 2
class MeasurementGeometryEncodings(Enum):
unknown = 0
zero_degrees_to_45_degrees_or_45_degrees_to_zero_degrees = 1
zero_degrees_to_d_degrees_or_d_degrees_to_zero_degrees = 2
class MeasurementFlareEncodings(Enum):
zero_percent = 0
one_hundred_percent = 65536
SEQ_FIELDS = ["reserved", "standard_observer_encoding", "nciexyz_tristimulus_values_for_measurement_backing", "measurement_geometry_encoding", "measurement_flare_encoding", "standard_illuminant_encoding"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['standard_observer_encoding']['start'] = self._io.pos()
self.standard_observer_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.StandardObserverEncodings, self._io.read_u4be())
self._debug['standard_observer_encoding']['end'] = self._io.pos()
self._debug['nciexyz_tristimulus_values_for_measurement_backing']['start'] = self._io.pos()
self.nciexyz_tristimulus_values_for_measurement_backing = self._root.XyzNumber(self._io, self, self._root)
self.nciexyz_tristimulus_values_for_measurement_backing._read()
self._debug['nciexyz_tristimulus_values_for_measurement_backing']['end'] = self._io.pos()
self._debug['measurement_geometry_encoding']['start'] = self._io.pos()
self.measurement_geometry_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.MeasurementGeometryEncodings, self._io.read_u4be())
self._debug['measurement_geometry_encoding']['end'] = self._io.pos()
self._debug['measurement_flare_encoding']['start'] = self._io.pos()
self.measurement_flare_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.MeasurementFlareEncodings, self._io.read_u4be())
self._debug['measurement_flare_encoding']['end'] = self._io.pos()
self._debug['standard_illuminant_encoding']['start'] = self._io.pos()
self.standard_illuminant_encoding = self._root.StandardIlluminantEncoding(self._io, self, self._root)
self.standard_illuminant_encoding._read()
self._debug['standard_illuminant_encoding']['end'] = self._io.pos()
class TextType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "value"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['value']['start'] = self._io.pos()
self.value = (KaitaiStream.bytes_terminate(self._io.read_bytes_full(), 0, False)).decode(u"ASCII")
self._debug['value']['end'] = self._io.pos()
class ProfileSequenceIdentifierType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_structures", "positions_table", "profile_identifiers"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_structures']['start'] = self._io.pos()
self.number_of_structures = self._io.read_u4be()
self._debug['number_of_structures']['end'] = self._io.pos()
self._debug['positions_table']['start'] = self._io.pos()
self.positions_table = [None] * (self.number_of_structures)
for i in range(self.number_of_structures):
if not 'arr' in self._debug['positions_table']:
self._debug['positions_table']['arr'] = []
self._debug['positions_table']['arr'].append({'start': self._io.pos()})
_t_positions_table = self._root.PositionNumber(self._io, self, self._root)
_t_positions_table._read()
self.positions_table[i] = _t_positions_table
self._debug['positions_table']['arr'][i]['end'] = self._io.pos()
self._debug['positions_table']['end'] = self._io.pos()
self._debug['profile_identifiers']['start'] = self._io.pos()
self.profile_identifiers = [None] * (self.number_of_structures)
for i in range(self.number_of_structures):
if not 'arr' in self._debug['profile_identifiers']:
self._debug['profile_identifiers']['arr'] = []
self._debug['profile_identifiers']['arr'].append({'start': self._io.pos()})
_t_profile_identifiers = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierType.ProfileIdentifier(self._io, self, self._root)
_t_profile_identifiers._read()
self.profile_identifiers[i] = _t_profile_identifiers
self._debug['profile_identifiers']['arr'][i]['end'] = self._io.pos()
self._debug['profile_identifiers']['end'] = self._io.pos()
class ProfileIdentifier(KaitaiStruct):
SEQ_FIELDS = ["profile_id", "profile_description"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['profile_id']['start'] = self._io.pos()
self.profile_id = self._io.read_bytes(16)
self._debug['profile_id']['end'] = self._io.pos()
self._debug['profile_description']['start'] = self._io.pos()
self.profile_description = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
self.profile_description._read()
self._debug['profile_description']['end'] = self._io.pos()
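# ICC 'clrt' (colorantTableType) element. Each colorant entry is a
# null-terminated ASCII name padded with zero bytes to a 32-byte field,
# followed by 6 bytes that, per the ICC specification, hold three uInt16
# PCS values; this parser keeps those 6 bytes raw.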
class ColorantTableType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "count_of_colorants", "colorants"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['count_of_colorants']['start'] = self._io.pos()
self.count_of_colorants = self._io.read_u4be()
self._debug['count_of_colorants']['end'] = self._io.pos()
self._debug['colorants']['start'] = self._io.pos()
self.colorants = [None] * (self.count_of_colorants)
for i in range(self.count_of_colorants):
if not 'arr' in self._debug['colorants']:
self._debug['colorants']['arr'] = []
self._debug['colorants']['arr'].append({'start': self._io.pos()})
_t_colorants = self._root.TagTable.TagDefinition.ColorantTableType.Colorant(self._io, self, self._root)
_t_colorants._read()
self.colorants[i] = _t_colorants
self._debug['colorants']['arr'][i]['end'] = self._io.pos()
self._debug['colorants']['end'] = self._io.pos()
class Colorant(KaitaiStruct):
SEQ_FIELDS = ["name", "padding", "pcs_values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['name']['start'] = self._io.pos()
self.name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['name']['end'] = self._io.pos()
self._debug['padding']['start'] = self._io.pos()
self.padding = [None] * ((32 - len(self.name)))
for i in range((32 - len(self.name))):
if not 'arr' in self._debug['padding']:
self._debug['padding']['arr'] = []
self._debug['padding']['arr'].append({'start': self._io.pos()})
self.padding[i] = self._io.ensure_fixed_contents(b"\x00")
self._debug['padding']['arr'][i]['end'] = self._io.pos()
self._debug['padding']['end'] = self._io.pos()
self._debug['pcs_values']['start'] = self._io.pos()
self.pcs_values = self._io.read_bytes(6)
self._debug['pcs_values']['end'] = self._io.pos()
class SignatureType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "signature"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['signature']['start'] = self._io.pos()
self.signature = (self._io.read_bytes(4)).decode(u"ASCII")
self._debug['signature']['end'] = self._io.pos()
class CopyrightTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class Preview0Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class DateTimeType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "date_and_time"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['date_and_time']['start'] = self._io.pos()
self.date_and_time = self._root.DateTimeNumber(self._io, self, self._root)
self.date_and_time._read()
self._debug['date_and_time']['end'] = self._io.pos()
class DToB3Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class Preview2Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class DeviceModelDescTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
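# ICC 'mpet' (multiProcessElementsType) element. The positions table holds
# (offset, size) PositionNumber pairs that, per the ICC specification, are
# relative to the start of the element; the processing elements themselves
# are not decoded here and remain in `data` as a raw byte blob.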
class MultiProcessElementsType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_processing_elements", "process_element_positions_table", "data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_input_channels']['start'] = self._io.pos()
self.number_of_input_channels = self._io.read_u2be()
self._debug['number_of_input_channels']['end'] = self._io.pos()
self._debug['number_of_output_channels']['start'] = self._io.pos()
self.number_of_output_channels = self._io.read_u2be()
self._debug['number_of_output_channels']['end'] = self._io.pos()
self._debug['number_of_processing_elements']['start'] = self._io.pos()
self.number_of_processing_elements = self._io.read_u4be()
self._debug['number_of_processing_elements']['end'] = self._io.pos()
self._debug['process_element_positions_table']['start'] = self._io.pos()
self.process_element_positions_table = [None] * (self.number_of_processing_elements)
for i in range(self.number_of_processing_elements):
if not 'arr' in self._debug['process_element_positions_table']:
self._debug['process_element_positions_table']['arr'] = []
self._debug['process_element_positions_table']['arr'].append({'start': self._io.pos()})
_t_process_element_positions_table = self._root.PositionNumber(self._io, self, self._root)
_t_process_element_positions_table._read()
self.process_element_positions_table[i] = _t_process_element_positions_table
self._debug['process_element_positions_table']['arr'][i]['end'] = self._io.pos()
self._debug['process_element_positions_table']['end'] = self._io.pos()
self._debug['data']['start'] = self._io.pos()
self.data = self._io.read_bytes_full()
self._debug['data']['end'] = self._io.pos()
class UInt16ArrayType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
self.values.append(self._io.read_u2be())
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
class ColorantOrderTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_order_type:
self.tag_data = self._root.TagTable.TagDefinition.ColorantOrderType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class DataType(KaitaiStruct):
class DataTypes(Enum):
ascii_data = 0
binary_data = 1
SEQ_FIELDS = ["data_flag"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['data_flag']['start'] = self._io.pos()
self.data_flag = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.DataType.DataTypes, self._io.read_u4be())
self._debug['data_flag']['end'] = self._io.pos()
class ChromaticityType(KaitaiStruct):
class ColorantAndPhosphorEncodings(Enum):
unknown = 0
itu_r_bt_709_2 = 1
smpte_rp145 = 2
ebu_tech_3213_e = 3
p22 = 4
SEQ_FIELDS = ["reserved", "number_of_device_channels", "colorant_and_phosphor_encoding", "ciexy_coordinates_per_channel"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_device_channels']['start'] = self._io.pos()
self.number_of_device_channels = self._io.read_u2be()
self._debug['number_of_device_channels']['end'] = self._io.pos()
self._debug['colorant_and_phosphor_encoding']['start'] = self._io.pos()
self.colorant_and_phosphor_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.ChromaticityType.ColorantAndPhosphorEncodings, self._io.read_u2be())
self._debug['colorant_and_phosphor_encoding']['end'] = self._io.pos()
self._debug['ciexy_coordinates_per_channel']['start'] = self._io.pos()
self.ciexy_coordinates_per_channel = [None] * (self.number_of_device_channels)
for i in range(self.number_of_device_channels):
if not 'arr' in self._debug['ciexy_coordinates_per_channel']:
self._debug['ciexy_coordinates_per_channel']['arr'] = []
self._debug['ciexy_coordinates_per_channel']['arr'].append({'start': self._io.pos()})
_t_ciexy_coordinates_per_channel = self._root.TagTable.TagDefinition.ChromaticityType.CiexyCoordinateValues(self._io, self, self._root)
_t_ciexy_coordinates_per_channel._read()
self.ciexy_coordinates_per_channel[i] = _t_ciexy_coordinates_per_channel
self._debug['ciexy_coordinates_per_channel']['arr'][i]['end'] = self._io.pos()
self._debug['ciexy_coordinates_per_channel']['end'] = self._io.pos()
class CiexyCoordinateValues(KaitaiStruct):
SEQ_FIELDS = ["x_coordinate", "y_coordinate"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['x_coordinate']['start'] = self._io.pos()
self.x_coordinate = self._io.read_u2be()
self._debug['x_coordinate']['end'] = self._io.pos()
self._debug['y_coordinate']['start'] = self._io.pos()
self.y_coordinate = self._io.read_u2be()
self._debug['y_coordinate']['end'] = self._io.pos()
class LuminanceTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class S15Fixed16ArrayType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
_t_values = self._root.S15Fixed16Number(self._io, self, self._root)
_t_values._read()
self.values.append(_t_values)
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
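# ICC 'mluc' (multiLocalizedUnicodeType) element. Each record carries ISO
# language and country codes (read here as raw u2be values), plus the byte
# length and offset of a UTF-16BE string; the offset is relative to the start
# of the element, which is why string_data below seeks within this stream.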
class MultiLocalizedUnicodeType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_records", "record_size", "records"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_records']['start'] = self._io.pos()
self.number_of_records = self._io.read_u4be()
self._debug['number_of_records']['end'] = self._io.pos()
self._debug['record_size']['start'] = self._io.pos()
self.record_size = self._io.read_u4be()
self._debug['record_size']['end'] = self._io.pos()
self._debug['records']['start'] = self._io.pos()
self.records = [None] * (self.number_of_records)
for i in range(self.number_of_records):
if not 'arr' in self._debug['records']:
self._debug['records']['arr'] = []
self._debug['records']['arr'].append({'start': self._io.pos()})
_t_records = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType.Record(self._io, self, self._root)
_t_records._read()
self.records[i] = _t_records
self._debug['records']['arr'][i]['end'] = self._io.pos()
self._debug['records']['end'] = self._io.pos()
class Record(KaitaiStruct):
SEQ_FIELDS = ["language_code", "country_code", "string_length", "string_offset"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['language_code']['start'] = self._io.pos()
self.language_code = self._io.read_u2be()
self._debug['language_code']['end'] = self._io.pos()
self._debug['country_code']['start'] = self._io.pos()
self.country_code = self._io.read_u2be()
self._debug['country_code']['end'] = self._io.pos()
self._debug['string_length']['start'] = self._io.pos()
self.string_length = self._io.read_u4be()
self._debug['string_length']['end'] = self._io.pos()
self._debug['string_offset']['start'] = self._io.pos()
self.string_offset = self._io.read_u4be()
self._debug['string_offset']['end'] = self._io.pos()
@property
def string_data(self):
if hasattr(self, '_m_string_data'):
return self._m_string_data if hasattr(self, '_m_string_data') else None
_pos = self._io.pos()
self._io.seek(self.string_offset)
self._debug['_m_string_data']['start'] = self._io.pos()
self._m_string_data = (self._io.read_bytes(self.string_length)).decode(u"UTF-16BE")
self._debug['_m_string_data']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_string_data if hasattr(self, '_m_string_data') else None
class AToB2Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class AToB1Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ColorimetricIntentImageStateTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class CharTargetTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.text_type:
self.tag_data = self._root.TagTable.TagDefinition.TextType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ColorantTableTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_table_type:
self.tag_data = self._root.TagTable.TagDefinition.ColorantTableType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class CalibrationDateTimeTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.date_time_type:
self.tag_data = self._root.TagTable.TagDefinition.DateTimeType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class NamedColor2Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.named_color_2_type:
self.tag_data = self._root.TagTable.TagDefinition.NamedColor2Type(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ViewingCondDescTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class BToD3Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ProfileSequenceDescType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_description_structures", "profile_descriptions"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_description_structures']['start'] = self._io.pos()
self.number_of_description_structures = self._io.read_u4be()
self._debug['number_of_description_structures']['end'] = self._io.pos()
self._debug['profile_descriptions']['start'] = self._io.pos()
self.profile_descriptions = [None] * (self.number_of_description_structures)
for i in range(self.number_of_description_structures):
if not 'arr' in self._debug['profile_descriptions']:
self._debug['profile_descriptions']['arr'] = []
self._debug['profile_descriptions']['arr'].append({'start': self._io.pos()})
_t_profile_descriptions = self._root.TagTable.TagDefinition.ProfileSequenceDescType.ProfileDescription(self._io, self, self._root)
_t_profile_descriptions._read()
self.profile_descriptions[i] = _t_profile_descriptions
self._debug['profile_descriptions']['arr'][i]['end'] = self._io.pos()
self._debug['profile_descriptions']['end'] = self._io.pos()
class ProfileDescription(KaitaiStruct):
SEQ_FIELDS = ["device_manufacturer", "device_model", "device_attributes", "device_technology", "description_of_device_manufacturer", "description_of_device_model"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['device_manufacturer']['start'] = self._io.pos()
self.device_manufacturer = self._root.DeviceManufacturer(self._io, self, self._root)
self.device_manufacturer._read()
self._debug['device_manufacturer']['end'] = self._io.pos()
self._debug['device_model']['start'] = self._io.pos()
self.device_model = (self._io.read_bytes(4)).decode(u"ASCII")
self._debug['device_model']['end'] = self._io.pos()
self._debug['device_attributes']['start'] = self._io.pos()
self.device_attributes = self._root.DeviceAttributes(self._io, self, self._root)
self.device_attributes._read()
self._debug['device_attributes']['end'] = self._io.pos()
self._debug['device_technology']['start'] = self._io.pos()
self.device_technology = self._root.TagTable.TagDefinition.TechnologyTag(self._io, self, self._root)
self.device_technology._read()
self._debug['device_technology']['end'] = self._io.pos()
self._debug['description_of_device_manufacturer']['start'] = self._io.pos()
self.description_of_device_manufacturer = self._root.TagTable.TagDefinition.DeviceMfgDescTag(self._io, self, self._root)
self.description_of_device_manufacturer._read()
self._debug['description_of_device_manufacturer']['end'] = self._io.pos()
self._debug['description_of_device_model']['start'] = self._io.pos()
self.description_of_device_model = self._root.TagTable.TagDefinition.DeviceModelDescTag(self._io, self, self._root)
self.description_of_device_model._read()
self._debug['description_of_device_model']['end'] = self._io.pos()
class ProfileSequenceIdentifierTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.profile_sequence_identifier_type:
self.tag_data = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class BToD1Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ColorantOrderType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "count_of_colorants", "numbers_of_colorants_in_order_of_printing"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['count_of_colorants']['start'] = self._io.pos()
self.count_of_colorants = self._io.read_u4be()
self._debug['count_of_colorants']['end'] = self._io.pos()
self._debug['numbers_of_colorants_in_order_of_printing']['start'] = self._io.pos()
self.numbers_of_colorants_in_order_of_printing = [None] * (self.count_of_colorants)
for i in range(self.count_of_colorants):
if not 'arr' in self._debug['numbers_of_colorants_in_order_of_printing']:
self._debug['numbers_of_colorants_in_order_of_printing']['arr'] = []
self._debug['numbers_of_colorants_in_order_of_printing']['arr'].append({'start': self._io.pos()})
self.numbers_of_colorants_in_order_of_printing[i] = self._io.read_u1()
self._debug['numbers_of_colorants_in_order_of_printing']['arr'][i]['end'] = self._io.pos()
self._debug['numbers_of_colorants_in_order_of_printing']['end'] = self._io.pos()
class DToB2Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class GrayTrcTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class ViewingConditionsType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "un_normalized_ciexyz_values_for_illuminant", "un_normalized_ciexyz_values_for_surround", "illuminant_type"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['un_normalized_ciexyz_values_for_illuminant']['start'] = self._io.pos()
self.un_normalized_ciexyz_values_for_illuminant = self._root.XyzNumber(self._io, self, self._root)
self.un_normalized_ciexyz_values_for_illuminant._read()
self._debug['un_normalized_ciexyz_values_for_illuminant']['end'] = self._io.pos()
self._debug['un_normalized_ciexyz_values_for_surround']['start'] = self._io.pos()
self.un_normalized_ciexyz_values_for_surround = self._root.XyzNumber(self._io, self, self._root)
self.un_normalized_ciexyz_values_for_surround._read()
self._debug['un_normalized_ciexyz_values_for_surround']['end'] = self._io.pos()
self._debug['illuminant_type']['start'] = self._io.pos()
self.illuminant_type = self._root.StandardIlluminantEncoding(self._io, self, self._root)
self.illuminant_type._read()
self._debug['illuminant_type']['end'] = self._io.pos()
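# ICC 'mBA ' (lutBToAType) element: header with channel counts followed by
# offsets to the B curves, matrix, M curves, CLUT and A curves. Per the ICC
# specification the offsets are measured from the beginning of the element
# (including the 4-byte type signature); the remainder is kept raw in `data`.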
class LutBToAType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "padding", "offset_to_first_b_curve", "offset_to_matrix", "offset_to_first_m_curve", "offset_to_clut", "offset_to_first_a_curve", "data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_input_channels']['start'] = self._io.pos()
self.number_of_input_channels = self._io.read_u1()
self._debug['number_of_input_channels']['end'] = self._io.pos()
self._debug['number_of_output_channels']['start'] = self._io.pos()
self.number_of_output_channels = self._io.read_u1()
self._debug['number_of_output_channels']['end'] = self._io.pos()
self._debug['padding']['start'] = self._io.pos()
self.padding = self._io.ensure_fixed_contents(b"\x00\x00")
self._debug['padding']['end'] = self._io.pos()
self._debug['offset_to_first_b_curve']['start'] = self._io.pos()
self.offset_to_first_b_curve = self._io.read_u4be()
self._debug['offset_to_first_b_curve']['end'] = self._io.pos()
self._debug['offset_to_matrix']['start'] = self._io.pos()
self.offset_to_matrix = self._io.read_u4be()
self._debug['offset_to_matrix']['end'] = self._io.pos()
self._debug['offset_to_first_m_curve']['start'] = self._io.pos()
self.offset_to_first_m_curve = self._io.read_u4be()
self._debug['offset_to_first_m_curve']['end'] = self._io.pos()
self._debug['offset_to_clut']['start'] = self._io.pos()
self.offset_to_clut = self._io.read_u4be()
self._debug['offset_to_clut']['end'] = self._io.pos()
self._debug['offset_to_first_a_curve']['start'] = self._io.pos()
self.offset_to_first_a_curve = self._io.read_u4be()
self._debug['offset_to_first_a_curve']['end'] = self._io.pos()
self._debug['data']['start'] = self._io.pos()
self.data = self._io.read_bytes_full()
self._debug['data']['end'] = self._io.pos()
class GreenTrcTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class UInt32ArrayType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
self.values.append(self._io.read_u4be())
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
class GamutTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
self.tag_data._read()
elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class UInt8ArrayType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
self.values.append(self._io.read_u1())
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
class RedMatrixColumnTag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
class UInt64ArrayType(KaitaiStruct):
SEQ_FIELDS = ["reserved", "values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
self.values.append(self._io.read_u8be())
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
class BToD2Tag(KaitaiStruct):
SEQ_FIELDS = ["tag_type", "tag_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_type']['start'] = self._io.pos()
self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
self._debug['tag_type']['end'] = self._io.pos()
self._debug['tag_data']['start'] = self._io.pos()
_on = self.tag_type
if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
self.tag_data._read()
self._debug['tag_data']['end'] = self._io.pos()
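        # Lazily-parsed instance: on first access this seeks to offset_to_data_element,
        # parses size_of_data_element bytes into the type selected by tag_signature,
        # then restores the original stream position; unknown signatures fall back to raw bytes.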
@property
def tag_data_element(self):
if hasattr(self, '_m_tag_data_element'):
return self._m_tag_data_element if hasattr(self, '_m_tag_data_element') else None
_pos = self._io.pos()
self._io.seek(self.offset_to_data_element)
self._debug['_m_tag_data_element']['start'] = self._io.pos()
_on = self.tag_signature
if _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_order:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantOrderTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.media_white_point:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.MediaWhitePointTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_3:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD3Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorimetric_intent_image_state:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorimetricIntentImageStateTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.viewing_cond_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ViewingCondDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.device_model_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DeviceModelDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.chromaticity:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ChromaticityTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.saturation_rendering_intent_gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.SaturationRenderingIntentGamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.green_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GreenMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.copyright:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CopyrightTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.blue_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BlueMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.chromatic_adaptation:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ChromaticAdaptationTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.output_response:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.OutputResponseTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_sequence:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileSequenceTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.char_target:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CharTargetTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.red_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.RedTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.device_mfg_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DeviceMfgDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.measurement:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.MeasurementTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.green_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GreenTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_3:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB3Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_table:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantTableTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_description:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileDescriptionTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_sequence_identifier:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.gray_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GrayTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.perceptual_rendering_intent_gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.PerceptualRenderingIntentGamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.blue_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BlueTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.calibration_date_time:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CalibrationDateTimeTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_table_out:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantTableOutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.red_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.RedMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.luminance:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.LuminanceTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.named_color_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.NamedColor2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.viewing_conditions:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ViewingConditionsTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.technology:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.TechnologyTag(io, self, self._root)
self._m_tag_data_element._read()
else:
self._m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
self._debug['_m_tag_data_element']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_tag_data_element if hasattr(self, '_m_tag_data_element') else None
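    # Device attributes bit-field: four one-bit flags (reflective/transparency,
    # glossy/matte, media polarity, colour/black-and-white media), followed by
    # 28 reserved bits and 32 vendor-specific bits, read MSB-first.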
class DeviceAttributes(KaitaiStruct):
class DeviceAttributesReflectiveOrTransparency(Enum):
reflective = 0
transparency = 1
class DeviceAttributesGlossyOrMatte(Enum):
glossy = 0
matte = 1
class DeviceAttributesPositiveOrNegativeMediaPolarity(Enum):
positive_media_polarity = 0
negative_media_polarity = 1
class DeviceAttributesColourOrBlackAndWhiteMedia(Enum):
colour_media = 0
black_and_white_media = 1
SEQ_FIELDS = ["reflective_or_transparency", "glossy_or_matte", "positive_or_negative_media_polarity", "colour_or_black_and_white_media", "reserved", "vendor_specific"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reflective_or_transparency']['start'] = self._io.pos()
self.reflective_or_transparency = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesReflectiveOrTransparency, self._io.read_bits_int(1))
self._debug['reflective_or_transparency']['end'] = self._io.pos()
self._debug['glossy_or_matte']['start'] = self._io.pos()
self.glossy_or_matte = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesGlossyOrMatte, self._io.read_bits_int(1))
self._debug['glossy_or_matte']['end'] = self._io.pos()
self._debug['positive_or_negative_media_polarity']['start'] = self._io.pos()
self.positive_or_negative_media_polarity = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesPositiveOrNegativeMediaPolarity, self._io.read_bits_int(1))
self._debug['positive_or_negative_media_polarity']['end'] = self._io.pos()
self._debug['colour_or_black_and_white_media']['start'] = self._io.pos()
self.colour_or_black_and_white_media = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesColourOrBlackAndWhiteMedia, self._io.read_bits_int(1))
self._debug['colour_or_black_and_white_media']['end'] = self._io.pos()
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.read_bits_int(28)
self._debug['reserved']['end'] = self._io.pos()
self._debug['vendor_specific']['start'] = self._io.pos()
self.vendor_specific = self._io.read_bits_int(32)
self._debug['vendor_specific']['end'] = self._io.pos()
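    # Registered device manufacturer signatures: four ASCII characters packed into a
    # big-endian u4 (e.g. 1094992453 == 'ADBE', 1095782476 == 'APPL').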
class DeviceManufacturer(KaitaiStruct):
class DeviceManufacturers(Enum):
erdt_systems_gmbh_and_co_kg = 878981744
aamazing_technologies_inc = 1094798657
acer_peripherals = 1094927698
acolyte_color_research = 1094929492
actix_sytems_inc = 1094931529
adara_technology_inc = 1094992210
adobe_systems_incorporated = 1094992453
adi_systems_inc = 1094994208
agfa_graphics_nv = 1095190081
alps_electric_usa_inc = 1095519556
alps_electric_usa_inc_2 = 1095520339
alwan_color_expertise = 1095522126
amiable_technologies_inc = 1095586889
aoc_international_usa_ltd = 1095713568
apago = 1095778631
apple_computer_inc = 1095782476
ast = 1095980064
atandt_computer_systems = 1096033876
barbieri_electronic = 1111573836
barco_nv = 1112687439
breakpoint_pty_limited = 1112689488
brother_industries_ltd = 1112690516
bull = 1112886348
bus_computer_systems = 1112888096
c_itoh = 1127041364
intel_corporation = 1128353106
canon_inc_canon_development_americas_inc = 1128353359
carroll_touch = 1128354386
casio_computer_co_ltd = 1128354633
colorbus_pl = 1128420691
crossfield = 1128614944
crossfield_2 = 1128615032
cgs_publishing_technologies_international_gmbh = 1128747808
rochester_robotics = 1128811808
colour_imaging_group_london = 1128875852
citizen = 1128879177
candela_ltd = 1129066544
color_iq = 1129072977
chromaco_inc = 1129136975
chromix = 1129146712
colorgraphic_communications_corporation = 1129270351
compaq_computer_corporation = 1129270608
compeq_usa_focus_technology = 1129270640
conrac_display_products = 1129270866
cordata_technologies_inc = 1129271876
compaq_computer_corporation_2 = 1129337120
colorpro = 1129337423
cornerstone = 1129467424
ctx_international_inc = 1129601056
colorvision = 1129728339
fujitsu_laboratories_ltd = 1129792288
darius_technology_ltd = 1145131593
dataproducts = 1145132097
dry_creek_photo = 1145262112
digital_contents_resource_center_chung_ang_university = 1145262659
dell_computer_corporation = 1145392204
dainippon_ink_and_chemicals = 1145652000
diconix = 1145652047
digital = 1145653065
digital_light_and_color = 1145841219
doppelganger_llc = 1146113095
dainippon_screen = 1146298400
doosol = 1146310476
dupont = 1146441806
epson = 1162892111
esko_graphics = 1163086671
electronics_and_telecommunications_research_institute = 1163153993
everex_systems_inc = 1163281746
exactcode_gmbh = 1163411779
eizo_nanao_corporation = 1164540527
falco_data_products_inc = 1178684483
fuji_photo_film_coltd = 1179000864
fujifilm_electronic_imaging_ltd = 1179010377
fnord_software = 1179537988
fora_inc = 1179603521
forefront_technology_corporation = 1179603525
fujitsu = 1179658794
waytech_development_inc = 1179664672
fujitsu_2 = 1179994697
fuji_xerox_co_ltd = 1180180512
gcc_technologies_inc = 1195590432
global_graphics_software_limited = 1195856716
gretagmacbeth = 1196245536
gmg_gmbh_and_co_kg = 1196246816
goldstar_technology_inc = 1196379204
giantprint_pty_ltd = 1196446292
gretagmacbeth_2 = 1196707138
waytech_development_inc_2 = 1196835616
sony_corporation = 1196896843
hci = 1212369184
heidelberger_druckmaschinen_ag = 1212435744
hermes = 1212502605
hitachi_america_ltd = 1212765249
hewlett_packard = 1213210656
hitachi_ltd = 1213481760
hiti_digital_inc = 1214862441
ibm_corporation = 1229081888
scitex_corporation_ltd = 1229213268
hewlett_packard_2 = 1229275936
iiyama_north_america_inc = 1229543745
ikegami_electronics_inc = 1229669703
image_systems_corporation = 1229799751
ingram_micro_inc = 1229801760
intel_corporation_2 = 1229870147
intl = 1229870156
intra_electronics_usa_inc = 1229870162
iocomm_international_technology_corporation = 1229931343
infoprint_solutions_company = 1230000928
scitex_corporation_ltd_3 = 1230129491
ichikawa_soft_laboratory = 1230195744
itnl = 1230261836
ivm = 1230392608
iwatsu_electric_co_ltd = 1230455124
scitex_corporation_ltd_2 = 1231318644
inca_digital_printers_ltd = 1231971169
scitex_corporation_ltd_4 = 1232234867
jetsoft_development = 1246971476
jvc_information_products_co = 1247167264
scitex_corporation_ltd_6 = 1262572116
kfc_computek_components_corporation = 1262895904
klh_computers = 1263290400
konica_minolta_holdings_inc = 1263355972
konica_corporation = 1263420225
kodak = 1263486017
kyocera = 1264144195
scitex_corporation_ltd_7 = 1264677492
leica_camera_ag = 1279476039
leeds_colour = 1279476548
left_dakota = 1279541579
leading_technology_inc = 1279607108
lexmark_international_inc = 1279613005
link_computer_inc = 1279872587
linotronic = 1279872591
lite_on_inc = 1279874117
mag_computronic_usa_inc = 1296123715
mag_innovision_inc = 1296123721
mannesmann = 1296125518
micron_technology_inc = 1296646990
microtek = 1296646994
microvitec_inc = 1296646998
minolta = 1296649807
mitsubishi_electronics_america_inc = 1296651347
mitsuba_corporation = 1296651379
minolta_2 = 1296976980
modgraph_inc = 1297040455
monitronix_inc = 1297043017
monaco_systems_inc = 1297043027
morse_technology_inc = 1297044051
motive_systems = 1297044553
microsoft_corporation = 1297303124
mutoh_industries_ltd = 1297437775
mitsubishi_electric_corporation_kyoto_works = 1298756723
nanao_usa_corporation = 1312902721
nec_corporation = 1313162016
nexpress_solutions_llc = 1313167440
nissei_sangyo_america_ltd = 1313428307
nikon_corporation = 1313558350
oce_technologies_bv = 1329808672
ocecolor = 1329808707
oki = 1330333984
okidata = 1330334020
okidata_2 = 1330334032
olivetti = 1330399574
olympus_optical_co_ltd = 1330403661
onyx_graphics = 1330534744
optiquest = 1330664521
packard_bell = 1346454347
matsushita_electric_industrial_co_ltd = 1346457153
pantone_inc = 1346457172
packard_bell_2 = 1346522656
pfu_limited = 1346786592
philips_consumer_electronics_co = 1346914636
hoya_corporation_pentax_imaging_systems_division = 1347310680
phase_one_a_s = 1347382885
premier_computer_innovations = 1347568973
princeton_graphic_systems = 1347569998
princeton_publishing_labs = 1347570000
qlux = 1363957080
qms_inc = 1364022048
qpcard_ab = 1364214596
quadlaser = 1364541764
qume_corporation = 1364544837
radius_inc = 1380009033
integrated_color_solutions_inc_2 = 1380205688
roland_dg_corporation = 1380206368
redms_group_inc = 1380271181
relisys = 1380273225
rolf_gierling_multitools = 1380404563
ricoh_corporation = 1380533071
edmund_ronald = 1380863044
royal = 1380931905
ricoh_printing_systemsltd = 1380991776
royal_information_electronics_co_ltd = 1381256224
sampo_corporation_of_america = 1396788560
samsung_inc = 1396788563
jaime_santana_pomares = 1396788820
scitex_corporation_ltd_9 = 1396918612
dainippon_screen_3 = 1396920910
scitex_corporation_ltd_12 = 1396985888
samsung_electronics_coltd = 1397048096
seiko_instruments_usa_inc = 1397049675
seikosha = 1397049707
scanguycom = 1397183833
sharp_laboratories = 1397244242
international_color_consortium = 1397310275
sony_corporation_2 = 1397706329
spectracal = 1397769036
star = 1398030674
sampo_technology_corporation = 1398031136
scitex_corporation_ltd_10 = 1399023988
scitex_corporation_ltd_13 = 1399091232
sony_corporation_3 = 1399811705
talon_technology_corporation = 1413565519
tandy = 1413566020
tatung_co_of_america_inc = 1413567573
taxan_america_inc = 1413568577
tokyo_denshi_sekei_kk = 1413763872
teco_information_systems_inc = 1413825359
tegra = 1413826386
tektronix_inc = 1413827412
texas_instruments = 1414078496
typemaker_ltd = 1414351698
toshiba_corp = 1414484802
toshiba_inc = 1414484808
totoku_electric_co_ltd = 1414485067
triumph = 1414678869
toshiba_tec_corporation = 1414742612
ttx_computer_products_inc = 1414813728
tvm_professional_monitor_corporation = 1414941984
tw_casper_corporation = 1414996000
ulead_systems = 1431065432
unisys = 1431193939
utz_fehlau_and_sohn = 1431591494
varityper = 1447121481
viewsonic = 1447642455
visual_communication = 1447646028
wang = 1463897671
wilbur_imaging = 1464615506
ware_to_go = 1465141042
wyse_technology = 1465471813
xerox_corporation = 1480938072
x_rite = 1481787732
lavanyas_test_company = 1513173555
zoran_corporation = 1515340110
zebra_technologies_inc = 1516593778
basiccolor_gmbh = 1648968515
bergdesign_incorporated = 1650815591
integrated_color_solutions_inc = 1667594596
macdermid_colorspan_inc = 1668051824
dainippon_screen_2 = 1685266464
dupont_2 = 1685418094
fujifilm_electronic_imaging_ltd_2 = 1717986665
fluxdata_corporation = 1718383992
scitex_corporation_ltd_5 = 1769105779
scitex_corporation_ltd_8 = 1801548404
erdt_systems_gmbh_and_co_kg_2 = 1868706916
medigraph_gmbh = 1868720483
qubyx_sarl = 1903518329
scitex_corporation_ltd_11 = 1935894900
dainippon_screen_4 = 1935897198
scitex_corporation_ltd_14 = 1935962144
siwi_grafika_corporation = 1936291689
yxymaster_gmbh = 2037938541
SEQ_FIELDS = ["device_manufacturer"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['device_manufacturer']['start'] = self._io.pos()
self.device_manufacturer = KaitaiStream.resolve_enum(self._root.DeviceManufacturer.DeviceManufacturers, self._io.read_u4be())
self._debug['device_manufacturer']['end'] = self._io.pos()
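    # s15Fixed16Number: signed fixed-point value with 16 fractional bits, kept here as
    # the raw 4 bytes (interpret as a signed 32-bit big-endian integer divided by 65536.0).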
class S15Fixed16Number(KaitaiStruct):
SEQ_FIELDS = ["number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_bytes(4)
self._debug['number']['end'] = self._io.pos()
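    # positionNumber: an (offset, size) pair locating a data element elsewhere in the
    # profile; both fields are big-endian u4 values.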
class PositionNumber(KaitaiStruct):
SEQ_FIELDS = ["offset_to_data_element", "size_of_data_element"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['offset_to_data_element']['start'] = self._io.pos()
self.offset_to_data_element = self._io.read_u4be()
self._debug['offset_to_data_element']['end'] = self._io.pos()
self._debug['size_of_data_element']['start'] = self._io.pos()
self.size_of_data_element = self._io.read_u4be()
self._debug['size_of_data_element']['end'] = self._io.pos()
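# --- Editorial usage sketch (not emitted by the Kaitai compiler) ---
# A minimal illustration of how a debug-mode Kaitai class such as this ICC parser is
# typically driven. The root class name `Icc4` and the field path `tag_table.tags`
# are assumptions; substitute the names the compiler actually generated for this spec.
#
#     from kaitaistruct import KaitaiStream
#
#     with open("profile.icc", "rb") as f:
#         root = Icc4(KaitaiStream(f))
#         root._read()                        # debug builds defer parsing until _read()
#         for tag in root.tag_table.tags:     # offsets/sizes are also recorded in _debug
#             element = tag.tag_data_element  # lazy: seeks, parses, restores the position
#             print(tag.tag_signature, tag.size_of_data_element)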
| 1.984375
| 2
|
votesystem/vote/form.py
|
majaeseong/votesystem
| 0
|
12775950
|
<reponame>majaeseong/votesystem<filename>votesystem/vote/form.py
from django import forms

from . import models


class FormCandi(forms.ModelForm):
    """Form for creating or editing a Candidate (name and area)."""

    class Meta:
        model = models.Candidate
        fields = (
            'name',
            'area',
        )


class DateInput(forms.DateInput):
    # Render date fields with the browser's native date picker.
    input_type = 'date'


class FormPoll(forms.ModelForm):
    """Form for creating or editing a Poll; date fields use native date pickers."""

    class Meta:
        model = models.Poll
        fields = (
            'area',
            'start_date',
            'end_date',
        )
        widgets = {
            'start_date': DateInput(),
            'end_date': DateInput(),
        }
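# --- Editorial usage sketch (not part of this repository) ---
# One way FormPoll could be wired into a view; the template path 'vote/poll_form.html'
# and the URL name 'poll_list' are assumptions, not code that exists in this project.
#
#     from django.shortcuts import render, redirect
#     from .form import FormPoll
#
#     def create_poll(request):
#         form = FormPoll(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()
#             return redirect('poll_list')
#         return render(request, 'vote/poll_form.html', {'form': form})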
| 2.1875
| 2
|