# NOTE(review): the two lines here ("content stringlengths 5 1.05M |", "|---|")
# are dataset-extraction residue (a stray markdown table header), not Python;
# they would be a SyntaxError and have been commented out.
import os
import concurrent
import logging
import json
import random
import datetime
import asyncio
import aiohttp
from aiohttp import web
import aiohttp_session
import prometheus_client as pc
import pymysql
from prometheus_async.aio import time as prom_async_time
from prometheus_async.aio.web import server_stats
import google.oauth2.service_account
import google.api_core.exceptions
from hailtop.utils import (time_msecs, time_msecs_str, humanize_timedelta_msecs,
request_retry_transient_errors, run_if_changed,
retry_long_running, LoggingTimer)
from hailtop.batch_client.parse import parse_cpu_in_mcpu, parse_memory_in_bytes
from hailtop.config import get_deploy_config
from hailtop.tls import get_in_cluster_server_ssl_context, in_cluster_ssl_client_session
from hailtop.hail_logging import AccessLogger
from gear import (Database, setup_aiohttp_session,
rest_authenticated_users_only, web_authenticated_users_only,
web_authenticated_developers_only, check_csrf_token, transaction)
from web_common import (setup_aiohttp_jinja2, setup_common_static_routes,
render_template, set_message)
# import uvloop
from ..utils import (adjust_cores_for_memory_request, worker_memory_per_core_gb,
cost_from_msec_mcpu, adjust_cores_for_packability, coalesce)
from ..batch import batch_record_to_dict, job_record_to_dict
from ..log_store import LogStore
from ..database import CallError, check_call_procedure
from ..batch_configuration import (BATCH_PODS_NAMESPACE, BATCH_BUCKET_NAME,
DEFAULT_NAMESPACE, WORKER_LOGS_BUCKET_NAME)
from ..globals import HTTP_CLIENT_MAX_SIZE, BATCH_FORMAT_VERSION
from ..spec_writer import SpecWriter
from ..batch_format_version import BatchFormatVersion
from .validate import ValidationError, validate_batch, validate_jobs
# uvloop.install()
log = logging.getLogger('batch.front_end')
REQUEST_TIME = pc.Summary('batch_request_latency_seconds', 'Batch request latency in seconds', ['endpoint', 'verb'])
REQUEST_TIME_GET_JOBS = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id/jobs', verb="GET")
REQUEST_TIME_GET_JOB = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id/jobs/job_id', verb="GET")
REQUEST_TIME_GET_JOB_LOG = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id/jobs/job_id/log', verb="GET")
REQUEST_TIME_GET_ATTEMPTS = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id/jobs/job_id/attempts', verb="GET")
REQUEST_TIME_GET_BATCHES = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches', verb="GET")
REQUEST_TIME_POST_CREATE_JOBS = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id/jobs/create', verb="POST")
REQUEST_TIME_POST_CREATE_BATCH = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/create', verb='POST')
REQUEST_TIME_POST_GET_BATCH = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id', verb='GET')
REQUEST_TIME_PATCH_CANCEL_BATCH = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id/cancel', verb="PATCH")
REQUEST_TIME_PATCH_CLOSE_BATCH = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id/close', verb="PATCH")
REQUEST_TIME_DELETE_BATCH = REQUEST_TIME.labels(endpoint='/api/v1alpha/batches/batch_id', verb="DELETE")
REQUEST_TIME_GET_BATCH_UI = REQUEST_TIME.labels(endpoint='/batches/batch_id', verb='GET')
REQUEST_TIME_POST_CANCEL_BATCH_UI = REQUEST_TIME.labels(endpoint='/batches/batch_id/cancel', verb='POST')
REQUEST_TIME_POST_DELETE_BATCH_UI = REQUEST_TIME.labels(endpoint='/batches/batch_id/delete', verb='POST')
REQUEST_TIME_GET_BATCHES_UI = REQUEST_TIME.labels(endpoint='/batches', verb='GET')
REQUEST_TIME_GET_JOB_UI = REQUEST_TIME.labels(endpoint='/batches/batch_id/jobs/job_id', verb="GET")
REQUEST_TIME_GET_BILLING_UI = REQUEST_TIME.labels(endpoint='/billing', verb="GET")
REQUEST_TIME_GET_BILLING_PROJECTS_UI = REQUEST_TIME.labels(endpoint='/billing_projects', verb="GET")
REQUEST_TIME_POST_BILLING_PROJECT_REMOVE_USER_UI = REQUEST_TIME.labels(endpoint='/billing_projects/billing_project/users/user/remove', verb="POST")
REQUEST_TIME_POST_BILLING_PROJECT_ADD_USER_UI = REQUEST_TIME.labels(endpoint='/billing_projects/billing_project/users/add', verb="POST")
REQUEST_TIME_POST_CREATE_BILLING_PROJECT_UI = REQUEST_TIME.labels(endpoint='/billing_projects/create', verb="POST")
routes = web.RouteTableDef()
deploy_config = get_deploy_config()
BATCH_JOB_DEFAULT_CPU = os.environ.get('HAIL_BATCH_JOB_DEFAULT_CPU', '1')
BATCH_JOB_DEFAULT_MEMORY = os.environ.get('HAIL_BATCH_JOB_DEFAULT_MEMORY', '3.75G')
@routes.get('/healthcheck')
async def get_healthcheck(request):  # pylint: disable=W0613
    """Liveness/readiness probe: always answers 200 with an empty body."""
    return web.Response()
async def _query_batch_jobs(request, batch_id):
    """Return one page (up to 50) of a batch's jobs as ``(jobs, last_job_id)``.

    The ``q`` query parameter is a whitespace-separated list of terms:
    ``key=value`` and ``has:key`` match job attributes, a bare word selects a
    named group of job states (see ``state_query_values``), and a leading
    ``!`` negates a term.  ``last_job_id`` implements keyset pagination and
    is None when this page is the last.  On an invalid term, an error
    message is flashed to the session and ``([], None)`` is returned.
    """
    # search word -> list of job states it selects
    state_query_values = {
        'pending': ['Pending'],
        'ready': ['Ready'],
        'running': ['Running'],
        'live': ['Ready', 'Running'],
        'cancelled': ['Cancelled'],
        'error': ['Error'],
        'failed': ['Failed'],
        'bad': ['Error', 'Failed'],
        'success': ['Success'],
        'done': ['Cancelled', 'Error', 'Failed', 'Success']
    }

    db = request.app['db']

    # batch has already been validated
    where_conditions = [
        '(jobs.batch_id = %s)'
    ]
    where_args = [batch_id]

    # keyset pagination: resume strictly after the last job seen
    last_job_id = request.query.get('last_job_id')
    if last_job_id is not None:
        last_job_id = int(last_job_id)
        where_conditions.append('(jobs.job_id > %s)')
        where_args.append(last_job_id)

    q = request.query.get('q', '')
    terms = q.split()
    for t in terms:
        # a leading '!' negates the term
        if t[0] == '!':
            negate = True
            t = t[1:]
        else:
            negate = False

        if '=' in t:
            # attribute equality: key=value
            k, v = t.split('=', 1)
            condition = '''
(EXISTS (SELECT * FROM `job_attributes`
WHERE `job_attributes`.batch_id = jobs.batch_id AND
`job_attributes`.job_id = jobs.job_id AND
`job_attributes`.`key` = %s AND
`job_attributes`.`value` = %s))
'''
            args = [k, v]
        elif t.startswith('has:'):
            # attribute presence: has:key
            k = t[4:]
            condition = '''
(EXISTS (SELECT * FROM `job_attributes`
WHERE `job_attributes`.batch_id = jobs.batch_id AND
`job_attributes`.job_id = jobs.job_id AND
`job_attributes`.`key` = %s))
'''
            args = [k]
        elif t in state_query_values:
            # state group: expands to an OR over the member states
            values = state_query_values[t]
            condition = ' OR '.join([
                '(jobs.state = %s)' for v in values])
            condition = f'({condition})'
            args = values
        else:
            session = await aiohttp_session.get_session(request)
            set_message(session, f'Invalid search term: {t}.', 'error')
            return ([], None)

        if negate:
            condition = f'(NOT {condition})'

        where_conditions.append(condition)
        where_args.extend(args)

    # joins pull in the batch format version, the 'name' attribute and the
    # aggregated resource cost for each job
    sql = f'''
SELECT jobs.*, batches.format_version, job_attributes.value AS name, SUM(`usage` * rate) AS cost
FROM jobs
INNER JOIN batches ON jobs.batch_id = batches.id
LEFT JOIN job_attributes
ON jobs.batch_id = job_attributes.batch_id AND
jobs.job_id = job_attributes.job_id AND
job_attributes.`key` = 'name'
LEFT JOIN aggregated_job_resources
ON jobs.batch_id = aggregated_job_resources.batch_id AND
jobs.job_id = aggregated_job_resources.job_id
LEFT JOIN resources
ON aggregated_job_resources.resource = resources.resource
WHERE {' AND '.join(where_conditions)}
GROUP BY jobs.batch_id, jobs.job_id
ORDER BY jobs.batch_id, jobs.job_id ASC
LIMIT 50;
'''
    sql_args = where_args

    jobs = [job_record_to_dict(record, record['name'])
            async for record
            in db.select_and_fetchall(sql, sql_args)]

    # a full page implies there may be more; expose the cursor
    if len(jobs) == 50:
        last_job_id = jobs[-1]['job_id']
    else:
        last_job_id = None

    return (jobs, last_job_id)
@routes.get('/api/v1alpha/batches/{batch_id}/jobs')
@prom_async_time(REQUEST_TIME_GET_JOBS)
@rest_authenticated_users_only
async def get_jobs(request, userdata):
    """REST: list a page of jobs for a batch owned by the caller (404 otherwise)."""
    db = request.app['db']
    batch_id = int(request.match_info['batch_id'])
    user = userdata['username']

    batch = await db.select_and_fetchone(
        '''
SELECT * FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
''', (user, batch_id))
    if not batch:
        raise web.HTTPNotFound()

    jobs, last_job_id = await _query_batch_jobs(request, batch_id)

    resp = {'jobs': jobs}
    if last_job_id is not None:
        resp['last_job_id'] = last_job_id
    return web.json_response(resp)
async def _get_job_log_from_record(app, batch_id, job_id, record):
    """Fetch a job's log given its jobs-table `record`.

    Running jobs are proxied live from the worker over HTTP; finished jobs
    (Error/Failed/Success) are read from the log store (GCS).  Returns a
    dict mapping task name ('input'/'main'/'output') to log text, or None
    when no log exists for the job's current state.
    """
    state = record['state']
    ip_address = record['ip_address']
    if state == 'Running':
        async with aiohttp.ClientSession(
                raise_for_status=True, timeout=aiohttp.ClientTimeout(total=60)) as session:
            try:
                url = (f'http://{ip_address}:5000'
                       f'/api/v1alpha/batches/{batch_id}/jobs/{job_id}/log')
                resp = await request_retry_transient_errors(session, 'GET', url)
                return await resp.json()
            except aiohttp.ClientResponseError as e:
                # 404: the worker no longer has the job (it may have just finished)
                if e.status == 404:
                    return None
                raise

    if state in ('Error', 'Failed', 'Success'):
        log_store = app['log_store']
        batch_format_version = BatchFormatVersion(record['format_version'])

        async def _read_log_from_gcs(task):
            # returns (task, data) so gather() results can be dict()-ed;
            # a missing file is logged and surfaces as data=None
            try:
                data = await log_store.read_log_file(batch_format_version, batch_id, job_id, record['attempt_id'], task)
            except google.api_core.exceptions.NotFound:
                id = (batch_id, job_id)
                log.exception(f'missing log file for {id}')
                data = None
            return task, data

        spec = json.loads(record['spec'])
        tasks = []
        # 'input' and 'output' container logs only exist when the spec
        # declares input/output files; 'main' always runs
        has_input_files = batch_format_version.get_spec_has_input_files(spec)
        if has_input_files:
            tasks.append('input')
        tasks.append('main')
        has_output_files = batch_format_version.get_spec_has_output_files(spec)
        if has_output_files:
            tasks.append('output')

        return dict(await asyncio.gather(*[_read_log_from_gcs(task) for task in tasks]))

    # Pending/Ready/Cancelled jobs have no log
    return None
async def _get_job_log(app, batch_id, job_id, user):
    """Look up a job scoped to `user`'s batches and fetch its log (404 if absent)."""
    sql = '''
SELECT jobs.state, jobs.spec, ip_address, format_version, jobs.attempt_id
FROM jobs
INNER JOIN batches
ON jobs.batch_id = batches.id
LEFT JOIN attempts
ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id AND jobs.attempt_id = attempts.attempt_id
LEFT JOIN instances
ON attempts.instance_name = instances.name
WHERE user = %s AND jobs.batch_id = %s AND NOT deleted AND jobs.job_id = %s;
'''
    record = await app['db'].select_and_fetchone(sql, (user, batch_id, job_id))
    if not record:
        raise web.HTTPNotFound()
    return await _get_job_log_from_record(app, batch_id, job_id, record)
async def _get_attributes(app, record):
    """Return the job's attribute dict (possibly None) for the given job record."""
    format_version = BatchFormatVersion(record['format_version'])

    if not format_version.has_full_spec_in_gcs():
        # older format versions carry attributes inline in the spec
        return json.loads(record['spec']).get('attributes')

    batch_id = record['batch_id']
    job_id = record['job_id']
    rows = app['db'].select_and_fetchall('''
SELECT `key`, `value`
FROM job_attributes
WHERE batch_id = %s AND job_id = %s;
''',
                                         (batch_id, job_id))
    return {row['key']: row['value'] async for row in rows}
async def _get_full_job_spec(app, record):
    """Return the job's full spec, reading from GCS for newer format versions.

    Returns None when the spec file is unexpectedly missing from GCS.
    """
    format_version = BatchFormatVersion(record['format_version'])
    if not format_version.has_full_spec_in_gcs():
        return json.loads(record['spec'])

    db = app['db']
    log_store = app['log_store']
    batch_id = record['batch_id']
    job_id = record['job_id']

    token, start_job_id = await SpecWriter.get_token_start_id(db, batch_id, job_id)
    try:
        spec = await log_store.read_spec_file(batch_id, token, start_job_id, job_id)
    except google.api_core.exceptions.NotFound:
        id = (batch_id, job_id)
        log.exception(f'missing spec file for {id}')
        return None
    return json.loads(spec)
async def _get_full_job_status(app, record):
    """Return the job's full status dict, or None, based on its state.

    Pending/Ready/Cancelled jobs have no status.  Finished jobs
    (Error/Failed/Success) read the status from the DB row or from GCS
    depending on the batch format version; Running jobs are queried live
    from the worker over HTTP.
    """
    log_store = app['log_store']

    batch_id = record['batch_id']
    job_id = record['job_id']
    attempt_id = record['attempt_id']
    state = record['state']
    format_version = BatchFormatVersion(record['format_version'])

    if state in ('Pending', 'Ready', 'Cancelled'):
        return

    if state in ('Error', 'Failed', 'Success'):
        if not format_version.has_full_status_in_gcs():
            return json.loads(record['status'])

        try:
            status = await log_store.read_status_file(batch_id, job_id, attempt_id)
            return json.loads(status)
        except google.api_core.exceptions.NotFound:
            id = (batch_id, job_id)
            log.exception(f'missing status file for {id}')
            return None

    assert state == 'Running'
    # running jobs keep no status in the DB; it lives on the worker
    assert record['status'] is None

    ip_address = record['ip_address']
    async with aiohttp.ClientSession(
            raise_for_status=True, timeout=aiohttp.ClientTimeout(total=60)) as session:
        try:
            url = (f'http://{ip_address}:5000'
                   f'/api/v1alpha/batches/{batch_id}/jobs/{job_id}/status')
            resp = await request_retry_transient_errors(session, 'GET', url)
            return await resp.json()
        except aiohttp.ClientResponseError as e:
            # 404: the worker no longer has the job (it may have just finished)
            if e.status == 404:
                return None
            raise
@routes.get('/api/v1alpha/batches/{batch_id}/jobs/{job_id}/log')
@prom_async_time(REQUEST_TIME_GET_JOB_LOG)
@rest_authenticated_users_only
async def get_job_log(request, userdata):  # pylint: disable=R1710
    """REST: return one job's log as JSON."""
    match = request.match_info
    job_log = await _get_job_log(
        request.app, int(match['batch_id']), int(match['job_id']), userdata['username'])
    return web.json_response(job_log)
async def _query_batches(request, user):
    """Return one page (up to 50) of `user`'s batches as ``(batches, last_batch_id)``.

    The ``q`` query parameter is a whitespace-separated list of terms:
    ``key=value`` and ``has:key`` match batch attributes; the bare words
    open/closed/complete/running/cancelled/failure/success match batch
    state; a leading ``!`` negates a term.  ``last_batch_id`` implements
    keyset pagination (batches are listed newest-id first) and is None on
    the last page.  On an invalid term, an error message is flashed to the
    session and ``([], None)`` is returned.
    """
    db = request.app['db']

    where_conditions = ['user = %s', 'NOT deleted']
    where_args = [user]

    # keyset pagination: ids descend, so resume strictly below the cursor
    last_batch_id = request.query.get('last_batch_id')
    if last_batch_id is not None:
        last_batch_id = int(last_batch_id)
        where_conditions.append('(id < %s)')
        where_args.append(last_batch_id)

    q = request.query.get('q', '')
    terms = q.split()
    for t in terms:
        # a leading '!' negates the term
        if t[0] == '!':
            negate = True
            t = t[1:]
        else:
            negate = False

        if '=' in t:
            # attribute equality: key=value
            k, v = t.split('=', 1)
            condition = '''
(EXISTS (SELECT * FROM `batch_attributes`
WHERE `batch_attributes`.batch_id = id AND
`batch_attributes`.`key` = %s AND
`batch_attributes`.`value` = %s))
'''
            args = [k, v]
        elif t.startswith('has:'):
            # attribute presence: has:key
            k = t[4:]
            condition = '''
(EXISTS (SELECT * FROM `batch_attributes`
WHERE `batch_attributes`.batch_id = id AND
`batch_attributes`.`key` = %s))
'''
            args = [k]
        elif t == 'open':
            condition = "(`state` = 'open')"
            args = []
        elif t == 'closed':
            condition = "(`state` != 'open')"
            args = []
        elif t == 'complete':
            condition = "(`state` = 'complete')"
            args = []
        elif t == 'running':
            condition = "(`state` = 'running')"
            args = []
        elif t == 'cancelled':
            condition = '(cancelled)'
            args = []
        elif t == 'failure':
            condition = '(n_failed > 0)'
            args = []
        elif t == 'success':
            # need complete because there might be no jobs
            condition = "(`state` = 'complete' AND n_succeeded = n_jobs)"
            args = []
        else:
            session = await aiohttp_session.get_session(request)
            set_message(session, f'Invalid search term: {t}.', 'error')
            return ([], None)

        if negate:
            condition = f'(NOT {condition})'

        where_conditions.append(condition)
        where_args.extend(args)

    # joins compute the aggregated resource cost per batch
    sql = f'''
SELECT batches.*, SUM(`usage` * rate) AS cost
FROM batches
LEFT JOIN aggregated_batch_resources
ON batches.id = aggregated_batch_resources.batch_id
LEFT JOIN resources
ON aggregated_batch_resources.resource = resources.resource
WHERE {' AND '.join(where_conditions)}
GROUP BY batches.id
ORDER BY batches.id DESC
LIMIT 50;
'''
    sql_args = where_args

    batches = [batch_record_to_dict(batch)
               async for batch
               in db.select_and_fetchall(sql, sql_args)]

    # a full page implies there may be more; expose the cursor
    if len(batches) == 50:
        last_batch_id = batches[-1]['id']
    else:
        last_batch_id = None

    return (batches, last_batch_id)
@routes.get('/api/v1alpha/batches')
@prom_async_time(REQUEST_TIME_GET_BATCHES)
@rest_authenticated_users_only
async def get_batches(request, userdata):
    """REST: list a page of the caller's batches."""
    batches, last_batch_id = await _query_batches(request, userdata['username'])
    body = {'batches': batches}
    if last_batch_id is not None:
        body['last_batch_id'] = last_batch_id
    return web.json_response(body)
def check_service_account_permissions(user, sa):
    """Raise HTTPBadRequest unless `user` may run jobs as service account `sa`.

    `sa` is None (no service account requested) or a dict with 'name' and
    'namespace'.  Only the 'ci' and 'test' users may use service accounts,
    and each is limited to specific accounts/namespaces.
    """
    if sa is None:
        return
    if user == 'ci' and sa['name'] in ('ci-agent', 'admin'):
        # real-ci needs access to all namespaces
        if DEFAULT_NAMESPACE == 'default' or sa['namespace'] == BATCH_PODS_NAMESPACE:
            return
    if user == 'test' and sa['name'] == 'test-batch-sa' and sa['namespace'] == BATCH_PODS_NAMESPACE:
        return
    raise web.HTTPBadRequest(reason=f'unauthorized service account {(sa["namespace"], sa["name"])} for user {user}')
@routes.post('/api/v1alpha/batches/{batch_id}/jobs/create')
@prom_async_time(REQUEST_TIME_POST_CREATE_JOBS)
@rest_authenticated_users_only
async def create_jobs(request, userdata):
    """REST: add a bunch of jobs to an open batch.

    Validates the job specs, fills in default resources, optionally writes
    the full specs to GCS (newer format versions require contiguous job
    ids), then inserts job rows and staging counters in one transaction.
    Re-inserting the same bunch is tolerated (duplicate-key inserts into
    `jobs` are treated as an idempotent retry).
    """
    app = request.app
    db = app['db']
    log_store = app['log_store']
    worker_type = app['worker_type']
    worker_cores = app['worker_cores']
    batch_id = int(request.match_info['batch_id'])
    user = userdata['username']

    # restrict to what's necessary; in particular, drop the session
    # which is sensitive
    userdata = {
        'username': user,
        'gsa_key_secret_name': userdata['gsa_key_secret_name'],
        'tokens_secret_name': userdata['tokens_secret_name']
    }

    async with LoggingTimer(f'batch {batch_id} create jobs') as timer:
        async with timer.step('fetch batch'):
            record = await db.select_and_fetchone(
                '''
SELECT `state`, format_version FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
''',
                (user, batch_id))

        if not record:
            raise web.HTTPNotFound()
        if record['state'] != 'open':
            raise web.HTTPBadRequest(reason=f'batch {batch_id} is not open')
        batch_format_version = BatchFormatVersion(record['format_version'])

        async with timer.step('get request json'):
            job_specs = await request.json()

        async with timer.step('validate job_specs'):
            try:
                validate_jobs(job_specs)
            except ValidationError as e:
                raise web.HTTPBadRequest(reason=e.reason)

        async with timer.step('build db args'):
            spec_writer = SpecWriter(log_store, batch_id)

            jobs_args = []
            job_parents_args = []
            job_attributes_args = []

            n_ready_jobs = 0
            ready_cores_mcpu = 0
            n_ready_cancellable_jobs = 0
            ready_cancellable_cores_mcpu = 0

            prev_job_idx = None
            start_job_id = None

            for spec in job_specs:
                job_id = spec['job_id']
                parent_ids = spec.pop('parent_ids', [])
                always_run = spec.pop('always_run', False)

                # with full specs in GCS, attributes are stored relationally
                # and stripped from the spec; otherwise they stay inline
                if batch_format_version.has_full_spec_in_gcs():
                    attributes = spec.pop('attributes', None)
                else:
                    attributes = spec.get('attributes')

                id = (batch_id, job_id)

                if start_job_id is None:
                    start_job_id = job_id

                # GCS spec files are indexed by (token, start_job_id), so a
                # bunch must have contiguous job ids
                if batch_format_version.has_full_spec_in_gcs() and prev_job_idx:
                    if job_id != prev_job_idx + 1:
                        raise web.HTTPBadRequest(
                            reason=f'noncontiguous job ids found in the spec: {prev_job_idx} -> {job_id}')
                prev_job_idx = job_id

                resources = spec.get('resources')
                if not resources:
                    resources = {}
                    spec['resources'] = resources
                if 'cpu' not in resources:
                    resources['cpu'] = BATCH_JOB_DEFAULT_CPU
                if 'memory' not in resources:
                    resources['memory'] = BATCH_JOB_DEFAULT_MEMORY

                req_cores_mcpu = parse_cpu_in_mcpu(resources['cpu'])
                req_memory_bytes = parse_memory_in_bytes(resources['memory'])

                if req_cores_mcpu == 0:
                    raise web.HTTPBadRequest(
                        reason=f'bad resource request for job {id}: '
                        f'cpu cannot be 0')

                # round the cpu request up to cover the memory request and
                # to a schedulable (packable) core fraction
                cores_mcpu = adjust_cores_for_memory_request(req_cores_mcpu, req_memory_bytes, worker_type)
                cores_mcpu = adjust_cores_for_packability(cores_mcpu)

                if cores_mcpu > worker_cores * 1000:
                    total_memory_available = worker_memory_per_core_gb(worker_type) * worker_cores
                    raise web.HTTPBadRequest(
                        reason=f'resource requests for job {id} are unsatisfiable: '
                        f'requested: cpu={resources["cpu"]}, memory={resources["memory"]} '
                        f'maximum: cpu={worker_cores}, memory={total_memory_available}G')

                secrets = spec.get('secrets')
                if not secrets:
                    secrets = []
                # only 'ci' may attach arbitrary secrets.  (The original
                # per-secret loop after this check was unreachable — this
                # check already raises for any non-ci user with secrets —
                # and has been removed.)
                if len(secrets) != 0 and user != 'ci':
                    secrets = [(secret["namespace"], secret["name"]) for secret in secrets]
                    raise web.HTTPBadRequest(reason=f'unauthorized secret {secrets} for user {user}')
                spec['secrets'] = secrets

                # every job gets the user's GSA key mounted
                secrets.append({
                    'namespace': BATCH_PODS_NAMESPACE,
                    'name': userdata['gsa_key_secret_name'],
                    'mount_path': '/gsa-key',
                    'mount_in_copy': True
                })

                sa = spec.get('service_account')
                check_service_account_permissions(user, sa)

                env = spec.get('env')
                if not env:
                    env = []
                    spec['env'] = env

                # jobs with no parents start Ready; they contribute to the
                # schedulable (and, unless always_run, cancellable) totals
                if len(parent_ids) == 0:
                    state = 'Ready'
                    n_ready_jobs += 1
                    ready_cores_mcpu += cores_mcpu
                    if not always_run:
                        n_ready_cancellable_jobs += 1
                        ready_cancellable_cores_mcpu += cores_mcpu
                else:
                    state = 'Pending'

                spec_writer.add(json.dumps(spec))
                db_spec = batch_format_version.db_spec(spec)

                jobs_args.append(
                    (batch_id, job_id, state, json.dumps(db_spec),
                     always_run, cores_mcpu, len(parent_ids)))

                for parent_id in parent_ids:
                    job_parents_args.append(
                        (batch_id, job_id, parent_id))

                if attributes:
                    for k, v in attributes.items():
                        job_attributes_args.append(
                            (batch_id, job_id, k, v))

        if batch_format_version.has_full_spec_in_gcs():
            async with timer.step('write spec to gcs'):
                await spec_writer.write()

        rand_token = random.randint(0, app['n_tokens'] - 1)
        n_jobs = len(job_specs)

        async with timer.step('insert jobs'):
            @transaction(db)
            async def insert(tx):
                try:
                    await tx.execute_many('''
INSERT INTO jobs (batch_id, job_id, state, spec, always_run, cores_mcpu, n_pending_parents)
VALUES (%s, %s, %s, %s, %s, %s, %s);
''',
                                          jobs_args)
                except pymysql.err.IntegrityError as err:
                    # 1062 ER_DUP_ENTRY https://dev.mysql.com/doc/refman/5.7/en/server-error-reference.html#error_er_dup_entry
                    if err.args[0] == 1062:
                        log.info(f'bunch containing job {(batch_id, jobs_args[0][1])} already inserted ({err})')
                        return
                    raise
                try:
                    await tx.execute_many('''
INSERT INTO `job_parents` (batch_id, job_id, parent_id)
VALUES (%s, %s, %s);
''',
                                          job_parents_args)
                except pymysql.err.IntegrityError as err:
                    # 1062 ER_DUP_ENTRY https://dev.mysql.com/doc/refman/5.7/en/server-error-reference.html#error_er_dup_entry
                    if err.args[0] == 1062:
                        raise web.HTTPBadRequest(
                            text=f'bunch contains job with duplicated parents ({err})')
                    raise
                await tx.execute_many('''
INSERT INTO `job_attributes` (batch_id, job_id, `key`, `value`)
VALUES (%s, %s, %s, %s);
''',
                                      job_attributes_args)
                # token-sharded staging counters avoid hot rows under
                # concurrent bunch inserts
                await tx.execute_update('''
INSERT INTO batches_staging (batch_id, token, n_jobs, n_ready_jobs, ready_cores_mcpu)
VALUES (%s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
n_jobs = n_jobs + %s,
n_ready_jobs = n_ready_jobs + %s,
ready_cores_mcpu = ready_cores_mcpu + %s;
''',
                                        (batch_id, rand_token,
                                         n_jobs, n_ready_jobs, ready_cores_mcpu,
                                         n_jobs, n_ready_jobs, ready_cores_mcpu))
                await tx.execute_update('''
INSERT INTO batch_cancellable_resources (batch_id, token, n_ready_cancellable_jobs, ready_cancellable_cores_mcpu)
VALUES (%s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
n_ready_cancellable_jobs = n_ready_cancellable_jobs + %s,
ready_cancellable_cores_mcpu = ready_cancellable_cores_mcpu + %s;
''',
                                        (batch_id, rand_token,
                                         n_ready_cancellable_jobs, ready_cancellable_cores_mcpu,
                                         n_ready_cancellable_jobs, ready_cancellable_cores_mcpu))
                if batch_format_version.has_full_spec_in_gcs():
                    await tx.execute_update('''
INSERT INTO batch_bunches (batch_id, token, start_job_id)
VALUES (%s, %s, %s);
''',
                                            (batch_id, spec_writer.token, start_job_id))

            try:
                await insert()  # pylint: disable=no-value-for-parameter
            except aiohttp.web.HTTPException:
                raise
            except Exception as err:
                # FIX: the original f-string segments were concatenated
                # without separators, producing an unreadable message
                raise ValueError(f'encountered exception while inserting a bunch: '
                                 f'jobs_args={json.dumps(jobs_args)}, '
                                 f'job_parents_args={json.dumps(job_parents_args)}') from err

    return web.Response()
@routes.post('/api/v1alpha/batches/create')
@prom_async_time(REQUEST_TIME_POST_CREATE_BATCH)
@rest_authenticated_users_only
async def create_batch(request, userdata):
    """REST: create a new (open) batch; returns ``{'id': <batch id>}``.

    Creation is idempotent on the client-supplied ``token``: if a batch
    with the same (token, user) already exists, its id is returned instead
    of inserting a new row.  The caller must be a member of the requested
    billing project.
    """
    app = request.app
    db = app['db']

    batch_spec = await request.json()
    try:
        validate_batch(batch_spec)
    except ValidationError as e:
        raise web.HTTPBadRequest(reason=e.reason)

    user = userdata['username']

    # restrict to what's necessary; in particular, drop the session
    # which is sensitive
    userdata = {
        'username': user,
        'gsa_key_secret_name': userdata['gsa_key_secret_name'],
        'tokens_secret_name': userdata['tokens_secret_name']
    }

    billing_project = batch_spec['billing_project']
    token = batch_spec['token']

    attributes = batch_spec.get('attributes')

    @transaction(db)
    async def insert(tx):
        # membership check; shared lock keeps the row stable for the txn
        rows = tx.execute_and_fetchall(
            '''
SELECT * FROM billing_project_users
WHERE billing_project = %s AND user = %s
LOCK IN SHARE MODE;
''',
            (billing_project, user))
        rows = [row async for row in rows]
        if len(rows) != 1:
            assert len(rows) == 0
            raise web.HTTPForbidden(reason=f'unknown billing project {billing_project}')

        # idempotency: a batch with this token already exists => reuse it
        maybe_batch = await tx.execute_and_fetchone(
            '''
SELECT * FROM batches
WHERE token = %s AND user = %s FOR UPDATE;
''',
            (token, user))
        if maybe_batch is not None:
            return maybe_batch['id']

        now = time_msecs()
        id = await tx.execute_insertone(
            '''
INSERT INTO batches (userdata, user, billing_project, attributes, callback, n_jobs, time_created, token, state, format_version)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
''',
            (json.dumps(userdata), user, billing_project, json.dumps(attributes),
             batch_spec.get('callback'), batch_spec['n_jobs'],
             now, token, 'open', BATCH_FORMAT_VERSION))

        if attributes:
            await tx.execute_many(
                '''
INSERT INTO `batch_attributes` (batch_id, `key`, `value`)
VALUES (%s, %s, %s)
''',
                [(id, k, v) for k, v in attributes.items()])
        return id
    id = await insert()  # pylint: disable=no-value-for-parameter
    return web.json_response({'id': id})
async def _get_batch(app, batch_id, user):
    """Load one of `user`'s batches, with aggregated cost; 404 if absent."""
    sql = '''
SELECT batches.*, SUM(`usage` * rate) AS cost FROM batches
LEFT JOIN aggregated_batch_resources
ON batches.id = aggregated_batch_resources.batch_id
LEFT JOIN resources
ON aggregated_batch_resources.resource = resources.resource
WHERE user = %s AND id = %s AND NOT deleted
GROUP BY batches.id;
'''
    record = await app['db'].select_and_fetchone(sql, (user, batch_id))
    if not record:
        raise web.HTTPNotFound()
    return batch_record_to_dict(record)
async def _cancel_batch(app, batch_id, user):
    """Cancel one of `user`'s closed batches and wake the cancel loop.

    Raises HTTPNotFound for an unknown batch and HTTPBadRequest for a batch
    that is still open.
    """
    db = app['db']

    batch = await db.select_and_fetchone(
        '''
SELECT `state` FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
''',
        (user, batch_id))
    if not batch:
        raise web.HTTPNotFound()
    if batch['state'] == 'open':
        raise web.HTTPBadRequest(reason=f'cannot cancel open batch {batch_id}')

    await db.just_execute('CALL cancel_batch(%s);', (batch_id,))
    app['cancel_batch_state_changed'].set()
    return web.Response()
async def _delete_batch(app, batch_id, user):
    """Cancel then soft-delete one of `user`'s batches; 404 if absent."""
    db = app['db']

    batch = await db.select_and_fetchone(
        '''
SELECT `state` FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
''',
        (user, batch_id))
    if not batch:
        raise web.HTTPNotFound()

    await db.just_execute('CALL cancel_batch(%s);', (batch_id,))
    await db.execute_update('UPDATE batches SET deleted = 1 WHERE id = %s;', (batch_id,))

    if batch['state'] == 'running':
        app['delete_batch_state_changed'].set()
@routes.get('/api/v1alpha/batches/{batch_id}')
@prom_async_time(REQUEST_TIME_POST_GET_BATCH)
@rest_authenticated_users_only
async def get_batch(request, userdata):
    """REST: return one batch owned by the caller."""
    batch = await _get_batch(
        request.app, int(request.match_info['batch_id']), userdata['username'])
    return web.json_response(batch)
@routes.patch('/api/v1alpha/batches/{batch_id}/cancel')
@prom_async_time(REQUEST_TIME_PATCH_CANCEL_BATCH)
@rest_authenticated_users_only
async def cancel_batch(request, userdata):
    """REST: cancel one of the caller's batches."""
    await _cancel_batch(
        request.app, int(request.match_info['batch_id']), userdata['username'])
    return web.Response()
@routes.patch('/api/v1alpha/batches/{batch_id}/close')
@prom_async_time(REQUEST_TIME_PATCH_CLOSE_BATCH)
@rest_authenticated_users_only
async def close_batch(request, userdata):
    """REST: close an open batch so the driver can start scheduling it.

    Runs the ``close_batch`` stored procedure (which verifies the number of
    submitted jobs matches the declared n_jobs), then notifies the batch
    driver over its internal API.
    """
    batch_id = int(request.match_info['batch_id'])
    user = userdata['username']

    app = request.app
    db = app['db']

    record = await db.select_and_fetchone(
        '''
SELECT 1 FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
''',
        (user, batch_id))
    if not record:
        raise web.HTTPNotFound()

    try:
        now = time_msecs()
        await check_call_procedure(
            db, 'CALL close_batch(%s, %s);', (batch_id, now))
    except CallError as e:
        # 2: wrong number of jobs
        if e.rv['rc'] == 2:
            expected_n_jobs = e.rv['expected_n_jobs']
            actual_n_jobs = e.rv['actual_n_jobs']
            raise web.HTTPBadRequest(
                reason=f'wrong number of jobs: expected {expected_n_jobs}, actual {actual_n_jobs}')
        raise

    # tell the driver the batch is ready to schedule
    async with in_cluster_ssl_client_session(
            raise_for_status=True, timeout=aiohttp.ClientTimeout(total=60)) as session:
        await request_retry_transient_errors(
            session, 'PATCH',
            deploy_config.url('batch-driver', f'/api/v1alpha/batches/{user}/{batch_id}/close'),
            headers=app['driver_headers'])

    return web.Response()
@routes.delete('/api/v1alpha/batches/{batch_id}')
@prom_async_time(REQUEST_TIME_DELETE_BATCH)
@rest_authenticated_users_only
async def delete_batch(request, userdata):
    """REST: delete (cancel + soft-delete) one of the caller's batches."""
    await _delete_batch(
        request.app, int(request.match_info['batch_id']), userdata['username'])
    return web.Response()
@routes.get('/batches/{batch_id}')
@prom_async_time(REQUEST_TIME_GET_BATCH_UI)
@web_authenticated_users_only()
async def ui_batch(request, userdata):
    """UI: render one batch with its first page of jobs."""
    app = request.app
    batch_id = int(request.match_info['batch_id'])
    user = userdata['username']

    batch = await _get_batch(app, batch_id, user)
    jobs, last_job_id = await _query_batch_jobs(request, batch_id)
    for job in jobs:
        job['duration'] = humanize_timedelta_msecs(job['duration'])
    batch['jobs'] = jobs

    page_context = {
        'batch': batch,
        'q': request.query.get('q'),
        'last_job_id': last_job_id
    }
    return await render_template('batch', request, userdata, 'batch.html', page_context)
@routes.post('/batches/{batch_id}/cancel')
@prom_async_time(REQUEST_TIME_POST_CANCEL_BATCH_UI)
@check_csrf_token
@web_authenticated_users_only(redirect=False)
async def ui_cancel_batch(request, userdata):
    """UI: cancel a batch, flash a confirmation, and redirect to /batches."""
    batch_id = int(request.match_info['batch_id'])
    await _cancel_batch(request.app, batch_id, userdata['username'])
    session = await aiohttp_session.get_session(request)
    set_message(session, f'Batch {batch_id} cancelled.', 'info')
    raise web.HTTPFound(location=request.app.router['batches'].url_for())
@routes.post('/batches/{batch_id}/delete')
@prom_async_time(REQUEST_TIME_POST_DELETE_BATCH_UI)
@check_csrf_token
@web_authenticated_users_only(redirect=False)
async def ui_delete_batch(request, userdata):
    """UI: delete a batch, flash a confirmation, and redirect to /batches."""
    batch_id = int(request.match_info['batch_id'])
    await _delete_batch(request.app, batch_id, userdata['username'])
    session = await aiohttp_session.get_session(request)
    set_message(session, f'Batch {batch_id} deleted.', 'info')
    raise web.HTTPFound(location=request.app.router['batches'].url_for())
@routes.get('/batches', name='batches')
@prom_async_time(REQUEST_TIME_GET_BATCHES_UI)
@web_authenticated_users_only()
async def ui_batches(request, userdata):
    """UI: render a page of the caller's batches."""
    batches, last_batch_id = await _query_batches(request, userdata['username'])
    page_context = {
        'batches': batches,
        'q': request.query.get('q'),
        'last_batch_id': last_batch_id
    }
    return await render_template('batch', request, userdata, 'batches.html', page_context)
async def _get_job(app, batch_id, job_id, user):
    """Return the full job dict (record + status + spec + attributes).

    Raises HTTPNotFound when the job does not exist or the batch is not
    owned by `user`.  Status, spec and attributes are fetched concurrently.
    """
    db = app['db']

    record = await db.select_and_fetchone('''
SELECT jobs.*, ip_address, format_version, SUM(`usage` * rate) AS cost
FROM jobs
INNER JOIN batches
ON jobs.batch_id = batches.id
LEFT JOIN attempts
ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id AND jobs.attempt_id = attempts.attempt_id
LEFT JOIN instances
ON attempts.instance_name = instances.name
LEFT JOIN aggregated_job_resources
ON jobs.batch_id = aggregated_job_resources.batch_id AND
jobs.job_id = aggregated_job_resources.job_id
LEFT JOIN resources
ON aggregated_job_resources.resource = resources.resource
WHERE user = %s AND jobs.batch_id = %s AND NOT deleted AND jobs.job_id = %s
GROUP BY jobs.batch_id, jobs.job_id;
''',
                                          (user, batch_id, job_id))
    if not record:
        raise web.HTTPNotFound()

    full_status, full_spec, attributes = await asyncio.gather(
        _get_full_job_status(app, record),
        _get_full_job_spec(app, record),
        _get_attributes(app, record)
    )

    # FIX: _get_attributes returns None for older format versions whose spec
    # has no attributes; guard before looking up the job name to avoid
    # AttributeError on None
    name = attributes.get('name') if attributes else None
    job = job_record_to_dict(record, name)
    job['status'] = full_status
    job['spec'] = full_spec
    if attributes:
        job['attributes'] = attributes
    return job
async def _get_attempts(app, batch_id, job_id, user):
    """Return the job's attempts with humanized times, or None if never attempted.

    Raises HTTPNotFound when the job does not exist (or the batch is not
    `user`'s).  start/end times are replaced with display strings and a
    `duration` is added, using "now" as the end for a still-running attempt.
    """
    db = app['db']

    attempts = db.select_and_fetchall('''
SELECT attempts.*
FROM jobs
INNER JOIN batches ON jobs.batch_id = batches.id
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id and jobs.job_id = attempts.job_id
WHERE user = %s AND jobs.batch_id = %s AND NOT deleted AND jobs.job_id = %s;
''',
                                      (user, batch_id, job_id))

    attempts = [attempt async for attempt in attempts]
    if len(attempts) == 0:
        raise web.HTTPNotFound()
    # the LEFT JOIN yields a single all-NULL attempt row when the job
    # exists but has never been attempted
    if len(attempts) == 1 and attempts[0]['attempt_id'] is None:
        return None

    for attempt in attempts:
        # keep the raw msec values in locals; the dict entries are
        # overwritten with (or stripped of) display strings below
        start_time = attempt['start_time']
        if start_time is not None:
            attempt['start_time'] = time_msecs_str(start_time)
        else:
            del attempt['start_time']

        end_time = attempt['end_time']
        if end_time is not None:
            attempt['end_time'] = time_msecs_str(end_time)
        else:
            del attempt['end_time']

        if start_time is not None:
            # elapsed time if attempt is still running
            if end_time is None:
                end_time = time_msecs()
            duration_msecs = max(end_time - start_time, 0)
            attempt['duration'] = humanize_timedelta_msecs(duration_msecs)

    return attempts
@routes.get('/api/v1alpha/batches/{batch_id}/jobs/{job_id}/attempts')
@prom_async_time(REQUEST_TIME_GET_ATTEMPTS)
@rest_authenticated_users_only
async def get_attempts(request, userdata):
    """REST endpoint: list the attempts of a single job as JSON."""
    info = request.match_info
    attempts = await _get_attempts(
        request.app, int(info['batch_id']), int(info['job_id']), userdata['username'])
    return web.json_response(attempts)
@routes.get('/api/v1alpha/batches/{batch_id}/jobs/{job_id}')
@prom_async_time(REQUEST_TIME_GET_JOB)
@rest_authenticated_users_only
async def get_job(request, userdata):
    """REST endpoint: return the full record of a single job as JSON."""
    info = request.match_info
    status = await _get_job(
        request.app, int(info['batch_id']), int(info['job_id']), userdata['username'])
    return web.json_response(status)
@routes.get('/batches/{batch_id}/jobs/{job_id}')
@prom_async_time(REQUEST_TIME_GET_JOB_UI)
@web_authenticated_users_only()
async def ui_get_job(request, userdata):
    """Web UI: render the detail page of a single job."""
    app = request.app
    batch_id = int(request.match_info['batch_id'])
    job_id = int(request.match_info['job_id'])
    user = userdata['username']

    # Fetch the status, the attempts and the log concurrently.
    job_status, attempts, job_log = await asyncio.gather(
        _get_job(app, batch_id, job_id, user),
        _get_attempts(app, batch_id, job_id, user),
        _get_job_log(app, batch_id, job_id, user))

    page_context = {
        'batch_id': batch_id,
        'job_id': job_id,
        'job_log': job_log,
        'attempts': attempts,
        'job_status': json.dumps(job_status, indent=2)
    }
    return await render_template('batch', request, userdata, 'job.html', page_context)
async def _query_billing(request):
db = request.app['db']
date_format = '%m/%d/%Y'
default_start = datetime.datetime.now().replace(day=1)
default_start = datetime.datetime.strftime(default_start, date_format)
default_end = datetime.datetime.now()
default_end = datetime.datetime.strftime(default_end, date_format)
async def parse_error(msg):
session = await aiohttp_session.get_session(request)
set_message(session, msg, 'error')
return ([], default_start, default_end)
start_query = request.query.get('start', default_start)
try:
start = datetime.datetime.strptime(start_query, date_format)
start = start.timestamp() * 1000
except ValueError:
return await parse_error(f"Invalid value for start '{start_query}'; must be in the format of MM/DD/YYYY.")
end_query = request.query.get('end', default_end)
try:
end = datetime.datetime.strptime(end_query, date_format)
end = (end + datetime.timedelta(days=1)).timestamp() * 1000
except ValueError:
return await parse_error(f"Invalid value for end '{end_query}'; must be in the format of MM/DD/YYYY.")
if start > end:
return await parse_error(f'Invalid search; start must be earlier than end.')
sql = f'''
SELECT
billing_project,
`user`,
CAST(SUM(IF(format_version < 3, msec_mcpu, 0)) AS SIGNED) as msec_mcpu,
SUM(IF(format_version >= 3, `usage` * rate, NULL)) as cost
FROM batches
LEFT JOIN aggregated_batch_resources
ON aggregated_batch_resources.batch_id = batches.id
LEFT JOIN resources
ON resources.resource = aggregated_batch_resources.resource
WHERE `time_completed` >= %s AND `time_completed` <= %s
GROUP BY billing_project, `user`;
'''
sql_args = (start, end)
def billing_record_to_dict(record):
cost_msec_mcpu = cost_from_msec_mcpu(record['msec_mcpu'])
cost_resources = record['cost']
record['cost'] = coalesce(cost_msec_mcpu, 0) + coalesce(cost_resources, 0)
del record['msec_mcpu']
return record
billing = [billing_record_to_dict(record)
async for record
in db.select_and_fetchall(sql, sql_args)]
return (billing, start_query, end_query)
@routes.get('/billing')
@prom_async_time(REQUEST_TIME_GET_BILLING_UI)
@web_authenticated_developers_only()
async def ui_get_billing(request, userdata):
    """Web UI: show billing aggregated by project, by user, and by both."""
    billing, start, end = await _query_billing(request)

    cost_by_user = {}
    cost_by_project = {}
    for record in billing:
        project = record['billing_project']
        user = record['user']
        cost = record['cost']
        cost_by_user[user] = cost_by_user.get(user, 0) + cost
        cost_by_project[project] = cost_by_project.get(project, 0) + cost

    billing_by_project = sorted(
        ({'billing_project': project, 'cost': f'${cost:.4f}'}
         for project, cost in cost_by_project.items()),
        key=lambda record: record['billing_project'])

    billing_by_user = sorted(
        ({'user': user, 'cost': f'${cost:.4f}'}
         for user, cost in cost_by_user.items()),
        key=lambda record: record['user'])

    billing_by_project_user = sorted(
        ({'billing_project': record['billing_project'],
          'user': record['user'],
          'cost': f'${record["cost"]:.4f}'}
         for record in billing),
        key=lambda record: (record['billing_project'], record['user']))

    page_context = {
        'billing_by_project': billing_by_project,
        'billing_by_user': billing_by_user,
        'billing_by_project_user': billing_by_project_user,
        'start': start,
        'end': end
    }
    return await render_template('batch', request, userdata, 'billing.html', page_context)
@routes.get('/billing_projects')
@prom_async_time(REQUEST_TIME_GET_BILLING_PROJECTS_UI)
@web_authenticated_developers_only()
async def ui_get_billing_projects(request, userdata):
    """Web UI: list every billing project with its member users."""
    db = request.app['db']

    @transaction(db, read_only=True)
    async def select(tx):
        # Read projects, then memberships, under shared locks so the two
        # result sets are consistent with each other.
        projects = {}
        async for record in tx.execute_and_fetchall(
                'SELECT * FROM billing_projects LOCK IN SHARE MODE;'):
            projects[record['name']] = []
        async for record in tx.execute_and_fetchall(
                'SELECT * FROM billing_project_users LOCK IN SHARE MODE;'):
            projects[record['billing_project']].append(record['user'])
        return projects

    billing_projects = await select()  # pylint: disable=no-value-for-parameter
    page_context = {
        'billing_projects': billing_projects
    }
    return await render_template('batch', request, userdata, 'billing_projects.html', page_context)
@routes.post('/billing_projects/{billing_project}/users/{user}/remove')
@prom_async_time(REQUEST_TIME_POST_BILLING_PROJECT_REMOVE_USER_UI)
@check_csrf_token
@web_authenticated_developers_only(redirect=False)
async def post_billing_projects_remove_user(request, userdata):  # pylint: disable=unused-argument
    """Web UI: remove *user* from *billing_project*, then redirect back.

    Flashes an error if the project does not exist, or an info message if
    the user is not a member; on success the membership row is deleted.
    """
    db = request.app['db']
    billing_project = request.match_info['billing_project']
    user = request.match_info['user']

    session = await aiohttp_session.get_session(request)

    @transaction(db)
    async def delete(tx):
        # A single query both verifies the project exists and locks the
        # membership row (FOR UPDATE) so a concurrent add/remove of the
        # same membership cannot race with this transaction.
        row = await tx.execute_and_fetchone(
            '''
SELECT billing_projects.name as billing_project, user
FROM billing_projects
LEFT JOIN (SELECT * FROM billing_project_users
    WHERE billing_project = %s AND user = %s FOR UPDATE) AS t
  ON billing_projects.name = t.billing_project
WHERE billing_projects.name = %s;
''',
            (billing_project, user, billing_project))
        if not row:
            # No such project: flash an error and abort the transaction
            # by raising the redirect.
            set_message(session, f'No such billing project {billing_project}.', 'error')
            raise web.HTTPFound(deploy_config.external_url('batch', f'/billing_projects'))
        assert row['billing_project'] == billing_project

        if row['user'] is None:
            # LEFT JOIN produced NULL: project exists but user is not a member.
            set_message(session, f'User {user} is not member of billing project {billing_project}.', 'info')
            raise web.HTTPFound(deploy_config.external_url('batch', f'/billing_projects'))

        await tx.just_execute(
            '''
DELETE FROM billing_project_users
WHERE billing_project = %s AND user = %s;
''',
            (billing_project, user))
    await delete()  # pylint: disable=no-value-for-parameter

    set_message(session, f'Removed user {user} from billing project {billing_project}.', 'info')
    return web.HTTPFound(deploy_config.external_url('batch', f'/billing_projects'))
@routes.post('/billing_projects/{billing_project}/users/add')
@prom_async_time(REQUEST_TIME_POST_BILLING_PROJECT_ADD_USER_UI)
@check_csrf_token
@web_authenticated_developers_only(redirect=False)
async def post_billing_projects_add_user(request, userdata):  # pylint: disable=unused-argument
    """Web UI: add the posted *user* to *billing_project*, then redirect back.

    Flashes an error if the project does not exist, or an info message if
    the user is already a member; on success a membership row is inserted.
    """
    db = request.app['db']
    post = await request.post()
    user = post['user']
    billing_project = request.match_info['billing_project']

    session = await aiohttp_session.get_session(request)

    @transaction(db)
    async def insert(tx):
        # A single query both verifies the project exists and locks any
        # existing membership row (FOR UPDATE) so a concurrent add/remove
        # of the same membership cannot race with this transaction.
        row = await tx.execute_and_fetchone(
            '''
SELECT billing_projects.name as billing_project, user
FROM billing_projects
LEFT JOIN (SELECT * FROM billing_project_users
    WHERE billing_project = %s AND user = %s FOR UPDATE) AS t
  ON billing_projects.name = t.billing_project
WHERE billing_projects.name = %s;
''',
            (billing_project, user, billing_project))
        if row is None:
            set_message(session, f'No such billing project {billing_project}.', 'error')
            raise web.HTTPFound(deploy_config.external_url('batch', f'/billing_projects'))

        if row['user'] is not None:
            # Non-NULL join result: the membership already exists.
            set_message(session, f'User {user} is already member of billing project {billing_project}.', 'info')
            raise web.HTTPFound(deploy_config.external_url('batch', f'/billing_projects'))

        await tx.execute_insertone(
            '''
INSERT INTO billing_project_users(billing_project, user)
VALUES (%s, %s);
''',
            (billing_project, user))
    await insert()  # pylint: disable=no-value-for-parameter

    set_message(session, f'Added user {user} to billing project {billing_project}.', 'info')
    return web.HTTPFound(deploy_config.external_url('batch', f'/billing_projects'))
@routes.post('/billing_projects/create')
@prom_async_time(REQUEST_TIME_POST_CREATE_BILLING_PROJECT_UI)
@check_csrf_token
@web_authenticated_developers_only(redirect=False)
async def post_create_billing_projects(request, userdata):  # pylint: disable=unused-argument
    """Web UI: create the posted billing project, then redirect back.

    Flashes an error if a project with the same name already exists.
    """
    db = request.app['db']
    post = await request.post()
    billing_project = post['billing_project']

    session = await aiohttp_session.get_session(request)

    @transaction(db)
    async def insert(tx):
        # Lock any existing row with this name (FOR UPDATE) so two
        # concurrent creates cannot both pass the existence check.
        row = await tx.execute_and_fetchone(
            '''
SELECT 1 FROM billing_projects
WHERE name = %s
FOR UPDATE;
''',
            # fixed: was `(billing_project)` — a bare parenthesized string,
            # not the 1-tuple of query args the %s placeholder expects
            # (every other query in this file passes a tuple).
            (billing_project,))
        if row is not None:
            set_message(session, f'Billing project {billing_project} already exists.', 'error')
            raise web.HTTPFound(deploy_config.external_url('batch', '/billing_projects'))

        await tx.execute_insertone(
            '''
INSERT INTO billing_projects(name)
VALUES (%s);
''',
            (billing_project,))
    await insert()  # pylint: disable=no-value-for-parameter

    set_message(session, f'Added billing project {billing_project}.', 'info')
    return web.HTTPFound(deploy_config.external_url('batch', '/billing_projects'))
@routes.get('')
@routes.get('/')
@web_authenticated_users_only()
async def index(request, userdata):
    """Redirect the site root to the batches listing page."""
    raise web.HTTPFound(location=request.app.router['batches'].url_for())
async def cancel_batch_loop_body(app):
    """One iteration of the cancel loop: ask the batch driver to cancel.

    Returns True so run_if_changed waits for the next state change.
    """
    async with in_cluster_ssl_client_session(
            raise_for_status=True, timeout=aiohttp.ClientTimeout(total=5)) as session:
        await request_retry_transient_errors(
            session, 'POST',
            # was an f-string with no placeholders (ruff F541)
            deploy_config.url('batch-driver', '/api/v1alpha/batches/cancel'),
            headers=app['driver_headers'])

    should_wait = True
    return should_wait
async def delete_batch_loop_body(app):
    """One iteration of the delete loop: ask the batch driver to delete.

    Returns True so run_if_changed waits for the next state change.
    """
    async with in_cluster_ssl_client_session(
            raise_for_status=True, timeout=aiohttp.ClientTimeout(total=5)) as session:
        await request_retry_transient_errors(
            session, 'POST',
            # was an f-string with no placeholders (ruff F541)
            deploy_config.url('batch-driver', '/api/v1alpha/batches/delete'),
            headers=app['driver_headers'])

    should_wait = True
    return should_wait
async def on_startup(app):
    """Initialize shared state on the app and start the background loops."""
    # Thread pool for blocking work (handed to LogStore below).
    pool = concurrent.futures.ThreadPoolExecutor()
    app['blocking_pool'] = pool

    db = Database()
    await db.async_init()
    app['db'] = db

    # Deployment-wide configuration stored in the `globals` table.
    row = await db.select_and_fetchone(
        '''
SELECT worker_type, worker_cores, worker_disk_size_gb,
  instance_id, internal_token, n_tokens FROM globals;
''')

    app['worker_type'] = row['worker_type']
    app['worker_cores'] = row['worker_cores']
    app['worker_disk_size_gb'] = row['worker_disk_size_gb']
    app['n_tokens'] = row['n_tokens']

    instance_id = row['instance_id']
    log.info(f'instance_id {instance_id}')
    app['instance_id'] = instance_id

    # Bearer token used to authenticate requests to the batch driver.
    app['driver_headers'] = {
        'Authorization': f'Bearer {row["internal_token"]}'
    }

    credentials = google.oauth2.service_account.Credentials.from_service_account_file(
        '/gsa-key/key.json')
    app['log_store'] = LogStore(BATCH_BUCKET_NAME, WORKER_LOGS_BUCKET_NAME, instance_id, pool, credentials=credentials)

    # Long-running loops that forward cancel/delete work to the driver
    # whenever the corresponding event is set (see *_loop_body above).
    cancel_batch_state_changed = asyncio.Event()
    app['cancel_batch_state_changed'] = cancel_batch_state_changed
    asyncio.ensure_future(retry_long_running(
        'cancel_batch_loop',
        run_if_changed, cancel_batch_state_changed, cancel_batch_loop_body, app))

    delete_batch_state_changed = asyncio.Event()
    app['delete_batch_state_changed'] = delete_batch_state_changed
    asyncio.ensure_future(retry_long_running(
        'delete_batch_loop',
        run_if_changed, delete_batch_state_changed, delete_batch_loop_body, app))
async def on_cleanup(app):
    """Shut down the blocking thread pool when the web app exits."""
    app['blocking_pool'].shutdown()
def run():
    """Configure the aiohttp application and serve it (blocks forever)."""
    app = web.Application(client_max_size=HTTP_CLIENT_MAX_SIZE)

    setup_aiohttp_session(app)

    setup_aiohttp_jinja2(app, 'batch.front_end')
    setup_common_static_routes(routes)
    app.add_routes(routes)
    # Prometheus metrics endpoint.
    app.router.add_get("/metrics", server_stats)

    app.on_startup.append(on_startup)
    app.on_cleanup.append(on_cleanup)

    # Serve under the deploy-config path prefix, with in-cluster TLS.
    web.run_app(deploy_config.prefix_application(app,
                                                'batch',
                                                client_max_size=HTTP_CLIENT_MAX_SIZE),
                host='0.0.0.0',
                port=5000,
                access_log_class=AccessLogger,
                ssl_context=get_in_cluster_server_ssl_context())
|
import os
PIXEL = "\u2584"
def colored(red: int, green: int, blue: int, text: str) -> str:
    """Prefix *text* with the ANSI 24-bit foreground color escape sequence.

    NOTE(review): no reset code is appended, so the color persists for any
    following output — confirm callers rely on that.
    """
    escape = f"\033[38;2;{red};{green};{blue}m"
    return escape + text
def clear_console():
    """Clear the terminal with the platform-appropriate shell command."""
    command = "clear" if os.name == "posix" else "cls"
    os.system(command)
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from toolshare.models.tool import Tool
from toolshare.models.user import User
from toolshare.models.reservation import Reservation
from toolshare.views.base_controller import BaseController
from toolshare.forms.forms_tool import ToolRegistrationForm
from toolshare.forms.forms_tool import BorrowToolForm, ChangeToolForm,ChangeToolAvailabilityForm
import datetime
from datetime import date, timedelta as td
from django.contrib import messages
from django.utils import timezone
import pdb
import pytz
from toolshare.forms.forms_tool import ChangeAvailabilityForm
from toolshare.utils import EmailSender
# Create your views here.
class ToolController(BaseController):
    """Django views for registering, finding and managing shared tools."""

    # Number of tools shown per page in paginated listings.
    PAGE_SIZE = 4

    @staticmethod
    @login_required
    def register_tool(request):
        """Register a new tool owned by the current user.

        GET renders an empty form pre-filled with the user's pickup
        location; POST validates and saves the tool, then redirects to
        the owner's tool list.  An invalid POST falls through and
        re-renders the bound form with its errors.
        """
        user = User.objects.get(pk=request.user.id)
        if request.method == 'POST':
            pass
            registration_form = ToolRegistrationForm(request.POST,
                                                     request.FILES)
            if registration_form.is_valid():
                # Generate the new tool from the form without saving yet,
                # so owner/status can be filled in first.
                new_tool = registration_form.save(commit=False)
                new_tool.owner = user
                new_tool.status = 'A'
                if new_tool.shed is not None:
                    # Shed tools are always picked up at the shed.
                    new_tool.pickup_location = 'At Shed'
                new_tool.save()
                return redirect('/toolshare/list-owned-tools')
        else:
            # Show the registration form, defaulting the pickup location
            # to the user's own.
            dummy_tool = Tool()
            dummy_tool.pickup_location = user.pickup_location
            registration_form = ToolRegistrationForm(instance=dummy_tool)
            # Only offer sheds the user belongs to.
            registration_form.fields['shed'].queryset = user.shed_set
        return render(request, 'toolshare/tool-registration.html', {
            'registration_form': registration_form
        })

    @staticmethod
    @login_required
    def find_tool(request):
        """Search tools in the user's share-zone.

        Optional GET parameters: 'search_for' (name substring), and
        'from_date'/'to_date' (exclude tools with an active reservation
        overlapping that range).  Results are paginated.
        """
        user = User.objects.get(pk=request.user.id)
        if request.method == 'GET':
            search_for = request.GET.get('search_for')
            to_date = request.GET.get('to_date')
            from_date = request.GET.get('from_date')

            # Match empty fields to make the date query work: if only one
            # bound was supplied, use it for both.
            if to_date == '':
                to_date = from_date
            if from_date == '':
                from_date = to_date

            # Deactivated ('D') tools are never listed.
            tools = Tool.objects.all().exclude(status='D')
            if search_for is not None:
                tools = tools.filter(name__contains=search_for)

            # Find all active reservations overlapping the specified dates
            # and filter their tools from the results.
            if (to_date is not None and to_date != '') and (from_date is not None and from_date != ''):
                reservations = Reservation.objects.filter(start_date__lte=to_date,
                                                          end_date__gte=from_date,
                                                          status='A')
                for r in reservations:
                    tools = tools.exclude(reservation=r)

            # Only keep tools whose owner is in the requester's share-zone.
            tools_shared_zone = list()
            for tool in tools:
                owner = User.objects.get(pk=tool.owner.id)
                if owner.share_zone == user.share_zone:
                    tools_shared_zone.append(tool)
            tools = tools_shared_zone

            tool_paginator = Paginator(tools, ToolController.PAGE_SIZE)
            page = request.GET.get('page')
            try:
                tool_page = tool_paginator.page(page)
            except PageNotAnInteger:
                tool_page = tool_paginator.page(1)
            except EmptyPage:
                # If page is out of range (e.g. 9999),
                # deliver last page of results.
                tool_page = tool_paginator.page(tool_paginator.num_pages)

            # Echo the search parameters back to the template, defaulting
            # missing ones to empty strings.
            search_for = request.GET.get('search_for')
            if search_for is None:
                search_for = ''
            if from_date is None:
                from_date = ''
            if to_date is None:
                to_date = ''

            return render(request, 'toolshare/find_tool.html', {
                'tool_count': len(tools),
                'tool_page': tool_page,
                'search_for': search_for,
                'to_date': to_date,
                'from_date': from_date
            })
        return render(request, 'toolshare/find_tool.html')

    @staticmethod
    @login_required
    def list_owned_tools(request):
        """List the current user's own tools, paginated."""
        if request.method == 'GET':
            user = User.objects.get(pk=request.user.id)
            user_tools = user.tool_set.all()

            tool_paginator = Paginator(user_tools, ToolController.PAGE_SIZE)
            page = request.GET.get('page')
            try:
                tool_page = tool_paginator.page(page)
            except PageNotAnInteger:
                tool_page = tool_paginator.page(1)
            except EmptyPage:
                # If page is out of range (e.g. 9999),
                # deliver last page of results.
                tool_page = tool_paginator.page(tool_paginator.num_pages)

            return render(request, 'toolshare/list-owned-tools.html', {
                'tool_page': tool_page,
                'user_tools': user_tools
            })

    # NOTE(review): unlike its siblings this view has no @staticmethod /
    # @login_required decorators — confirm this is intentional.
    def tool_detail(request, tool_id):
        """Render the detail page of a single tool."""
        if request.method == 'GET':
            tool = Tool.objects.get(pk=tool_id)
            return render(request, 'toolshare/tool-detail.html', {
                'tool':tool
            })

    @staticmethod
    @login_required
    def change_tool_info(request, tool_id):
        """Edit a tool's attributes (everything except its name).

        An invalid POST falls through and re-renders the bound form.
        """
        tool = Tool.objects.get(pk= tool_id)
        if request.method == 'POST':
            changeToolInfo = ChangeToolForm(request.POST, request.FILES)
            if changeToolInfo.is_valid():
                # Copy the validated fields onto the existing tool rather
                # than saving the form's unsaved instance.
                newtool = changeToolInfo.save(commit=False)
                #tool.name = newtool.name
                tool.description = newtool.description
                tool.category = newtool.category
                tool.status = newtool.status
                tool.special_instructions = newtool.special_instructions
                tool.pickup_location = newtool.pickup_location
                if "my_picture" in request.FILES:
                    tool.picture = request.FILES['my_picture']
                tool.save()
                return redirect('/toolshare/tool-detail/%s'%tool_id)
        else:
            changeToolInfo = ChangeToolForm(instance=tool)
        return render(request, 'toolshare/tool-info.html',{
            'changeToolInfo' : changeToolInfo,
            'tool':tool
        })

    @staticmethod
    @login_required
    def change_tool_availability(request, tool_id):
        """Mark a date range of the tool as unavailable by creating an
        owner-reservation, cancelling conflicting reservations by others.
        """
        tool = Tool.objects.get(pk= tool_id)
        changeToolAvail_form = ChangeToolAvailabilityForm(requested_tool_id=tool_id)
        if request.method == 'POST':
            Flag = False
            sharedTool = Tool.objects.get(pk = tool_id)
            startdate = request.POST['start_date']
            startDate = datetime.datetime.strptime(startdate, "%m/%d/%Y").replace(tzinfo=pytz.UTC)
            enddate = request.POST['end_date']
            endDate = datetime.datetime.strptime(enddate, "%m/%d/%Y").replace(tzinfo=pytz.UTC)
            # Get all the reservation list of the tool; refuse the change
            # if someone else's 'RP' reservation overlaps the start date.
            # NOTE(review): both overlap tests below compare only startDate
            # — the second clause looks like it was meant to use endDate;
            # confirm intended overlap semantics.
            tool_reservations = Reservation.objects.filter(tool_id = tool_id)
            for reserve in tool_reservations:
                if reserve.status == 'RP' and reserve.borrower != request.user:
                    if (startDate >= reserve.start_date and startDate <= reserve.end_date) or (startDate <= reserve.start_date and startDate <= reserve.end_date):
                        messages.add_message(request, messages.ERROR, 'You cannot change the tool availability.')
                        Flag = False
                    else:
                        Flag = True
                else:
                    Flag = True
            if Flag:
                # Cancel other users' pending/approved overlapping
                # reservations, then record the owner-reservation.
                for reserve in tool_reservations:
                    if reserve.borrower != request.user:
                        if reserve.status == 'P' or reserve.status == 'A':
                            if (startDate >= reserve.start_date and startDate <= reserve.end_date) or (startDate <= reserve.start_date and startDate <= reserve.end_date):
                                reserve.cancel_msg = 'Your tool reservation has been canceled since the owner changed the availability.'
                                reserve.status = 'C'
                                reserve.save()
                newReservation = Reservation()
                newReservation.status = 'A'
                newReservation.tool = sharedTool
                newReservation.start_date = startDate
                newReservation.end_date = endDate
                newReservation.borrower = request.user
                newReservation.lender = request.user
                newReservation.save()
            return redirect('/toolshare/change-tool-availability/%s'%tool_id)
        else:
            return render(request, 'toolshare/change-tool-availability.html', {'tool': tool,'changeToolAvail_form': changeToolAvail_form,'tool_id': tool_id,'current_date': datetime.datetime.now().strftime('%m/%d/%Y')})

    @staticmethod
    @login_required
    def deactivate_tool(request, tool_id):
        """Mark the tool deactivated ('D') if the requester owns it."""
        tool = Tool.objects.get(pk=tool_id)
        user = User.objects.get(pk=request.user.id)
        if tool.owner == user:
            tool.status = 'D'
            tool.save()
        return redirect('/toolshare/tool-detail/%s'%tool_id)
        #return render(request, 'toolshare/tool-detail.html',{
        #    'tool':tool
        #})

    @staticmethod
    @login_required
    def change_availability_tool(request, tool_id):
        """Form-based variant of changing a tool's availability.

        Creates an owner-reservation for the chosen range and cancels
        ('C') approved or rejects ('R') pending overlapping reservations,
        notifying their holders by email.
        """
        tool = Tool.objects.get(pk= tool_id)
        user = User.objects.get(pk = request.user.id)
        if request.method == 'POST':
            change_avail_form = ChangeAvailabilityForm(request.POST, requested_tool_id=tool_id)
            if change_avail_form.is_valid():
                new_reservation = change_avail_form.save(commit=False)
                new_reservation.tool = tool
                new_reservation.borrower = user
                if tool.shed is not None:
                    # Shed tools are lent out by the shed coordinator.
                    new_reservation.lender = tool.shed.coordinator
                else:
                    new_reservation.lender = user
                new_reservation.status = 'A'
                # Cancel all approved and pending reservations that
                # overlap the new owner-reservation.
                reservations = Reservation.objects.filter(
                    tool_id=tool.id,
                    end_date__gt=new_reservation.start_date,
                    start_date__lt=new_reservation.end_date
                )
                for reservation in reservations:
                    if (new_reservation.start_date <= reservation.end_date and
                            new_reservation.end_date >= reservation.start_date):
                        if reservation.status == 'A':
                            reservation.status = 'C'
                            reservation.cancel_msg = 'The owner cancelled this reservation.'
                            EmailSender.send_cancel_request_email(reservation)
                        elif reservation.status == 'P':
                            reservation.status = 'R'
                            reservation.reject_msg = 'The owner rejected this reservation.'
                            EmailSender.send_approve_reject_request_email(reservation)
                        reservation.save()
                # Save the owner-reservation
                new_reservation.save()
                # Display the success message
                a_start = new_reservation.start_date.strftime('%m/%d/%Y')
                a_end = new_reservation.end_date.strftime('%m/%d/%Y')
                messages.add_message(request, messages.SUCCESS, 'You changed the availability of your tool between {0} - {1}. \n"{2}" reservations were cancelled.'.format(a_start, a_end, len(reservations)))
                return redirect('/toolshare/tool-detail/%s' % tool_id)
        else:
            change_avail_form = ChangeAvailabilityForm(requested_tool_id = tool_id)
        return render(request, 'toolshare/change-tool-availability.html', {
            'tool': tool,
            'changeToolAvail_form': change_avail_form,
            'tool_id': tool_id,
            'current_date': datetime.datetime.now().strftime('%m/%d/%Y')
        })

    @staticmethod
    @login_required
    def activate_deactivate_tool(request, tool_id):
        """Toggle the tool between active ('A') and deactivated ('D').

        Deactivation is refused while future active reservations exist.
        """
        tool = Tool.objects.get(pk=tool_id)
        if tool.status != 'D':
            now = timezone.now()
            reservations = Reservation.objects.filter(tool_id = tool.id, status ="A", start_date__gt = now)
            if len(reservations) > 0:
                messages.add_message(request, messages.WARNING, 'There are pending/approved reservations for the tool. Cancel the reservations first.')
                return redirect('/toolshare/tool-detail/%s'%tool_id)
            tool.status = 'D'
            tool.save()
            return redirect('/toolshare/tool-detail/%s'%tool_id)
        else:
            tool.status = 'A'
            tool.save()
            return redirect('/toolshare/tool-detail/%s'%tool_id)
from click.testing import CliRunner
import deepdanbooru as dd
from deepdanbooru.commands.add_images_to_project import parse_tags, read_tag_file, copy_image_to_project, add_images_to_project
import pytest
from pathlib import Path
import sqlite3
from unittest.mock import MagicMock, Mock, patch
def test_parse_tags():
    """parse_tags turns tab-separated "path<TAB>tags" text into pairs."""
    # A single line parses to one (filepath, tags) pair.
    assert parse_tags("""filepath\ttag1 tag2""") == [('filepath', 'tag1 tag2')]

    # Each line of a multi-line input produces a pair.
    assert parse_tags("""
file1\ttag1 tag2
file2\ttag1 tag2
""") == [
        ('file1', 'tag1 tag2'),
        ('file2', 'tag1 tag2'),
    ]

    # Lines without a tab separator are skipped.
    assert parse_tags("""
file1\ttag1 tag2
file2
""") == [
        ('file1', 'tag1 tag2'),
    ]

    # An empty tag name is preserved as an empty string.
    assert parse_tags("""
file1\t
""") == [
        ('file1', ''),
    ]
def test_read_tag_file():
    """read_tag_file loads and parses a tag file from disk."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        tag_file = './tags.txt'
        with open(tag_file, 'w') as handle:
            handle.write("file1\ttag1 tag2\n")
        assert read_tag_file(tag_file) == [('file1', 'tag1 tag2')]
def test_copy_image_to_project():
    """copy_image_to_project stores files under a content-hash path and
    returns None when the destination already exists."""
    runner = CliRunner()

    # can copy image to project directory
    with runner.isolated_filesystem():
        tags = """
file1.jpg\ttag1 tag2
"""
        data = parse_tags(tags)
        # Create the source files named in the tag data.
        for ddt in data:
            with open(ddt[0], 'w') as f:
                f.write(ddt[1])

        dst_path = Path(copy_image_to_project('project', data[0][0])).resolve()
        images_path = Path('project/images').resolve()
        # Destination is <hash-prefix>/<hash>.<ext> under project/images.
        assert dst_path.relative_to(images_path) == Path('eb/ebffef8cba3ea86f0149626a579a6b2e.jpg')

    # skips image if already exists in project directory
    with runner.isolated_filesystem():
        tags = """
file1.jpg\ttag1 tag2
"""
        data = parse_tags(tags)
        for ddt in data:
            with open(ddt[0], 'w') as f:
                f.write(ddt[1])

        # Pre-create the expected destination so the copy is skipped.
        images_path = Path('project/images').resolve()
        already_exists = (images_path / 'eb/ebffef8cba3ea86f0149626a579a6b2e.jpg')
        already_exists.parent.mkdir(exist_ok=True, parents=True)
        already_exists.touch()
        assert copy_image_to_project('project', data[0][0]) == None
def test_add_images_to_project():
    """End-to-end: add_images_to_project copies images and records posts."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        tags = """
file1.jpg\ttag1 tag2
"""
        data = parse_tags(tags)
        # Create the source files named in the tag data.
        for ddt in data:
            with open(ddt[0], 'w') as f:
                f.write(ddt[1])
        with open('tags.txt', 'w') as f:
            f.write(tags)

        # Set up a project whose database lives at a known path.
        project_context = {**dd.project.DEFAULT_PROJECT_CONTEXT, **{'database_path': 'db.sqlite'}}
        dd.commands.create_project('project_path', project_context)

        project_context = dd.project.load_context_from_project('project_path')
        database_path = project_context['database_path']
        con = sqlite3.connect(database_path)
        cur = con.cursor()
        dd.commands.make_posts_table(con, cur)
        con.close()

        add_images_to_project('project_path', 'tags.txt')

        # The post row and the hashed image file should both exist.
        con = sqlite3.connect(database_path)
        cur = con.cursor()
        result = list(cur.execute("SELECT * FROM `posts`"))
        con.close()
        assert result == [(1, 'ebffef8cba3ea86f0149626a579a6b2e', 'jpg', 'tag1 tag2', 2, 0, 0)]
        assert [p for p in Path('project_path/images').glob('**/*') if p.is_file()] == [Path('project_path/images/eb/ebffef8cba3ea86f0149626a579a6b2e.jpg')]
# def test_make_database_from_tag_file():
# runner = CliRunner()
# with runner.isolated_filesystem():
# tags = """
# file1.jpg\ttag1 tag2
# """
# data = parse_tags(tags)
# for ddt in data:
# with open(ddt[0], 'w') as f:
# f.write(ddt[1])
# with open('tags.txt', 'w') as f:
# f.write(tags)
# dd.commands.create_project('test_project')
# dd.commands.create_database_from_tag_file('test_project', 'tags.txt')
# project_context = dd.project.load_context_from_project('test_project')
# database_path = project_context['database_path']
# con = sqlite3.connect(database_path)
# cur = con.cursor()
# result = list(cur.execute("SELECT * FROM `posts`"))
# con.close()
# assert result == [(1, 'ebffef8cba3ea86f0149626a579a6b2e', 'jpg', 'tag1 tag2', 2, 0, 0)]
# assert [p for p in Path('project_path/images').glob('**/*') if p.is_file()] == [Path('project_path/images/eb/ebffef8cba3ea86f0149626a579a6b2e.jpg')]
|
from aiogram.types import CallbackQuery
from aiogram_dialog import DialogManager, ShowMode
async def enable_send_mode(
    event: CallbackQuery, button, dialog_manager: DialogManager, **kwargs
):
    """Button callback: make the dialog send a new message instead of editing."""
    dialog_manager.show_mode = ShowMode.SEND
async def get_result(dialog_manager: DialogManager, **kwargs):
    """Getter: expose the 'result' value stored in the current context's dialog_data."""
    return {
        "result": dialog_manager.current_context().dialog_data["result"],
    }
def when_not(key: str):
    """Build a `when` predicate that is true when *key* is falsy in the data."""
    def predicate(data, whenable, manager):
        return not data.get(key)
    return predicate
|
import asyncio
import http.client
import json
import shutil
import ssl
import tempfile
import time
import unittest
from unittest import mock
import nats
from nats.aio.client import Client as NATS, __version__
from nats.aio.errors import *
from tests.utils import *
class HeadersTest(SingleServerTestCase):
    """Tests for NATS message header support against a single local server."""

    @async_test
    async def test_simple_headers(self):
        """Headers published with a message are delivered to subscribers."""
        nc = await nats.connect()

        sub = await nc.subscribe("foo")
        await nc.flush()
        await nc.publish(
            "foo", b'hello world', headers={
                'foo': 'bar',
                'hello': 'world-1'
            }
        )

        msg = await sub.next_msg()
        # assertIsNotNone instead of assertTrue(x != None): same pass/fail
        # behavior, clearer failure messages and standard unittest idiom.
        self.assertIsNotNone(msg.headers)
        self.assertEqual(len(msg.headers), 2)
        self.assertEqual(msg.headers['foo'], 'bar')
        self.assertEqual(msg.headers['hello'], 'world-1')

        await nc.close()

    @async_test
    async def test_request_with_headers(self):
        """Headers round-trip through request/respond; responders can add more."""
        nc = await nats.connect()

        async def service(msg):
            # Add another header
            msg.headers['quux'] = 'quuz'
            await msg.respond(b'OK!')

        await nc.subscribe("foo", cb=service)
        await nc.flush()
        msg = await nc.request(
            "foo", b'hello world', headers={
                'foo': 'bar',
                'hello': 'world'
            }
        )
        self.assertIsNotNone(msg.headers)
        self.assertEqual(len(msg.headers), 3)
        self.assertEqual(msg.headers['foo'], 'bar')
        self.assertEqual(msg.headers['hello'], 'world')
        self.assertEqual(msg.headers['quux'], 'quuz')
        self.assertEqual(msg.data, b'OK!')

        await nc.close()

    @async_test
    async def test_empty_headers(self):
        """Messages whose only headers have empty/blank keys or values
        arrive with no headers at all."""
        nc = await nats.connect()

        sub = await nc.subscribe("foo")
        await nc.flush()

        # Empty key and value.
        await nc.publish("foo", b'hello world', headers={'': ''})
        msg = await sub.next_msg()
        self.assertIsNone(msg.headers)

        # Whitespace-only key.
        await nc.publish("foo", b'hello world', headers={' ': ''})
        msg = await sub.next_msg()
        self.assertIsNone(msg.headers)

        # Empty key with whitespace-only value.
        await nc.publish(
            "foo", b'hello world', headers={'': ' '}
        )
        msg = await sub.next_msg()
        self.assertIsNone(msg.headers)

        await nc.close()
if __name__ == '__main__':
    import sys
    # Stream results to stdout; exit=False keeps the interpreter alive
    # after the run instead of calling sys.exit().
    runner = unittest.TextTestRunner(stream=sys.stdout)
    unittest.main(verbosity=2, exit=False, testRunner=runner)
|
# from math import trunc
# n = float(input('Digite um número decimal: '))
# print(f'A parte inteira desse número é \033[32;1m{trunc(n)}\033[m')
num = float(input('Digite um valor: '))
# Fixed: append the ANSI reset (\033[m) after the colored value — the original
# print omitted it (unlike the reference implementation commented out above),
# so the green bold style leaked into all subsequent terminal output.
print(f'A parte inteira desse número é \033[32;1m{int(num)}\033[m')
|
from ..core.q import Q
from ..core import Field
from ..core.api.special_values import Autogenerate, OMIT
from ..core.bindings import RelatedObjectBinding
from .dataset import Dataset, DatasetTypeBinder
from .treeq import TreeQBinder
class FilesystemBinder(DatasetTypeBinder):
    """Binder for filesystems that also caches one TreeQ binder per filesystem."""

    def __init__(self, object_type, system):
        super(FilesystemBinder, self).__init__(object_type, system)
        self._treeq_binders = {}

    def get_or_create_treeq_binder(self, filesystem):
        """Return the cached TreeQBinder for *filesystem*, creating it on first use."""
        key = filesystem.get_id()
        binder = self._treeq_binders.get(key)
        if binder is None:
            binder = TreeQBinder(self.system, filesystem)
            self._treeq_binders[key] = binder
        return binder

    def delete_treeq_binder(self, filesystem):
        """Drop the cached binder for *filesystem*, if any."""
        self._treeq_binders.pop(filesystem.get_id(), None)

    def get_treeq_binder_by_id(self, filesystem_id):
        """Look up a cached binder by filesystem id (KeyError if absent)."""
        return self._treeq_binders[filesystem_id]
class Filesystem(Dataset):
    """An InfiniBox NAS filesystem dataset."""

    # Declarative field schema; bindings map between API names and
    # Python attributes, and feature_name/new_to gate fields by the
    # target system's capabilities.
    FIELDS = [
        Field("parent", type='infinisdk.infinibox.filesystem:Filesystem', cached=True, api_name="parent_id",
              binding=RelatedObjectBinding('filesystems'), is_filterable=True),
        Field("name", creation_parameter=True, mutable=True, is_filterable=True,
              is_sortable=True, default=Autogenerate("fs_{uuid}")),
        Field("root_mode", creation_parameter=True, hidden=True, optional=True),
        Field("atime_mode", is_filterable=True, is_sortable=True),
        Field("established", api_name="_is_established", type=bool, is_filterable=True, is_sortable=True, new_to="4.0"),
        Field('data_snapshot_guid', is_filterable=True, is_sortable=True, feature_name="nas_replication"),
        Field("snapdir_name", creation_parameter=True, optional=True, is_filterable=True, is_sortable=True,
              feature_name="dot_snapshot"),
        Field("visible_in_snapdir", type=bool, is_filterable=True, is_sortable=True, feature_name="dot_snapshot"),
        Field("snapdir_accessible", type=bool, feature_name="dot_snapshot", creation_parameter=True, optional=True,
              is_filterable=True, is_sortable=True)
    ]

    BINDER_CLASS = FilesystemBinder

    def __init__(self, system, initial_data):
        super(Filesystem, self).__init__(system, initial_data)
        # Per-filesystem TreeQ collection, cached on the system's binder.
        self.treeqs = self.system.filesystems.get_or_create_treeq_binder(self)

    def delete(self, force_if_snapshot_locked=OMIT):
        """Delete the filesystem and drop its cached TreeQ binder."""
        super(Filesystem, self).delete(force_if_snapshot_locked=force_if_snapshot_locked)
        self.system.filesystems.delete_treeq_binder(self)

    @classmethod
    def is_supported(cls, system):
        """Filesystems are available only on systems with NAS support."""
        return system.compat.has_nas()

    def add_export(self, **kwargs):
        """Create and return a new export of this filesystem."""
        return self.system.exports.create(filesystem=self, **kwargs)

    def get_exports(self):
        """Return all exports that belong to this filesystem."""
        return self.system.exports.find(Q.filesystem_id == self.id)
|
import pytest
from ldap_filter import Filter
class TestFilterOutput:
    """Round-trip and concatenation behaviour of Filter string output."""
    def test_to_string(self):
        raw = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert Filter.parse(raw).to_string() == raw
    def test_string_typecast(self):
        raw = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert str(Filter.parse(raw)) == raw
    def test_to_simple_concat(self):
        raw = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert Filter.parse(raw) + '' == raw
    def test_to_complex_concat(self):
        raw = '(&(sn=ron)(sn=bob))'
        assert Filter.parse(raw) + 'test' == '(&(sn=ron)(sn=bob))test'
class TestFilterFormatting:
    """Beautified (indented) rendering of parsed filters."""
    def test_default_beautify(self):
        parsed = Filter.parse('(&(|(sn=ron)(sn=bob))(mail=*))')
        assert parsed.to_string(True) == '(&\n (|\n (sn=ron)\n (sn=bob)\n )\n (mail=*)\n)'
    def test_custom_indent_beautify(self):
        parsed = Filter.parse('(&(|(sn=ron)(sn=bob))(mail=*))')
        assert parsed.to_string(2) == '(&\n (|\n (sn=ron)\n (sn=bob)\n )\n (mail=*)\n)'
    def test_custom_indent_char_beautify(self):
        parsed = Filter.parse('(&(|(sn=ron)(sn=bob))(mail=*))')
        assert parsed.to_string(indent=2, indt_char='!') == '(&\n!!(|\n!!!!(sn=ron)\n!!!!(sn=bob)\n!!)\n!!(mail=*)\n)'
class TestFilterSimplify:
    """simplify() collapses redundant single-child AND/OR groups."""
    def test_optimized_filter(self):
        raw = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        # Already minimal: simplify() must leave it unchanged.
        assert Filter.parse(raw).simplify().to_string() == raw
    def test_unoptimized_filter(self):
        raw = '(&(|(sn=ron)(&(sn=bob)))(|(mail=*))(!(account=disabled)))'
        expected = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert Filter.parse(raw).simplify().to_string() == expected
|
#!/usr/bin/env python
# coding:utf-8
import os
import shutil
import zipfile
try:
import requests
except ImportError:
requests = None
print 'Project Creator depends on the "requests" lib.'
def download_lastest_src():
print 'Fetching the lastest version from github...'
r = requests.get('https://github.com/nekocode/kotgo/releases/latest', allow_redirects=False)
lastest_tag = r.headers['Location'].split('/')[-1]
zipfile_name = 'src-%s.zip' % lastest_tag
if os.path.exists(zipfile_name):
print 'Already downloaded [%s].' % zipfile_name
return zipfile_name, lastest_tag
print 'Downloading the lastest release [%s]...' % zipfile_name
url = 'https://github.com/nekocode/kotgo/archive/%s.zip' % lastest_tag
r = requests.get(url)
with open(zipfile_name, 'wb') as data:
data.write(r.content)
print 'Download finished.'
return zipfile_name, lastest_tag
def unzip_src_package(zipfile_name):
print 'Unziping [%s]...' % zipfile_name
zfile = zipfile.ZipFile(zipfile_name, 'r')
names = zfile.namelist()
root_dir = names[0]
if os.path.exists(root_dir):
print 'Already unzipped.'
return root_dir
for filename in names:
path = os.path.join('./', filename)
if path.endswith('/'):
if not os.path.exists(os.path.dirname(path)):
os.mkdir(os.path.dirname(path))
else:
file(path, 'wb').write(zfile.read(filename))
print 'Unzip finished.'
return root_dir
class TextProcesser:
    """Fluent, line-oriented text rewriter for project template files.

    Commands are queued by the chainable methods and applied line-by-line
    when finish() is called; the result replaces the original file.
    """
    def __init__(self, file_path):
        self.file_path = file_path
        # Queued commands, each a (name, *args) tuple consumed by finish().
        self.commands = []
    def rm_line_has_text(self, text):
        """Drop every line containing *text*."""
        self.commands.append(('rm_line', text))
        return self
    def replace_all_text(self, src, dst):
        """Replace *src* with *dst* on every line."""
        self.commands.append(('replace', src, dst))
        return self
    def replace_header(self, src, dst):
        """Replace *src* with *dst* only on 'package'/'import' lines."""
        self.commands.append(('replace_header', src, dst))
        return self
    def remove_comment(self):
        """Drop Javadoc-style comment lines (/**, * , */)."""
        self.commands.append(('rm_comment', None))
        return self
    def recreate(self, text):
        """Discard all queued commands and rewrite the whole file as *text*."""
        self.commands = []
        self.commands.append(('recreate', text))
        return self
    def finish(self):
        """Apply the queued commands and atomically replace the file."""
        with open(self.file_path, 'r') as src_file, open(self.file_path + '.new', 'w') as new_file:
            for line in src_file.readlines():
                need_write = True
                need_recreate = None
                for cmd in self.commands:
                    # Removal commands short-circuit the remaining commands
                    # for this line; replacements accumulate in order.
                    if cmd[0] == 'rm_line' and cmd[1] in line:
                        need_write = False
                        break
                    elif cmd[0] == 'rm_comment' and \
                            (line.startswith('/**') or line.startswith(' * ') or line.startswith(' */')):
                        need_write = False
                        break
                    elif cmd[0] == 'recreate':
                        need_recreate = cmd[1]
                        break
                    elif cmd[0] == 'replace':
                        line = line.replace(cmd[1], cmd[2])
                    elif cmd[0] == 'replace_header' and (line.startswith('package') or line.startswith('import')):
                        line = line.replace(cmd[1], cmd[2])
                if need_recreate is not None:
                    # recreate: write the replacement text once and stop
                    # reading the rest of the source file.
                    new_file.write(need_recreate)
                    break
                if need_write:
                    new_file.write(line)
        shutil.move(self.file_path + '.new', self.file_path)
class ProjectFactory:
    """Creates a new Android project from a downloaded kotgo template zip."""
    def __init__(self, template_zip, version):
        self.template_zip = template_zip
        self.version = version
    def create_project(self, project_name, package_name):
        """Unzip the template, rename it to *project_name* and rewrite it.

        NOTE: chdir()s into the new project directory and stays there.
        """
        template_dir = unzip_src_package(self.template_zip)
        if os.path.exists(project_name):
            shutil.rmtree(project_name)
        shutil.move(template_dir, project_name)
        os.chdir(project_name)
        shutil.move('sample', 'app')
        print 'Creating project [%s]...' % project_name
        self.process(project_name, package_name)
        print 'Creat finished.'
    def process(self, project_name, package_name):
        """Rewrite gradle files, manifests and sources for the new package.

        Assumes the current working directory is the project root (set by
        create_project).
        """
        # =================
        # Root
        # =================
        # build.gradle
        TextProcesser('build.gradle').rm_line_has_text('android-maven').finish()
        # settings.gradle
        TextProcesser('settings.gradle').recreate("include ':app', ':data', ':component'").finish()
        # rm unnessary files
        os.remove('README.md')
        shutil.rmtree('art')
        if os.path.exists('project_creator.py'):
            os.remove('project_creator.py')
        # =================
        # component
        # =================
        # build.gradle
        TextProcesser('component/build.gradle') \
            .rm_line_has_text("apply plugin: 'com.github.dcendents.android-maven'") \
            .rm_line_has_text("group='com.github.nekocode'") \
            .finish()
        # =================
        # app
        # =================
        # build.gradle
        TextProcesser('app/build.gradle') \
            .replace_all_text('cn.nekocode.kotgo.sample', package_name) \
            .finish()
        # build.gradle
        TextProcesser('app/proguard-rules.pro') \
            .replace_all_text('cn.nekocode.kotgo.sample', package_name) \
            .finish()
        # AndroidManifest.xml
        TextProcesser('app/src/main/AndroidManifest.xml') \
            .replace_all_text('cn.nekocode.kotgo.sample', package_name) \
            .finish()
        # strings.xml
        TextProcesser('app/src/main/res/values/strings.xml') \
            .replace_all_text('Kotgo', project_name) \
            .finish()
        # move package: relocate sources from the template package to the
        # user's package via a temporary javaTmp directory.
        package_dir_postfix = package_name.replace('.', '/')
        tmp_package_path = 'app/src/main/javaTmp/' + package_dir_postfix + '/'
        old_package_path = 'app/src/main/java/cn/nekocode/kotgo/sample/'
        os.makedirs(tmp_package_path)
        for f in os.listdir(old_package_path):
            shutil.move(old_package_path + f, tmp_package_path)
        shutil.rmtree('app/src/main/java')
        os.renames('app/src/main/javaTmp', 'app/src/main/java')
        new_package_path = 'app/src/main/java/' + package_dir_postfix + '/'
        # src files
        def process_all_src(path):
            # Recursively strip Javadoc comments and fix package headers.
            for p in os.listdir(path):
                if os.path.isdir(path + p):
                    process_all_src(path + p + '/')
                elif p.endswith('.kt') or p.endswith('.java'):
                    TextProcesser(path + p) \
                        .remove_comment() \
                        .replace_header('cn.nekocode.kotgo.sample', package_name) \
                        .finish()
        process_all_src(new_package_path)
        # =================
        # data
        # =================
        package_name += '.data'
        # AndroidManifest.xml
        TextProcesser('data/src/main/AndroidManifest.xml') \
            .replace_all_text('cn.nekocode.kotgo.sample.data', package_name) \
            .finish()
        # move package
        package_dir_postfix = package_name.replace('.', '/')
        tmp_package_path = 'data/src/main/javaTmp/' + package_dir_postfix + '/'
        old_package_path = 'data/src/main/java/cn/nekocode/kotgo/sample/data/'
        os.makedirs(tmp_package_path)
        for f in os.listdir(old_package_path):
            shutil.move(old_package_path + f, tmp_package_path)
        shutil.rmtree('data/src/main/java')
        os.renames('data/src/main/javaTmp', 'data/src/main/java')
        new_package_path = 'data/src/main/java/' + package_dir_postfix + '/'
        # move test package
        package_dir_postfix = package_name.replace('.', '/')
        tmp_package_path = 'data/src/test/javaTmp/' + package_dir_postfix + '/'
        old_package_path = 'data/src/test/java/cn/nekocode/kotgo/sample/data/'
        os.makedirs(tmp_package_path)
        for f in os.listdir(old_package_path):
            shutil.move(old_package_path + f, tmp_package_path)
        shutil.rmtree('data/src/test/java')
        os.renames('data/src/test/javaTmp', 'data/src/test/java')
        new_test_package_path = 'data/src/test/java/' + package_dir_postfix + '/'
        # src files
        def process_all_src(path):
            # Same recursion as above, but for the '.data' package name.
            for p in os.listdir(path):
                if os.path.isdir(path + p):
                    process_all_src(path + p + '/')
                elif p.endswith('.kt') or p.endswith('.java'):
                    TextProcesser(path + p) \
                        .remove_comment() \
                        .replace_header('cn.nekocode.kotgo.sample.data', package_name) \
                        .finish()
        process_all_src(new_package_path)
        process_all_src(new_test_package_path)
        return self
def main():
    """Interactively create a new project from the latest template release."""
    name = raw_input('Input new project name: ')
    package = raw_input('Input the full package path (such as com.company.test): ')
    archive, tag = download_lastest_src()
    ProjectFactory(archive, tag).create_project(name, package)
if __name__ == '__main__' and requests is not None:
    main()
|
# pylint: disable=redefined-builtin,invalid-name
"""
Configuration file for the Sphinx documentation builder.
https://www.sphinx-doc.org/en/master/usage/configuration.html
"""
from typing import Final, Sequence
import os
import sys
# region Path setup
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# endregion
# Project information.
author: Final[str] = ''
copyright: Final[str] = '2021'
project: Final[str] = 'Xirvik Tools'
# NOTE(review): the bare strings below precede the assignments they
# describe; as module-level string expressions they are no-ops and are
# kept as-is.
"""The short X.Y version."""
version: Final[str] = '2.0.0'
"""The full version, including alpha/beta/rc tags."""
release: Final[str] = f'v{version}'
"""
Add any Sphinx extension module names here, as strings. They can be extensions
coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
"""
extensions: Final[Sequence[str]] = [
    'sphinx.ext.autodoc', 'sphinx.ext.napoleon'
]
"""Add any paths that contain templates here, relative to this directory."""
templates_path: Final[Sequence[str]] = ['_templates']
"""
List of patterns, relative to source directory, that match files and
directories to ignore when looking for source files. This pattern also affects
html_static_path and html_extra_path.
"""
exclude_patterns: Final[Sequence[str]] = []
master_doc: Final[str] = 'index'
"""
Add any paths that contain custom static files (such as style sheets) here,
relative to this directory. They are copied after the builtin static files, so
a file named "default.css" will overwrite the builtin "default.css".
"""
html_static_path: Final[Sequence[str]] = ['_static']
"""
The theme to use for HTML and HTML Help pages. See the documentation for a
list of builtin themes.
"""
html_theme: Final[str] = 'alabaster'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-28 05:40
from __future__ import unicode_literals
from django.db import migrations, models
import taiga.base.utils.time
class Migration(migrations.Migration):
    """Auto-generated: switch ordering defaults to millisecond timestamps."""

    dependencies = [
        ('projects', '0053_auto_20160927_0741'),
    ]

    operations = [
        migrations.AlterField(
            model_name='membership',
            name='user_order',
            field=models.BigIntegerField(default=taiga.base.utils.time.timestamp_ms, verbose_name='user order'),
        ),
        migrations.AlterField(
            model_name='projecttemplate',
            name='order',
            # NOTE(review): verbose_name 'user order' on the template's
            # 'order' field looks copy-pasted from the field above --
            # confirm before editing a generated migration.
            field=models.BigIntegerField(default=taiga.base.utils.time.timestamp_ms, verbose_name='user order'),
        ),
    ]
|
import os
from ajenti.api import *
from ajenti.plugins.main.api import SectionPlugin
from ajenti.ui import on
from ajenti.ui.binder import Binder
from reconfigure.configs import ExportsConfig
from reconfigure.items.exports import ExportData, ClientData
@plugin
class Exports (SectionPlugin):
    """Ajenti panel section for editing /etc/exports (NFS exports)."""
    config_path = '/etc/exports'
    def init(self):
        self.title = _('NFS Exports')
        self.icon = 'hdd'
        self.category = _('Software')
        self.append(self.ui.inflate('exports:main'))
        # Ensure the config file exists so ExportsConfig can parse it.
        if not os.path.exists(self.config_path):
            open(self.config_path, 'w').close()
        self.config = ExportsConfig(path=self.config_path)
        # UI <-> config-tree data binding; populated on page load.
        self.binder = Binder(None, self)
        self.find('exports').new_item = lambda c: ExportData()
        self.find('clients').new_item = lambda c: ClientData()
    def on_page_load(self):
        # Re-read the file and refresh the bound UI on every visit.
        self.config.load()
        self.binder.setup(self.config.tree).populate()
    @on('save', 'click')
    def save(self):
        """Push UI edits back into the config tree and write the file."""
        self.binder.update()
        self.config.save()
        self.context.notify('info', _('Saved'))
|
import sys
import json
import time
from pprint import pprint
from flask import request
from flask_bcrypt import Bcrypt
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from webapp import db
from webapp.blueprints.users.models import User
from webapp.blueprints.main.models import (
Board,
Card
)
from webapp.blueprints.main.utils import iter_pages
bcrypt = Bcrypt()
def main():
    """Dispatch on the single CLI argument: 'r' seeds, 'rr' wipes, none dumps."""
    arg = sys.argv[1] if len(sys.argv) == 2 else None
    if arg is None:
        pprint(Card.query.all())
        pprint(Board.query.all())
    elif arg == 'r':
        db_reset(create=True, pr=True)
    elif arg == 'rr':
        db_reset(create=False, pr=True)
def db_reset(create=False, pr=False):
    """Drop and recreate all tables; optionally re-seed from the JSON fixtures."""
    db.drop_all()
    db.create_all()
    if not create:
        return
    create_user(pr)
    create_board(pr)
    create_card(pr)
def create_board(pr):
    """Seed Board rows from data/board.json.

    pr: True or 'board' prints each row after commit.
    """
    print('\ncreate_board()')
    with open('data/board.json', 'r') as f:
        data = json.load(f)
    # Robustness: default to [] so a missing 'items' key doesn't raise
    # TypeError from iterating None.
    for item in data.get('items', []):
        row = Board(title=item.get('title', ''))
        db.session.add(row)
        db.session.commit()
        if pr in (True, 'board'):
            print(row)
def create_card(pr):
    """Seed Card rows (attached to board 1) from data/card.json.

    pr: True or 'card' prints each row after commit.
    """
    print('\ncreate_card()')
    with open('data/card.json', 'r') as f:
        data = json.load(f)
    # Robustness: default to [] so a missing 'items' key doesn't raise
    # TypeError from iterating None.
    for item in data.get('items', []):
        row = Card(
            title=item.get('title', ''),
            description=item.get('description', ''),
            image=item.get('image', ''),
            id_board=1)
        db.session.add(row)
        db.session.commit()
        if pr in (True, 'card'):
            print(row)
def create_user(pr):
    """Seed User rows (bcrypt-hashed passwords) from data/user.json.

    pr: True or 'user' prints each row after commit.
    """
    print('\ncreate_user()')
    with open('data/user.json', 'r') as f:
        data = json.load(f)
    # Robustness: default to [] so a missing 'items' key doesn't raise
    # TypeError from iterating None.
    for item in data.get('items', []):
        row = User(
            email=item.get('email'),
            password=bcrypt.generate_password_hash(item.get('password')))
        db.session.add(row)
        db.session.commit()
        if pr in (True, 'user'):
            print(row)
# Entry point: run this module directly to reset/seed/dump the database.
if __name__ == '__main__':
    main()
|
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simpson
print(os.getcwd())
def joukowski_map(mu_x, mu_y, num_pt, show=True):
    """Generate airfoil coordinates via the Joukowski transform.

    Parameters
    ----------
    mu_x, mu_y : float
        Center of the generating circle in the complex plane.
    num_pt : int
        Number of points sampled along the circle.
    show : bool, optional
        If True (default, preserves the original behavior) plot the airfoil
        with matplotlib before returning.

    Returns
    -------
    (x, y) : tuple of ndarray
        Airfoil coordinates rescaled to a unit chord starting at x = 0.
    """
    # center of circle in complex plane
    comp_cent = np.array([mu_x, mu_y])
    # Radius = distance from the center to the point (1, 0) -- the
    # Joukowski singular point -- so the mapped shape has a sharp trailing
    # edge. (The original comment said (-1, 0); the code uses (1, 0).)
    r = np.sqrt((comp_cent[0]-1)**2 + (comp_cent[1]-0)**2)
    # Circle coordinates calculations
    angle = np.linspace(0, 2*np.pi, num_pt)  # num_pt points along circle [0, 2*pi]
    comp_r = comp_cent[0] + r*np.cos(angle)  # real coordinates along circle (horz.)
    comp_i = comp_cent[1] + r*np.sin(angle)  # imaginary coordinates along circle (vert.)
    # Cartesian components of the Joukowsky transform w = z + 1/z
    x = ((comp_r)*(comp_r**2+comp_i**2+1))/(comp_r**2+comp_i**2)
    y = ((comp_i)*(comp_r**2+comp_i**2-1))/(comp_r**2+comp_i**2)
    if show:
        # Plotting is now optional so the function can be used headless.
        plt.plot(x, y)
        plt.show()
    ########################################
    # change chord length to be from x=0 to 1
    # Compute the scale factor (actual chord length)
    c = np.max(x)-np.min(x)
    # Leading edge current position
    LE = np.min(x/c)
    # Corrected position of the coordinates
    x = x/c-LE  # move the leading edge to x = 0
    y = y/c
    # return num_pt points that make up the airfoil shape
    return x, y
def af_area(x, y):
    """Approximate the signed integral of y dx with composite Simpson's rule.

    For a closed airfoil contour the absolute value of the result is the
    enclosed area; callers apply abs() as needed.
    """
    # Bug fix: the original called `integrate.simpson`, but only `simpson`
    # is imported from scipy.integrate (NameError), and the computed value
    # was never returned.
    area = simpson(y, x=x)
    return area
# example data:
# -0.2125,0.084375,-11.441091805932968,-0.1382712420131816
# Demo: build the airfoil for the example circle center and report the
# enclosed area (abs() because the contour orientation makes it signed).
x, y = joukowski_map(-0.2125, 0.084375, 500)
area = abs(simpson(y, x))
print(area)
|
"""Interface that defines a puzzle."""
class PuzzleInterface:
    """Base class describing the puzzle contract."""

    def execute(self) -> None:
        """Execute the puzzle; concrete puzzles override this no-op."""
|
from transformers import BertModel, BertForMaskedLM, DistilBertModel, DistilBertForMaskedLM
def get_kobert_model(cache_dir=None):
    """Return BertModel for Kobert, from *cache_dir* or the model hub."""
    source = cache_dir if cache_dir is not None else 'monologg/kobert'
    return BertModel.from_pretrained(source)
def get_kobert_lm(cache_dir=None):
    """Return BertForMaskedLM for Kobert, from *cache_dir* or the model hub."""
    if cache_dir is not None:
        # Bug fix: the cached branch loaded a plain BertModel, silently
        # dropping the masked-LM head.
        model = BertForMaskedLM.from_pretrained(cache_dir)
    else:
        model = BertForMaskedLM.from_pretrained('monologg/kobert-lm')
    return model
def get_distilkobert_model(cache_dir=None):
    """Return DistilBertModel for DistilKobert, from *cache_dir* or the hub."""
    if cache_dir is not None:
        # Bug fix: the cached branch loaded a BertModel, which has a
        # different architecture than the DistilBert checkpoint.
        model = DistilBertModel.from_pretrained(cache_dir)
    else:
        model = DistilBertModel.from_pretrained('monologg/distilkobert')
    return model
def get_distilkobert_lm(cache_dir=None):
    """Return DistilBertForMaskedLM for DistilKobert, from *cache_dir* or the hub."""
    if cache_dir is not None:
        # Bug fix: the cached branch loaded a BertModel instead of the
        # DistilBert masked-LM architecture.
        model = DistilBertForMaskedLM.from_pretrained(cache_dir)
    else:
        model = DistilBertForMaskedLM.from_pretrained('monologg/distilkobert')
    return model
|
# Copyright (c) 2013 Huan Do, http://huan.do
import ast
from constant_finder import ConstantFinder
from constant_changer import ConstantChanger
class ConstantVisitor(object):
    """Rename all constants and record them so the final phase can inject
    an assignment node that declares them."""

    def __init__(self, env):
        self.env = env
        self.tree = env.tree

    def traverse(self):
        """Find constants, rewrite them, and prepend their declarations."""
        finder = ConstantFinder(self.env)
        finder.visit(self.tree)
        ConstantChanger(self.env).visit(self.tree)
        manager = finder.assignment_manager
        if manager.assignments:
            # Prepend the synthesized assignment so constants are declared
            # before any use.
            self.tree.body = [manager.get_assign_node()] + self.tree.body
|
import logging, re
import sys
sys.path.append('')
from cube.io_utils.objects import Document
class Encodings:
    """Bidirectional vocabulary maps (word/char/label/UPOS/XPOS/ATTRS)
    computed from training data and (de)serializable to disk."""
    def __init__(self, verbose=False):
        # word -> provenance flag (0 train, 1 dev-only, 2 auxiliary)
        self.word_list = {}
        # index -> word, aligned with word2int ("holistic" vocabulary)
        self.hol_word_list = []
        self.char2int = {}
        self.label2int = {}
        # index -> label, aligned with label2int
        self.labels = []
        self.word2int = {}
        self.upos2int = {}
        self.xpos2int = {}
        self.attrs2int = {}
        # reverse lists aligned with the corresponding *2int maps
        self.upos_list = []
        self.xpos_list = []
        self.attrs_list = []
        self.characters = []
        self.verbose = verbose
        self.num_langs = 0
def compute(self, train: Document, dev: Document, word_cutoff=7, char_cutoff=5, CUPT_format=False):
    """Build all encoding maps from *train* (and word provenance from *dev*).

    Words/characters under the cutoffs are excluded from the holistic maps;
    ``word_list`` records provenance (0 = trainset, 1 = devset only).
    """
    if self.verbose:
        print("Computing encoding maps... ")
    # Reserved indices: 0 = <PAD>, 1 = <UNK> (chars also get index 2 = ' ').
    self.word2int['<PAD>'] = 0
    self.hol_word_list.append('<PAD>')
    self.word2int['<UNK>'] = 1
    self.hol_word_list.append('<UNK>')
    self.char2int['<PAD>'] = 0
    self.char2int['<UNK>'] = 1
    self.char2int[' '] = 2
    self.upos2int['<PAD>'] = 0
    self.upos_list.append('<PAD>')
    self.xpos2int['<PAD>'] = 0
    self.xpos_list.append('<PAD>')
    self.attrs2int['<PAD>'] = 0
    self.attrs_list.append('<PAD>')
    self.upos2int['<UNK>'] = 1
    self.upos_list.append('<UNK>')
    self.xpos2int['<UNK>'] = 1
    self.xpos_list.append('<UNK>')
    self.attrs2int['<UNK>'] = 1
    # Bug fix: this appended '<PAD>' a second time, desynchronizing
    # attrs_list from attrs2int (index 1 must be '<UNK>').
    self.attrs_list.append('<UNK>')
    self.label2int['<PAD>'] = 0
    self.labels.append('<PAD>')
    self.label2int['<UNK>'] = 1
    self.labels.append('<UNK>')
    self.characters.append("<PAD>")
    self.characters.append("<UNK>")
    self.characters.append(" ")
    char_count = {}
    word_count = {}
    for sentence in train.sentences:
        lang_id = sentence.lang_id
        if lang_id + 1 > self.num_langs:
            self.num_langs = lang_id + 1
        for entry in sentence.words:  # entry is a Word
            word = entry.word.lower()
            if word not in word_count:
                word_count[word] = 1
            else:
                word_count[word] = word_count[word] + 1
            if word not in self.word_list:
                self.word_list[word] = 0  # word is inside trainset
            uniword = entry.word.lower()
            uniword = re.sub(r'\d', '0', uniword)  # collapse digits to '0'
            for i in range(len(uniword)):
                char = uniword[i].lower()
                if char not in char_count:
                    char_count[char] = 1
                else:
                    char_count[char] = char_count[char] + 1
            label = entry.label
            # Bug fix: the original condition also tested the undefined
            # name `tag_type`, raising NameError whenever CUPT_format was
            # enabled.
            if CUPT_format:
                if entry.label != "*":
                    labels = entry.label.split(';')
                    entry_labels = [label.split(':')[1] for label in labels if ':' in label]
                    for entry_label in entry_labels:
                        if entry_label not in self.label2int:
                            self.label2int[entry_label] = len(self.label2int)
                            # Keep the reverse list in sync with label2int
                            # (the original only updated the map).
                            self.labels.append(entry_label)
            else:
                if label not in self.label2int:
                    self.label2int[label] = len(self.label2int)
                    self.labels.append(label)
            # morphological encodings
            if entry.upos not in self.upos2int:
                self.upos2int[entry.upos] = len(self.upos2int)
                self.upos_list.append(entry.upos)
            if entry.xpos not in self.xpos2int:
                self.xpos2int[entry.xpos] = len(self.xpos2int)
                self.xpos_list.append(entry.xpos)
            if entry.attrs not in self.attrs2int:
                self.attrs2int[entry.attrs] = len(self.attrs2int)
                self.attrs_list.append(entry.attrs)
    if dev is not None:
        for sentence in dev.sentences:
            lang_id = sentence._lang_id
            for entry in sentence.words:
                word = entry.word.lower()
                if word not in self.word_list:
                    self.word_list[word] = 1  # word is inside devset only
    for word in word_count:
        if word_count[word] >= word_cutoff:
            self.word2int[word] = len(self.word2int)
            self.hol_word_list.append(word)
    for char in char_count:
        if char_count[char] >= char_cutoff and char not in self.char2int:
            self.char2int[char] = len(self.char2int)
            self.characters.append(char)
    # force add digits
    for digit in range(10):
        ds = str(digit)
        if ds not in self.char2int:
            self.char2int[ds] = len(self.char2int)
            self.characters.append(ds)
    if self.verbose:
        print("done\n")
        print("Unique words: " + str(len(self.word_list)))
        print("Unique chars: " + str(len(self.char2int)))
        print("Unique labels: " + str(len(self.label2int)))
        print("Unique UPOS: " + str(len(self.upos2int)))
        print("Unique XPOS: " + str(len(self.xpos2int)))
        print("Unique ATTRS: " + str(len(self.attrs2int)))
        print("Holistic word count: " + str(len(self.word2int)))
def update_wordlist(self, dataset):
    """Record words from an auxiliary dataset (flag 2) without touching known ones."""
    for sequence in dataset.sequences:
        for token in sequence:
            # setdefault leaves existing provenance flags untouched.
            self.word_list.setdefault(token.word.lower(), 2)
def load(self, filename):
    """Load labels, characters, holistic words and morphology maps.

    File layout: a "NAME <count>" header per section followed by
    tab-separated "key<TAB>index" rows, in the same order save() writes.
    """
    # We only read character2int, labels, holistic words and label2int here. word_list should be recomputed for every dataset (if deemed necessary)
    with open(filename, "r", encoding="utf8") as f:
        # "LANGS <n>"
        line = f.readline()
        self.num_langs = int(line.strip().split(' ')[-1])
        # "LABELS <n>" + rows
        line = f.readline()
        num_labels = int(line.split(" ")[1])
        if self.verbose:
            print("Loading labels " + str(num_labels))
        self.labels = [""] * num_labels
        for _ in range(num_labels):
            line = f.readline()
            parts = line.split("\t")
            key = parts[0]
            value = int(parts[1])
            self.label2int[key] = value
            self.labels[value] = key
        # "CHARACTERS <n>" + rows
        line = f.readline()
        num_characters = int(line.split(" ")[1])
        self.characters = [""] * num_characters
        if self.verbose:
            print("Loading characters " + str(num_characters))
        for _ in range(num_characters):
            line = f.readline()
            parts = line.split("\t")
            key = parts[0]
            value = int(parts[1])
            self.char2int[key] = value
            self.characters[value] = key
        # "WORDS <n>" + rows (holistic words have no reverse list here)
        line = f.readline()
        num_words = int(line.split(" ")[1])
        if self.verbose:
            print("Loading words " + str(num_words))
        for _x in range(num_words):
            line = f.readline()
            parts = line.split("\t")
            key = parts[0]
            value = int(parts[1])
            self.word2int[key] = value
        # morphological attributes
        # "UPOS <n>" + rows
        line = f.readline()
        num_labels = int(line.split(" ")[1])
        if self.verbose:
            print("Loading upos " + str(num_labels))
        self.upos_list = [""] * num_labels
        for _ in range(num_labels):
            line = f.readline()
            parts = line.split("\t")
            key = parts[0]
            value = int(parts[1])
            self.upos2int[key] = value
            self.upos_list[value] = key
        # "XPOS <n>" + rows
        line = f.readline()
        num_labels = int(line.split(" ")[1])
        self.xpos_list = [""] * num_labels
        if self.verbose:
            print("Loading xpos " + str(num_labels))
        for _ in range(num_labels):
            line = f.readline()
            parts = line.split("\t")
            key = parts[0]
            value = int(parts[1])
            self.xpos2int[key] = value
            self.xpos_list[value] = key
        # "ATTRS <n>" + rows
        line = f.readline()
        num_labels = int(line.split(" ")[1])
        self.attrs_list = [""] * num_labels
        if self.verbose:
            print("Loading attrs " + str(num_labels))
        for _ in range(num_labels):
            line = f.readline()
            parts = line.split("\t")
            key = parts[0]
            value = int(parts[1])
            self.attrs2int[key] = value
            self.attrs_list[value] = key
        # NOTE(review): redundant -- the with-block already closes the file.
        f.close()
def save(self, filename):
    """Write langs, labels, characters, words and morphology maps to *filename*.

    Sections are emitted in the order load() expects: LANGS, LABELS,
    CHARACTERS, WORDS, UPOS, XPOS, ATTRS.
    """
    with open(filename, "w", encoding="utf8") as out:
        out.write("LANGS " + str(self.num_langs) + "\n")
        out.write("LABELS " + str(len(self.label2int)) + "\n")
        for key, idx in self.label2int.items():
            out.write(str(key) + "\t" + str(idx) + "\n")
        out.write("CHARACTERS " + str(len(self.char2int)) + "\n")
        for key, idx in self.char2int.items():
            out.write(key + "\t" + str(idx) + "\n")
        out.write("WORDS " + str(len(self.word2int)) + "\n")
        for key, idx in self.word2int.items():
            out.write(key + "\t" + str(idx) + "\n")
        out.write("UPOS " + str(len(self.upos2int)) + "\n")
        for key, idx in self.upos2int.items():
            out.write(key + "\t" + str(idx) + "\n")
        out.write("XPOS " + str(len(self.xpos2int)) + "\n")
        for key, idx in self.xpos2int.items():
            out.write(key + "\t" + str(idx) + "\n")
        out.write("ATTRS " + str(len(self.attrs2int)) + "\n")
        for key, idx in self.attrs2int.items():
            out.write(key + "\t" + str(idx) + "\n")
|
import factory
from unittest.mock import patch
from test_plus.test import TestCase
from studio.services import get_ai_list, post_import_ai
from studio.tests.factories import (
AiFactory,
AIImportJSON,
NameExistsFactory,
SuccessFactory,
UnauthorizedFactory
)
class TestAIList(TestCase):
    """get_ai_list behaviour for anonymous vs registered callers."""

    def setUp(self):
        """Provide a user token"""
        self.token = 'token'

    @patch('studio.services.fetch_api')
    def test_anonymous(self, mock_get):
        """Anonymous user shouldn't get a list"""
        mock_get.return_value = factory.build(dict, FACTORY_CLASS=UnauthorizedFactory)
        result = get_ai_list(self.token)
        self.assertEqual(result['status']['code'], 401)

    @patch('studio.services.fetch_api')
    def test_registered(self, mock_get):
        """Registered user can get AIs list"""
        mock_get.return_value = {'ai_list': [], 'status': {'code': 200}}
        result = get_ai_list(self.token)
        self.assertEqual(result['status']['code'], 200)
class TestImportAI(TestCase):
    """post_import_ai behaviour: auth, creation and duplicate-name errors."""

    def setUp(self):
        """Provide a user token"""
        self.token = 'token'
        self.created = {
            **factory.build(dict, FACTORY_CLASS=AiFactory),
            **factory.build(dict, FACTORY_CLASS=SuccessFactory)
        }

    @patch('studio.services.fetch_api')
    @patch('studio.services.config')
    def test_anonymous(self, mock_config, mock_get):
        """Anonymous user shouldn't be able to POST"""
        mock_config.return_value = {'API_LONG_POLLING': 300}
        mock_get.return_value = factory.build(dict, FACTORY_CLASS=UnauthorizedFactory)
        result = post_import_ai(False, {})
        self.assertEqual(result['status']['code'], 401)

    @patch('studio.services.fetch_api')
    @patch('studio.services.config')
    def test_registered(self, mock_config, mock_get):
        """Registered user can POST an Import JSON"""
        mock_config.return_value = {'API_LONG_POLLING': 300}
        mock_get.return_value = factory.build(dict, FACTORY_CLASS=SuccessFactory)
        result = post_import_ai(self.token, {})
        self.assertEqual(result['status']['code'], 201)

    @patch('studio.services.fetch_api')
    @patch('studio.services.config')
    def test_import_success(self, mock_config, mock_get):
        """If the bot is created we return 201 and bot info with AIID."""
        mock_config.return_value = {'API_LONG_POLLING': 300}
        mock_get.return_value = self.created
        payload = factory.build(dict, FACTORY_CLASS=AIImportJSON)
        result = post_import_ai(self.token, payload)
        self.assertEqual(result['status']['code'], 201)
        self.assertEqual(self.created['aiid'], result['aiid'])

    @patch('studio.services.fetch_api')
    @patch('studio.services.config')
    def test_import_existing_name(self, mock_config, mock_get):
        """
        If a bot with the same name exists, API returns information about it
        """
        mock_config.return_value = {'API_LONG_POLLING': 300}
        mock_get.return_value = factory.build(dict, FACTORY_CLASS=NameExistsFactory)
        payload = factory.build(dict, FACTORY_CLASS=AIImportJSON)
        result = post_import_ai(self.token, payload)
        self.assertEqual(result['status']['code'], 400)
        self.assertEqual(
            result['status']['info'],
            'A bot with that name already exists'
        )
|
""" CSeq C Sequentialization Framework
mapper module
maps from variables in the input program to propositional variables in the propositional formula (based on our forked version of CBMC)
written by Omar Inverso.
"""
VERSION = 'labs_mapper-2020.05.19'
#VERSION = 'labs_mapper-2018.10.22' # forked from mapper-2018.05.24 for sequentialised programs
#VERSION = 'mapper-2018.05.24'
#VERSION = 'mapper-2018.04.21'
#VERSION = 'feeder-2015.07.16' # CSeq 1.0 Release - ASE2015
"""
Prerequisites:
Input parsable by the backend.
Note: when the map lookup is unsuccessful there may be three cases:
	1. you are not using the modified CBMC version that generates an extended map at the end of the DIMACS file
2. the input program is very simple and thus trivially verifies safe, so the DIMACS is not generated
3. the variable or function name is wrong.
TODO:
- add explicit timeout parameter (now hardcoded) for generating the propositional formula
- when the backend is not available, there should be an exception.
Changelog:
2020.05.19 adjusted for SLiVER-1.9 (Luca Di Stefano)
2018.05.31 forked from mapper for sequentialised programs
2018.04.21 forked from latest stable feeder module
"""
import os, sys, getopt, time, signal, subprocess, shlex
import math
from threading import Timer
import pycparser.c_parser, pycparser.c_ast, pycparser.c_generator
import core.module, core.parser, core.utils
from core.module import ModuleError
from info import Info
from utils import findpropositionalvar, findpropositionalvarsize, get_bin
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Options and Parameters below.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Paths to the verification backends, keyed by backend name.
backendFilename = {}
backendFilename['cbmc-assumptions'] = '../cbmc/cbmc-simulator'
# Extra command-line options per backend (none needed for cbmc-assumptions).
cmdLineOptions = {}
cmdLineOptions['cbmc-assumptions'] = ' '
class labs_mapper(core.module.BasicModule):
def init(self):
    """Declare the module's input/output parameters for the CSeq framework."""
    self.addInputParam('backend', 'backend', 'b', default='cbmc-assumptions', optional=False)
    self.addInputParam('split', '....', 'v', 'choice', optional=False)
    self.addInputParam('steps', 'number of system evolutions', 's', '1', optional=False)
    self.addInputParam('cores', 'number of cores (power of two) for parallel analysis (0 = auto)', 'c', '1', optional=False)
    self.addInputParam('simulate', '0 for verification mode; otherwise # of traces to generate', 't', '0', optional=True)  # TODO
    self.addInputParam('info', 'LAbS system info', 'i', '', optional=True)
    # Outputs consumed by downstream modules in the CSeq pipeline.
    self.addOutputParam('extrargs')
    self.addOutputParam('info')
    self.addOutputParam('simulate')
def loadfromstring(self, string, env):
contexts = 0
cmdline = ''
contexts = int(self.getInputParamValue('steps'))
backend = self.getInputParamValue('backend')
spliton = self.getInputParamValue('split')
cores = int(self.getInputParamValue('cores')) #TODO: check cores is power of two
simulate = self.getInputParamValue('simulate')
if backend != 'cbmc-assumptions': self.error("backend (%s) not supported" % backend)
seqfile = core.utils.rreplace(env.inputfile, '/', '/_cs_', 1) if '/' in env.inputfile else '_cs_' + env.inputfile
core.utils.saveFile(seqfile, string)
''' Run the verification tool on the input file '''
if backend == 'cbmc-assumptions':
cmdline = backendFilename[backend] + " " + seqfile + " --dimacs | grep \"^c \"" #--outfile " + seqfile+".dimacs"
command = core.utils.Command(cmdline)
out, err, code = command.run(timeout=int(7200))[0:3] # store stdout, stderr, process' return value
if code != 0:
raise IOError(err)
lines = out.splitlines()
processed_info = Info.parse(self.getInputParamValue('info'))
core.utils.saveFile(seqfile+".map", out)
self.setOutputParam('info', processed_info)
self.output = string
def verificationmode(cores):
extrargs = []
log = int(math.log(float(cores),2.0))
varset = []
# TODO change fn_name according to 'spliton'
# fn_name = 'init'
# splitonfull = 'c %s::1::%s!0@1#1 ' % (fn_name, spliton)
splitonfull = 'c %s#2 ' % (spliton)
if contexts == 1:
cores = 1
try:
choice_bitwidth = (findpropositionalvarsize(splitonfull,lines).bind(self))/contexts
except:
self.error("DIMACS lookup failed for the given symbol %s" % spliton)
# split on least significant digits of the symbolic variables that represent the context-switch points
if cores >= 2: varset.append(findpropositionalvar(splitonfull,lines,0).bind(self))
if cores >= 4: varset.append(findpropositionalvar(splitonfull,lines,1*choice_bitwidth).bind(self))
if cores >= 8: varset.append(findpropositionalvar(splitonfull,lines,2*choice_bitwidth).bind(self))
if cores >= 16: varset.append(findpropositionalvar(splitonfull,lines,3*choice_bitwidth).bind(self))
if cores > 1:
for k in range(0,cores):
extrarg = " --assume "
boh = '%s' % get_bin(k,log)
i=0
for v in varset:
extrarg += "%s=%s%s" %(v, 0 if boh[i]=='0' else 1, ',' if i<len(varset)-1 else '')
i+=1
extrargs.append(extrarg)
#if 'warning' in err: self.warn('warnings on stderr from the backend')
# to parallel feeder
self.setOutputParam('extrargs', extrargs)
if not simulate or int(simulate) == 0:
verificationmode(cores)
else:
self.setOutputParam('simulate', int(simulate))
|
from __future__ import print_function, unicode_literals
import sys
import scipy.io
import scipy.misc
from cscPy.mano.network.Const import *
import cscPy.dataAugment.augment as augment
#sys.path.append('..')
import pickle
import os
from torch.utils.data import Dataset
from cscPy.mano.network.utils import *
from cscPy.Const.const import *
#from cscPy.mano.network.manolayer import VPoser,MANO_SMPL
import tqdm
import numpy as np
from cscPy.globalCamera.camera import perspective_back_projection, CameraIntrinsics, CameraSeries,perspective_projection
import cscPy.dataAugment.augment as augment
import torchvision
from torch.utils.data import Dataset
import cv2
from torch.utils.data import Dataset
from cscPy.multiviewDataset.toolkit import MultiviewDatasetDemo
from cscPy.handProcess.dataAugment import processing_augmentation
from cscPy.handProcess.preprocessImages import preprocessMVdataset,imcrop
import torchvision
import cv2
from cscPy.mano.network.utils import *
from cscPy.mano.network.utilsSmallFunctions import *
from cscPy.mano.network.Const import *
from cscPy.globalCamera.camera import CameraIntrinsics,perspective_projection
import matplotlib.pyplot as plt
# SET THIS to where RHD is located on your machine
# NOTE(review): this whole RHD path-probing block is dead code — path_to_db
# is unconditionally reassigned to an STB path a few lines below.
path_to_db = './RHD_published_v2/'
if(not os.path.exists(path_to_db)):
    path_to_db = '/mnt/data/shicheng/RHD_published_v2/'
if(not os.path.exists(path_to_db)):
    path_to_db = '/home/csc/dataset/RHD_published_v2/'
if (not os.path.exists(path_to_db)):
    path_to_db = '/mnt/ssd/csc/RHD_published_v2/'
if (not os.path.exists(path_to_db)):
    path_to_db = '/mnt/data/csc/RHD_published_v2/'
#os.environ["DISPLAY"] = "localhost:11.0"
# NOTE(review): `set` shadows the builtin; not renamed here because other
# parts of the file may reference it.
set = 'training'
# Probe the usual locations for the STB dataset root.
path_to_db = '/mnt/data/shicheng/STB/'
if(not os.path.exists(path_to_db)):
    path_to_db = '/home/csc/dataset/'
if(not os.path.exists(path_to_db)):
    path_to_db = '/mnt/ssd/csc/STB/'
if(not os.path.exists(path_to_db)):
    path_to_db = '/mnt/data/csc/STB/'
# Camera extrinsics (presumably SK depth-to-color; confirm against STB docs):
# R is a Rodrigues rotation vector, T a translation in millimetres.
R = np.array([0.00531,-0.01196,0.00301])
T = np.array([-24.0381, -0.4563, -1.2326])
#for rgb image
fx = 607.92271
fy = 607.88192
tx = 314.78337
ty = 236.42484
# 3x3 pinhole intrinsics for the RGB camera.
K = np.array([[fx,0,tx],[0,fy,ty],[0,0,1]])
import cv2
rotationMatrix = cv2.Rodrigues(R)[0]
# Reshape T for broadcasting against (nseq, nframe, 21, 3, 1) joint arrays.
T = np.reshape(T,[1,1,1,3,1])
# Frames per STB sequence (used by __len__/__getitem__ below).
imageNum = 1500
class STBDiscretedDataloader(Dataset):
    """STB (Stereo Hand Pose Tracking Benchmark) dataset loader.

    Loads SK-camera color frames plus the 3D hand joints from the
    ``*_SK.mat`` label files, converts the joints to the MANO ordering,
    and returns crop/heatmap-ready samples.

    Args:
        train: enable training-time augmentation in __getitem__.
        setNum: which sequence subset to use (2 = B1 only, 10 = B2..B6,
            12 = all); any other value asserts.
        skip: unused (kept for interface compatibility).
        gtbone: recompute bone statistics per sample instead of using the
            dataset-level statistics.
        avetemp: not implemented; must be False.
        bonecoeff: coefficient forwarded to getBonePalmMeanStd.
    """

    def __init__(self, train=True, setNum=10, skip=1, gtbone=False, avetemp=False, bonecoeff=1.6):
        assert avetemp == False, "not implement"
        if (setNum == 2):
            self.sequences = ['B1Counting', 'B1Random']
        elif (setNum == 10):
            self.sequences = ['B2Counting', 'B2Random', 'B3Counting', 'B3Random', 'B4Counting', 'B4Random',
                              'B5Counting', 'B5Random', 'B6Counting', 'B6Random']
        elif (setNum == 12):
            self.sequences = ['B2Counting', 'B2Random', 'B3Counting', 'B3Random', 'B4Counting', 'B4Random',
                              'B5Counting', 'B5Random', 'B6Counting', 'B6Random', 'B1Counting', 'B1Random']
        else:
            assert False
        self.handPara = []
        self.train = train
        self.gtbone = gtbone
        self.datasetname = 'STB'
        for seq in self.sequences:
            matfile = '%slabels/%s_SK.mat' % (path_to_db, seq)
            data = scipy.io.loadmat(matfile)
            self.handPara.append(data['handPara'])
        # (nseq, nframe, 21, 3) joints, millimetres, in the SK camera frame.
        self.handPara = np.array(self.handPara).transpose(0, 3, 2, 1).astype(np.float32)
        self.bonecoeff = bonecoeff
        self.palmcoeff = 2
        # Replace the palm-centre annotation with an extrapolated wrist joint.
        wrist_xyz = self.handPara[:, :, 16:17, :] + 1.43 * (self.handPara[:, :, 0:1, :] - self.handPara[:, :, 16:17, :])
        self.handPara = np.concatenate([wrist_xyz, self.handPara[:, :, 1:, :]], axis=2)
        # Re-index STB -> Bighand -> MANO joint ordering.
        self.handPara = self.handPara[:, :, STB2Bighand_skeidx, :][:, :, Bighand2mano_skeidx, :]
        self.handPara = np.expand_dims(self.handPara, axis=4)
        #print('self.handPara',self.handPara.shape)
        # Apply the (R, T) extrinsics and convert mm -> m.
        self.handPara = (np.transpose(rotationMatrix) @ (self.handPara - T))[..., 0] / 1000
        self.handPara[:, :, :, 0] = -self.handPara[:, :, :, 0]  # left hand to right hand
        joints = self.handPara.copy()
        print('STB scale', self.handPara[0, 0, 0, :], len(self.sequences) * imageNum, 'bonecoeff', bonecoeff)
        self.ref = get32fTensor(joints[0, 0])
        # if avetemp:self.ref=get32fTensor(getRefJointsFromDataset(joints/1000,0))
        # else:self.ref = get32fTensor(joints[0]/1000)
        self.boneLenMean, self.boneLenStd, self.curvatureMean, self.curvatureStd = \
            getBonePalmMeanStd(joints.reshape(-1, 21, 3), bonecoeff=self.bonecoeff, palmcoeff=self.palmcoeff, debug=True)

    def __len__(self):
        # imageNum frames per sequence.
        return len(self.sequences) * imageNum

    def __getitem__(self, idx):
        """Return one sample dict (cropped image tensor, pose targets,
        camera matrices, augmentation parameters, bone statistics)."""
        folder_idx = idx // imageNum
        id = idx % imageNum
        img_path_rgb = '%s%s/%s_%s_%d.png' % (path_to_db, self.sequences[folder_idx], 'SK', 'color', id)
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; if
        # this raises AttributeError, switch to imageio.imread (same RGB
        # channel order) — confirm the installed SciPy version.
        image = scipy.misc.imread(img_path_rgb)
        kp_coord_xyz = self.handPara[folder_idx, id].copy()
        k = K
        k_ = np.linalg.inv(k)
        kp_coord_uvd = perspectiveProjection(kp_coord_xyz.copy(), k).astype(np.float32)
        image = image.reshape(480, 640, 3)
        # Mirror the image to match the left->right hand flip of the joints.
        image = cv2.flip(image, 1)
        if (self.gtbone):
            boneLenMean, boneLenStd, curvatureMean, curvatureStd = \
                getBonePalmMeanStd(kp_coord_xyz * 1000, bonecoeff=self.bonecoeff,
                                   palmcoeff=self.palmcoeff, debug=True)
        else:
            boneLenMean, boneLenStd, curvatureMean, curvatureStd = \
                self.boneLenMean, self.boneLenStd, self.curvatureMean, self.curvatureStd
        # All 21 joints are treated as visible in STB.
        # Fix: np.bool alias was removed in NumPy 1.24; use the builtin bool.
        uv_vis = np.ones(21).astype(bool)
        # print("uvd", kp_coord_uvd)
        # for i in range(kp_coord_uvd.shape[0]):
        #     image=cv2.circle(image, (kp_coord_uvd[i, 0], kp_coord_uvd[i, 1]), 3, (255,0,0))
        #     image=cv2.putText(image,str(i),(kp_coord_uvd[i, 0], kp_coord_uvd[i, 1]),cv2.FONT_HERSHEY_SIMPLEX,
        #                       1,(255))
        # cv2.imshow('img', image)
        # cv2.waitKey(0)
        # Sanity: projection must preserve the depth coordinate.
        assert np.sum(np.abs(kp_coord_uvd[:, -1] - kp_coord_xyz[:, -1])) < 1e-4
        pose3d = kp_coord_uvd[:21, :]
        pose3d_root = pose3d[4:5, -1:]
        pose3d_rel = pose3d - pose3d_root  # relative coords in metric coords
        # Normalize depth by the index-root bone length (joints 4-5).
        index_root_bone_length = np.sqrt(np.sum((kp_coord_xyz[4, :] - kp_coord_xyz[5, :]) ** 2))
        scaled = index_root_bone_length
        relative_depth = (pose3d_rel / scaled)
        # print(np.max(np.abs(relative_depth[:,-1])))
        pose_uv_all = kp_coord_uvd[:21, :2]
        # Crop a square around the root joint, padded by 20%.
        crop_center = pose_uv_all[4, :2]
        crop_center = np.reshape(crop_center, 2)
        pose_uv_vis = pose_uv_all[uv_vis, :]
        crop_size = np.max(np.absolute(pose_uv_vis - crop_center) * 1.2)
        crop_size = np.minimum(np.maximum(crop_size, 25.0), 200.0)
        image_crop, (u1, v1, u2, v2) = imcrop(image, crop_center, crop_size)
        image_crop = cv2.resize(image_crop, (256, 256), interpolation=cv2.INTER_NEAREST)
        scaleu = 256 / (u2 - u1)
        scalev = 256 / (v2 - v1)
        dl = 10
        # Map the crop into 64x64x64 heatmap-volume coordinates.
        scale = (np.array([scaleu, scalev, 1 / dl]) * np.array([64 / 256, 64 / 256, 64])).astype(np.float32)
        image_crop = image_crop.reshape([256, 256, 3])
        pose3d = kp_coord_uvd.reshape([21, 3]).copy()
        transition = np.array([-u1, -v1, dl // 2]).astype(np.float32)
        # print(relative_depth.shape)
        pose3d[:, -1] = relative_depth[:, -1].copy()
        pose3d += transition
        pose3d = pose3d * scale
        if (self.train):
            image_crop, pose3d, randTrans, randScale, randAngle = \
                augment.processing_augmentation_Heatmap(image_crop, pose3d)
            pose3d = np.reshape(pose3d, [21, 3])
            # cjitter = torchvision.transforms.ColorJitter(brightness=0.8, contrast=[0.4, 1.6], saturation=[0.4, 1.6],
            #                                              hue=0.1)
            image_trans = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
        else:
            randTrans, randScale, randAngle = np.zeros([1, 2]), np.ones([1, 1]), np.zeros(1)
            pose3d = np.reshape(pose3d, [21, 3])
            image_trans = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
        image_crop = image_trans(torchvision.transforms.ToPILImage()((image_crop).astype(np.uint8)))
        # print('image_crop', image_crop.shape)
        # img=(image_crop.permute(1,2,0).numpy() * 255).astype(np.uint8).copy()
        # cv2.imshow('o1', img)
        # cv2.waitKey(0)
        dic = {"image": image_crop, "pose_gt": get32fTensor(pose3d), 'scale': get32fTensor(scale),
               'transition': get32fTensor(transition),
               "pose3d_root": get32fTensor(pose3d_root), "scaled": get32fTensor(scaled),
               'K_': get32fTensor(k_).reshape(1, 3, 3), 'K': get32fTensor(k).reshape(1, 3, 3),
               '3d': torch.tensor([1 if self.datasetname == 'RHD' else 0]).long(), 'randTrans': get32fTensor(randTrans),
               'randScale': get32fTensor(randScale), 'randAngle': get32fTensor(randAngle),
               'kp_coord_uvd': get32fTensor(kp_coord_uvd), 'kp_coord_xyz': get32fTensor(kp_coord_xyz),
               'ref': self.ref.clone(),
               'boneLenMean': boneLenMean, 'boneLenStd': boneLenStd,
               'curvatureMean': curvatureMean, 'curvatureStd': curvatureStd, }
        # Dead branch here: self.datasetname is always 'STB' in this class.
        if (self.train == False and self.datasetname == 'MV'):
            dic['imgori'] = get32fTensor(image)
        return dic
if __name__ == '__main__':
    # Smoke test: fetch one sample. Fix: the original referenced the
    # undefined name `STBDiscretedDateset3D` (NameError) and called
    # __getitem__ unbound without an instance.
    STBDiscretedDataloader().__getitem__(0)
    pass
|
'''
Copyright 2017, Dell, Inc.
Author(s):
UCS test script that tests:
-The Poller workflow
-The Poller data
'''
import fit_path # NOQA: unused import
import unittest
from common import fit_common
from nosedep import depends
from nose.plugins.attrib import attr
import ucs_common
import flogging
logs = flogging.get_loggers()
@attr(all=True, regression=True, smoke=False, ucs_rackhd=True)
class rackhd20_ucs_catalogs(unittest.TestCase):
NODELIST = []
RACK_NODELIST = []
CHASSIS_NODELIST = []
BLADE_NODELIST = []
CATALOGS = {}
@classmethod
def setUpClass(cls):
if not ucs_common.get_nodes_utility():
raise Exception("error getting node list")
if not ucs_common.get_obms_utility():
raise Exception("error getting obms list")
@classmethod
def tearDownClass(cls):
if not ucs_common.restore_node_utility():
raise Exception("error restoring node list")
if not ucs_common.restore_obms_utility():
raise Exception("error restoring obms list")
def get_ucs_node_list(self):
api_data = fit_common.rackhdapi('/api/2.0/nodes')
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for node in api_data['json']:
if node["obms"] != [] and node["obms"][0]["service"] == "ucs-obm-service":
self.NODELIST.append(node["id"])
node_name = node["name"].split("/")[-1]
if "rack" in node_name:
self.RACK_NODELIST.append(node["id"])
elif "blade" in node_name:
self.BLADE_NODELIST.append(node["id"])
elif "chassis" in node_name:
self.CHASSIS_NODELIST.append(node["id"])
@unittest.skipUnless("ucsm_ip" in fit_common.fitcfg(), "")
def test_check_ucs_params(self):
if not ucs_common.is_ucs_valid():
raise unittest.SkipTest("Ucs parameters are not valid or UCSPE emulator is not ready, skipping all UCS tests")
@depends(after=[test_check_ucs_params])
def test_api_20_workflow_ucs_catalogs(self):
"""
Tests the UCS Poller workflow in rackHD
:return:
"""
data_payload = {
"name": "Graph.Ucs.Discovery",
"options": {
"defaults": {
"username": ucs_common.UCSM_USER,
"password": ucs_common.UCSM_PASS,
"ucs": ucs_common.UCSM_IP,
"uri": ucs_common.UCS_SERVICE_URI
},
"when-discover-physical-ucs": {
"discoverPhysicalServers": "true",
},
"when-discover-logical-ucs": {
"discoverLogicalServer": "false"
},
"when-catalog-ucs": {
"autoCatalogUcs": "false"
}
}
}
header = {"Content-Type": "application/json"}
api_data = fit_common.rackhdapi("/api/2.0/workflows", action="post",
headers=header, payload=data_payload)
self.assertEqual(api_data['status'], 201,
'Incorrect HTTP return code, expected 201, got:' + str(api_data['status']))
id = api_data["json"]["context"]["graphId"]
status = ucs_common.wait_utility(str(id), 0, "Ucs Discovery")
self.assertEqual(status, 'succeeded', 'Ucs Discovery graph returned status {}'.format(status))
self.get_ucs_node_list()
errNodes = ''
errGraphs = ''
for node in self.NODELIST:
postUrl = '/api/2.0/nodes/' + node + "/workflows?name=Graph.Ucs.Catalog"
header = {"Content-Type": "application/json"}
api_data = fit_common.rackhdapi(postUrl, headers=header, action="post", payload={})
if api_data['status'] != 201:
errNodes += 'POST for node {} returned {}, '.format(node, api_data['status'])
status = ucs_common.wait_utility(api_data["json"]["instanceId"], 0, "Catalog")
if status != 'succeeded':
errGraphs += 'graph id {} finished with status: {}, '.format(api_data["json"]["instanceId"], status)
logs.info_1("Posted URL: {0} with status: {1}".format(postUrl, api_data['status']))
self.assertEqual(len(errNodes), 0, errNodes)
self.assertEqual(len(errGraphs), 0, errGraphs)
@depends(after=[test_api_20_workflow_ucs_catalogs])
def test_api_20_get_catalogs(self):
msg = "Description: Check catalogs data per node."
logs.info_2("\t{0}".format(msg))
for node in self.NODELIST:
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + node + "/catalogs")
self.assertEqual(api_data['status'],
200,
"Incorrect HTTP return code, expected 200, got:{0}"
.format(str(api_data['status'])))
self.CATALOGS[node] = api_data['json']
for item in api_data['json']:
for subitem in ['node', 'id', 'source', 'data']:
self.assertIn(subitem, item, subitem + ' field error')
@depends(after=[test_api_20_get_catalogs])
def test_api_20_verify_catalogs_source(self):
msg = "Description: Check source of catalogs created for node"
logs.info_2("\t{0}".format(msg))
for node in self.NODELIST:
sources = []
for item in self.CATALOGS[node]:
sources.append(item['source'])
logs.info_5("Node {0} contains source: {1}".format(node, sources))
self.assertIn("UCS", sources, node + " catalogs doesn't contain UCS source")
for node in self.RACK_NODELIST + self.BLADE_NODELIST:
sources = []
for item in self.CATALOGS[node]:
sources.append(item['source'])
self.assertIn("UCS:board", sources, node + " catalogs doesn't contain UCS:board source")
@depends(after=[test_api_20_get_catalogs])
def test_api_20_vefify_catalogs_source_data(self):
msg = "Description: Check source data of catalogs created for node"
logs.info_2("\t{0}".format(msg))
for node in self.NODELIST:
for item in self.CATALOGS[node]:
logs.info_2("Checking source:{0}".format(item['source']))
self.assertNotEqual(item, '', 'Empty JSON Field')
sourcedata = fit_common.rackhdapi("/api/2.0/nodes/" + node + "/catalogs/" + item['source'])
self.assertGreater(len(sourcedata['json']['id']), 0, 'id field error')
self.assertGreater(len(sourcedata['json']['node']), 0, 'node field error')
self.assertGreater(len(sourcedata['json']['source']), 0, 'source field error')
self.assertGreater(len(sourcedata['json']['updatedAt']), 0, 'updatedAt field error')
self.assertGreater(len(sourcedata['json']['createdAt']), 0, 'createdAt field error')
if __name__ == '__main__':
    # Run the UCS catalog test suite with the standard unittest runner.
    unittest.main()
|
import torch
import torch.nn as nn
from model.pointnetpp_segmodel_sea import PointNetPPInstSeg
from model.primitive_fitting_net_sea import PrimitiveFittingNet
# from model.pointnetpp_segmodel_cls_sea import InstSegNet
from model.pointnetpp_segmodel_cls_sea_v2 import InstSegNet
# from datasets.Indoor3DSeg_dataset import Indoor3DSemSeg
from datasets.instseg_dataset import InstSegmentationDataset
from datasets.ABC_dataset import ABCDataset
from datasets.ANSI_dataset import ANSIDataset
from torch.nn import functional as F
from tqdm import tqdm
import os
from torch.utils import data
from model.utils import iou
# import horovod.torch as hvd
import torch.multiprocessing as mp
# from filelock import FileLock
import time
import numpy as np
from model.utils import batched_index_select, calculate_acc
from .trainer_utils import MultiMultinomialDistribution
import logging
from model.loss_model_v5 import ComputingGraphLossModel
from .trainer_utils import get_masks_for_seg_labels, compute_param_loss, DistributionTreeNode, DistributionTreeNodeV2, DistributionTreeNodeArch
from datasets.partnet_dataset import PartNetInsSeg
from model.loss_utils import get_one_hot
from model.loss_utils import compute_embedding_loss
from model.loss_utils import compute_miou, npy
from model.abc_utils import compute_entropy, construction_affinity_matrix_normal
from model.utils import mean_shift
class TrainerInstSegmentation(nn.Module):
    def __init__(self, dataset_root, num_points=512, batch_size=32, num_epochs=200, cuda=None, dataparallel=False,
                 use_sgd=False, weight_decay_sgd=5e-4,
                 resume="", dp_ratio=0.5, args=None):
        """Build the instance-segmentation trainer: model, PartNet datasets
        and loaders, optimizer(s), loss model, and architecture/loss sampling
        distributions.

        Most configuration is read from `args` (an argparse-style namespace);
        several constructor parameters (num_points, dataparallel, use_sgd,
        weight_decay_sgd, dp_ratio) are overridden by `args` or unused below.
        """
        super(TrainerInstSegmentation, self).__init__()
        # n_layers: int, feat_dims: list, n_samples: list, n_class: int, in_feat_dim: int
        self.num_epochs = num_epochs
        # Device selection; note torch.cuda.set_device(args.gpu) below also
        # pins the current CUDA device regardless of `cuda`.
        if cuda is not None:
            self.device = torch.device("cuda:" + str(cuda)) if torch.cuda.is_available() else torch.device("cpu")
        else:
            self.device = torch.device("cpu")
        ''' SET horovod configurations '''
        # hvd.init()
        torch.cuda.set_device(args.gpu)
        torch.set_num_threads(5)
        kwargs = {'num_workers': 5, 'pin_memory': True}
        # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent
        # issues with Infiniband implementations that are not fork-safe
        if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context') and
                mp._supports_context and 'forkserver' in mp.get_all_start_methods()):
            kwargs['multiprocessing_context'] = 'forkserver'
        self.kwargs = kwargs
        ''' SET horovod configurations '''
        ''' SET arguments '''
        # todo: use the following configurations --- num_points = 10000; pred_nmasks = 100; not_clip = True; task = instseg_h
        self.args = args
        # args values override the constructor defaults from here on.
        self.batch_size = int(self.args.batch_size)
        self.num_epochs = int(self.args.epochs)
        self.dp_ratio = float(self.args.dp_ratio)
        self.landmark_type = args.landmark_type
        self.init_lr = float(self.args.init_lr)
        self.weight_decay = float(self.args.weight_decay)
        self.lr_decay_ratio = float(self.args.lr_decay_ratio)
        self.step_size = int(self.args.step_size)
        self.n_points = int(self.args.num_points)
        self.resume = self.args.resume
        feat_dims = self.args.feat_dims.split(",")
        self.feat_dims = [int(fd) for fd in feat_dims]
        self.use_ansi = self.args.use_ansi
        self.with_fitting_loss = self.args.with_fitting_loss
        self.n_max_instances = self.args.n_max_instances
        self.sea_interval = self.args.sea_interval
        # number of added intermediate losses
        self.nn_inter_loss = self.args.nn_inter_loss
        n_samples = self.args.n_samples.split(",")
        self.n_samples = [int(ns) for ns in n_samples]
        self.map_feat_dim = int(self.args.map_feat_dim)
        self.n_layers = int(self.args.n_layers)
        assert self.n_layers == len(
            self.n_samples), f"Expect the times of down-sampling equal to n_layers, got n_layers = {self.n_layers}, times of down-sampling = {len(self.n_samples)}."
        assert self.n_layers == len(
            self.feat_dims), f"Expect the number of feature dims equal to n_layers, got n_layers = {self.n_layers}, number of dims = {len(self.feat_dims)}."
        ''' SET arguments '''
        ''' GET model & loss selection parameters '''
        conv_select_types = self.args.conv_select_types.split(",")
        self.conv_select_types = [int(tt) for tt in conv_select_types]
        # NOTE(review): this hard-coded list overrides the value parsed from
        # args.conv_select_types on the previous line.
        self.conv_select_types = [0,0,1] # [0,0,0]
        self.point_feat_selection = int(self.args.point_feat_selection)
        self.point_geo_feat_selection = int(self.args.point_geo_feat_selection)
        # print(self.args.contrast_selection)
        contrast_selection = self.args.contrast_selection.split(",")
        self.contrast_selection = [int(cs) for cs in contrast_selection]
        self.no_spectral = self.args.no_spectral
        ''' SET working dirs '''
        # NOTE: uses the constructor params `batch_size` and `resume` (not
        # self.batch_size / self.resume) when composing the directory name.
        self.model_dir = "task_{}_debug_{}_npoints_{}_nn_inter_loss_{}_in_model_loss_model_{}_inst_part_seg_mixing_type_{}_init_lr_{}_bsz_{}_weight_decay_{}_more_bn_resume_{}".format(
            self.args.task,
            str(args.debug),
            str(self.n_points),
            str(self.nn_inter_loss),
            str(args.in_model_loss_model),
            self.landmark_type,
            str(self.init_lr),
            str(batch_size),
            str(self.weight_decay),
            str(True if len(resume) > 0 else False)
        )
        #
        # with FileLock(os.path.expanduser("~/.horovod_lock")):
        if not os.path.exists("./prm_cache"):
            os.mkdir("./prm_cache")
        if not os.path.exists(os.path.join("./prm_cache", self.model_dir)):
            os.mkdir(os.path.join("./prm_cache", self.model_dir))
        self.model_dir = "./prm_cache/" + self.model_dir
        ''' SET working dirs '''
        ''' SET working dir for loss model '''
        self.loss_model_save_path = os.path.join(self.model_dir, "loss_model")
        # with FileLock(os.path.expanduser("~/.horovod_lock")):
        # Start from a clean loss-model directory on every run.
        if os.path.exists(self.loss_model_save_path):
            print(f"=== REMOVE the existing loss model file from {self.loss_model_save_path} ===")
            import shutil
            shutil.rmtree(self.loss_model_save_path)
        if not os.path.exists(self.loss_model_save_path):
            os.mkdir(self.loss_model_save_path)
        self.args.loss_model_save_path = self.loss_model_save_path
        ''' SET working dir for loss model '''
        ''' GET model '''
        self.args.lr_scaler = 1.0
        self.model = InstSegNet(
            n_layers=self.n_layers,
            feat_dims=self.feat_dims,
            n_samples=self.n_samples,
            map_feat_dim=self.map_feat_dim,
            args=self.args # args
        )
        self.model.cuda()
        ''' GET model '''
        ''' SET datasets & data-loaders & data-samplers '''
        self.dataset_root = self.args.dataset_root
        self.nmasks = int(self.args.pred_nmasks)
        # DATASETS for train & val & test
        self.train_dataset = self.args.train_dataset
        self.val_dataset = self.args.val_dataset
        self.test_dataset = self.args.test_dataset
        # DATA-SPLITTING configurations
        self.split_type = self.args.split_type
        self.split_train_test = self.args.split_train_test
        ### GET test data-types for all categories in test setting ####
        self.pure_test = self.args.pure_test
        pure_test_types = self.args.pure_test_types.split(";")
        self.pure_test_types = [str(tpt) for tpt in pure_test_types]
        ### GET test data-types for all categories in test setting ####
        # TRAIN & VAL & TEST PartNet types
        partnet_train_types = self.args.partnet_train_types.split(";")
        self.partnet_train_types = [str(tpt) for tpt in partnet_train_types]
        partnet_val_types = self.args.partnet_val_types.split(";")
        self.partnet_val_types = [str(tpt) for tpt in partnet_val_types]
        partnet_test_types = self.args.partnet_test_types.split(";")
        self.partnet_test_types = [str(tpt) for tpt in partnet_test_types]
        #
        train_prim_types = self.args.train_prim_types.split(",")
        self.train_prim_types = [int(tpt) for tpt in train_prim_types]
        test_prim_types = self.args.test_prim_types.split(",")
        self.test_prim_types = [int(tpt) for tpt in test_prim_types]
        #### SET pure test setting ####
        self.test_performance = self.args.test_performance
        #### SET pure test setting ####
        self.best_eval_acc = -999.0
        # NOTE(review): 'Bowl' appears twice in the val shape list.
        train_partnet_shapes = ['Lamp', 'Chair', 'StorageFurniture']
        val_partnet_shapes = ['Bowl', 'Bag', 'Bed', 'Bottle', 'Bowl', 'Clock', 'Dishwasher', 'Display', 'Door', 'Earphone', 'Faucet', 'Hat', 'Knife', 'Microwave', 'Mug', 'Refrigerator','Scissors', 'Laptop']
        # In test-performance mode, train on the union and validate on it too.
        if self.test_performance:
            train_partnet_shapes = train_partnet_shapes + val_partnet_shapes
            val_partnet_shapes = train_partnet_shapes
        # test_partnet_shapes = ['Vase']
        test_partnet_shapes = ['Laptop']
        self.partnet_train_set = PartNetInsSeg(
            root_dir=self.dataset_root, split='train', normalize=True, transform=None, shape=train_partnet_shapes,
            level=3, cache_mode=False
        )
        self.partnet_val_set = PartNetInsSeg(
            root_dir=self.dataset_root, split='val' if self.test_performance else 'train',
            normalize=True, transform=None, shape=val_partnet_shapes,
            level=3, cache_mode=False
        )
        # NOTE(review): the test set uses split='train' — confirm intended.
        self.partnet_test_set = PartNetInsSeg(
            root_dir=self.dataset_root, split='train', normalize=True, transform=None, shape=test_partnet_shapes,
            level=3, cache_mode=False
        )
        print("Loaded...")
        self.train_loader = data.DataLoader(
            self.partnet_train_set, batch_size=self.batch_size,
            # sampler=self.train_sampler,
            shuffle=True,
            **kwargs)
        self.val_loader = data.DataLoader(
            self.partnet_val_set, batch_size=self.batch_size,
            # sampler=self.val_sampler,
            shuffle=False,
            **kwargs)
        self.test_loader = data.DataLoader(
            self.partnet_test_set, batch_size=self.batch_size,
            # sampler=self.test_sampler,
            shuffle=False,
            **kwargs)
        ''' SET datasets & data-loaders & data-samplers '''
        ''' SET optimizers '''
        lr_scaler = 1.0
        self.lr_scaler = lr_scaler
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=self.init_lr * lr_scaler,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=self.weight_decay)
        ''' SET optimizers '''
        ''' SET related sampling distributions '''
        # number of grouping operations, binary operations and unary operations
        self.nn_grp_opers = args.nn_grp_opers # 4 # bz x N x k x f... -> bz x N x f...: sum, mean, max, svd
        self.nn_binary_opers = args.nn_binary_opers # 6 # add, minus, element-wise multiply, cross product, cartesian product, matrix-vector product
        self.nn_unary_opers = args.nn_unary_opers # 7 # identity, square, 2, -, -2, inv, orth
        self.nn_in_feats = args.nn_in_feats # 2
        ''' SET sampling tree '''
        if self.args.v2_tree:
            sampling_tree = DistributionTreeNodeV2
        else:
            sampling_tree = DistributionTreeNode
        # Sampling distributions are only built in real (non-debug,
        # non-pure-test) runs.
        if not args.debug and not args.pure_test:
            if not args.debug_arch:
                self.sampling_tree_rt = sampling_tree(cur_depth=0, nn_grp_opers=self.nn_grp_opers, nn_binary_opers=self.nn_binary_opers, nn_unary_opers=self.nn_unary_opers, nn_in_feat=self.nn_in_feats, args=args)
            self.arch_dist = DistributionTreeNodeArch(cur_depth=0, tot_layers=3, nn_conv_modules=4, device=None,
                                                      args=None)
        ''' SET related sampling distributions '''
        self.in_model_loss_model = args.in_model_loss_model
        # A separate loss model (with its own optimizer) is only built when
        # the loss is not computed inside the main model.
        if not self.args.in_model_loss_model:
            ''' SET loss model '''
            self.loss_model = ComputingGraphLossModel(pos_dim=3, pca_feat_dim=64, in_feat_dim=64, pp_sim_k=16, r=0.03,
                                                      lr_scaler=lr_scaler, init_lr=self.init_lr,
                                                      weight_decay=self.weight_decay,
                                                      loss_model_save_path=self.loss_model_save_path,
                                                      in_rep_dim=self.map_feat_dim if not self.args.use_spfn else 128,
                                                      nn_inter_loss=self.nn_inter_loss,
                                                      args=args
                                                      )
            # SET optimizer for loss model
            cur_optimizer = torch.optim.Adam(
                self.loss_model.parameters(),
                lr=self.init_lr,
                betas=(0.9, 0.999),
                eps=1e-08,
                weight_decay=self.weight_decay)
            self.head_optimizer = cur_optimizer
            ''' SET loss model '''
def get_gt_conf(self, momask):
# bz x nmask x N?
#
if momask.size(1) > momask.size(2):
momask = momask.transpose(1, 2)
gt_conf = torch.sum(momask, 2)
# gt_conf = torch.where(gt_conf > 0, 1, 0).float()
gt_conf = torch.where(gt_conf > 0, torch.ones_like(gt_conf), torch.zeros_like(gt_conf)).float()
return momask, gt_conf
def get_nn_segmentations(self, batch_inst_seg):
bz = batch_inst_seg.size(0)
tot_nn_segs = 0
for i in range(bz):
cur_inst_seg = batch_inst_seg[i]
cur_seg_nn = int(torch.max(cur_inst_seg).item()) + 1
tot_nn_segs += cur_seg_nn
return tot_nn_segs
    def _clustering_test(
            self, epoch, desc="val",
            conv_select_types=[0, 0, 0, 0, 0],
            loss_selection_dict=[],
            save_samples=False,
            sample_interval=20,
            inference_prim_types=[2,6],
            cur_loader=None,
            cur_test_type="Lamp"
    ):
        """Clustering-based evaluation: run the model, mean-shift-cluster the
        per-point embeddings sample by sample, and report mIoU / recall /
        embedding loss (also appended to test_logs.txt).

        Returns:
            (mean mIoU, mean recall) over all evaluated samples.
        """
        # NOTE(review): the list defaults above are mutable default
        # arguments; they are not mutated here, but None-defaults would be
        # safer. `loss_selection_dict` is a list despite its name.
        save_stats_path = os.path.join(self.model_dir, "inference_saved")
        # inference_prim_types = [6]
        inference_prim_types_str = [str(ttt) for ttt in inference_prim_types]
        test_bar = tqdm(cur_loader)
        self.model.eval()
        poses, gt_segs, pred_segs = [], [], []
        normals = []
        all_miou = []
        feat_losses = []
        normal_losses = []
        type_losses = []
        all_miou = []
        loss_nn = []
        all_recalls = []
        with torch.no_grad():
            for i_batch, batch_dicts in enumerate(test_bar):
                batch_pos = batch_dicts['points']
                batch_inst_seg = batch_dicts['ins_id']
                cur_batch_nn_seg = self.get_nn_segmentations(batch_inst_seg)
                # tot_seg_nns.append(cur_batch_nn_seg)
                batch_pos = batch_pos.float().cuda()
                batch_inst_seg = batch_inst_seg.long().cuda()
                # One-hot instance masks padded to 200 slots.
                batch_momasks = get_one_hot(batch_inst_seg, 200)
                bz, N = batch_pos.size(0), batch_pos.size(1)
                feats = {}
                rt_values = self.model(
                    pos=batch_pos, masks=batch_momasks, inst_seg=batch_inst_seg, feats=feats,
                    conv_select_types=conv_select_types,
                    loss_selection_dict=loss_selection_dict,
                    loss_model=self.loss_model if not self.in_model_loss_model else self.model.intermediate_loss if self.args.add_intermediat_loss else None
                )
                # # separate losses for each mid-level prediction losses
                # losses = []
                seg_pred, gt_l, pred_conf, losses, fps_idx = rt_values
                # Clustering hyper-parameters (several are only used by the
                # commented-out normal-affinity branch below).
                feat_ent_weight = 1.70
                edge_ent_weight = 1.23
                edge_knn = 50
                normal_sigma = 0.1
                edge_topK = 12
                bandwidth = 0.85
                spec_embedding_list = []
                weight_ent = []
                # bz x N x k
                totx = losses['x']
                # x = x.detach().cpu()
                # Cluster each sample of the batch independently.
                for jj in range(totx.size(0)):
                    x = totx[jj].unsqueeze(0)
                    cur_batch_inst_seg = batch_inst_seg[jj].unsqueeze(0)
                    # pred_type = statistics['type_per_point']
                    # normal_per_point = statistics['normal_per_point']
                    feat_loss, pull_loss, push_loss, _, _ = compute_embedding_loss(x, cur_batch_inst_seg)
                    # normal_loss = compute_normal_loss(normal_per_point, batch_normals)
                    # type_loss = compute_nnl_loss(pred_type, batch_primitives)
                    feat_losses.append(feat_loss.detach().cpu().item() * 1)
                    # normal_losses.append(normal_loss.detach().cpu().item() * 1)
                    # type_losses.append(type_loss.detach().cpu().item() * 1)
                    loss_nn.append(1)
                    # Entropy-based weight for the embedding before clustering.
                    feat_ent = feat_ent_weight - float(npy(compute_entropy(x)))
                    weight_ent.append(feat_ent)
                    spec_embedding_list.append(x)
                    # normal_pred = statistics["normalpred"]
                    # affinity_matrix_normal = construction_affinity_matrix_normal(batch_pos, batch_normals,
                    #                                                              sigma=normal_sigma,
                    #                                                              knn=edge_knn)
                    # edge_topk = edge_topK
                    # e, v = torch.lobpcg(affinity_matrix_normal, k=edge_topk, niter=10)
                    # v = v / (torch.norm(v, dim=-1, keepdim=True) + 1e-16)
                    # edge_ent = edge_ent_weight - float(npy(compute_entropy(v)))
                    # weight_ent.append(edge_ent)
                    # spec_embedding_list.append(v)
                    weighted_list = []
                    # norm_weight_ent = weight_ent / np.linalg.norm(weight_ent)
                    for i in range(len(spec_embedding_list)):
                        weighted_list.append(spec_embedding_list[i] * weight_ent[i])
                    # Concatenate the (weighted) embeddings and mean-shift cluster.
                    spectral_embedding = torch.cat(weighted_list, dim=-1)
                    spec_cluster_pred = mean_shift(spectral_embedding, bandwidth=bandwidth)
                    miou, cur_recall = compute_miou(spec_cluster_pred, cur_batch_inst_seg, return_recall=True)
                    all_miou.append(miou.detach().item())
                    all_recalls.append(cur_recall)
                test_bar.set_description(
                    'Test_{} Epoch: [{}/{}] Iou:{:.3f} recall:{:.3f} feat_loss:{:.3f} '.format(
                        desc,
                        epoch + 1, 200, float(sum(all_miou) / len(all_miou)),
                        float(sum(all_recalls) / sum(loss_nn)),
                        float(sum(feat_losses) / sum(loss_nn)),
                    ))
        # Append the aggregated metrics to the run's test log.
        with open(os.path.join(self.model_dir, "test_logs.txt"), "a") as wf:
            prim_strr = ",".join(inference_prim_types_str)
            wf.write(f"{prim_strr}: " + 'Iou:{:.3f} recall:{:.3f} feat_loss:{:.3f}'.format(
                float(sum(all_miou) / len(all_miou)),
                float(sum(all_recalls) / sum(loss_nn)),
                float(sum(feat_losses) / sum(loss_nn)),) + "\n")
            wf.close()
        return float(sum(all_miou) / len(all_miou)), float(sum(all_recalls) / sum(loss_nn))
    def _test(
            self, epoch, desc="val",
            conv_select_types=[0, 0, 0],
            loss_selection_dict=[],
            cur_loader=None,
            cur_test_type="Lamp"
    ):
        """Evaluate the model on a val/test loader; report loss, IoU and recall.

        Args:
            epoch: current epoch index (used only in progress/log text; the
                "/200" total in the bars is hard-coded).
            desc: "val" or "test"; selects self.val_loader / self.test_loader
                when cur_loader is not supplied.
            conv_select_types: NOTE(review): immediately overwritten by
                self.conv_select_types below, so this argument is effectively
                ignored -- confirm that is intentional.
            loss_selection_dict: forwarded to the model call.  NOTE(review):
                mutable default argument ([]); shared across calls if mutated.
            cur_loader: optional DataLoader overriding the desc-based choice.
            cur_test_type: shape-category tag used to name the saved .npy dumps.

        Returns:
            (avg_iou, avg_recall), each averaged over all evaluated samples.
        """
        # Parameter is shadowed by the instance-level setting (see note above).
        conv_select_types = self.conv_select_types
        self.model.eval()
        if not self.in_model_loss_model:
            self.loss_model.eval()
        with torch.no_grad():
            if cur_loader is None:
                if desc == "val":
                    cur_loader = self.val_loader
                elif desc == "test":
                    cur_loader = self.test_loader
                else:
                    raise ValueError(f"Unrecognized desc: {desc}")
            # cur_loader = self.test_loader
            # Running per-batch accumulators; loss_nn holds batch sizes so the
            # progress-bar averages are sample-weighted.
            loss_list = []
            loss_nn = []
            test_bar = tqdm(cur_loader)
            iouvalue = []
            # NOTE(review): gt_loss is never appended to, so every "GT_L"
            # figure below is sum([])/n == 0.
            gt_loss = []
            avg_recall = []
            tot_seg_nns = []
            tot_poses = []
            tot_gt_segs = []
            tot_pred_segs = []
            for batch_data in test_bar:
                batch_pos = batch_data['points']
                batch_inst_seg = batch_data['ins_id']
                # Number of ground-truth segments per sample (for AvgSegNN).
                cur_batch_nn_seg = self.get_nn_segmentations(batch_inst_seg)
                tot_seg_nns.append(cur_batch_nn_seg)
                batch_pos = batch_pos.float().cuda()
                batch_inst_seg = batch_inst_seg.long().cuda()
                # One-hot instance masks with a hard cap of 200 instances.
                batch_momasks = get_one_hot(batch_inst_seg, 200)
                bz, N = batch_pos.size(0), batch_pos.size(1)
                feats = {}
                rt_values = self.model(
                    pos=batch_pos, masks=batch_momasks, inst_seg=batch_inst_seg, feats=feats,
                    conv_select_types=conv_select_types,
                    loss_selection_dict=loss_selection_dict,
                    loss_model=self.loss_model if not self.in_model_loss_model else self.model.intermediate_loss if self.args.add_intermediat_loss else None
                )
                # # separate losses for each mid-level prediction losses
                # losses = []
                seg_pred, gt_l, pred_conf, losses, fps_idx = rt_values
                cur_pred_seg = torch.argmax(seg_pred, dim=1)
                tot_poses.append(batch_pos)
                tot_gt_segs.append(batch_inst_seg)
                tot_pred_segs.append(cur_pred_seg)
                if "iou" in losses:
                    # Model already computed IoU internally; recall unavailable.
                    iou_value = losses["iou"]
                    cur_avg_recall = 0.0
                    seg_loss = None
                else:
                    # nsmp = 256
                    # momasks_sub = batch_momasks.contiguous().view(bz * N, -1)[fps_idx, :].view(bz, nsmp, -1)
                    # #### For eal iou calculation ####
                    momasks_sub = batch_momasks
                    # momasks_sub = get_one_hot(batch_inst_seg, 30)
                    # momasks_sub size = bz x nsmp x nmasks
                    batch_momasks, batch_conf = self.get_gt_conf(momasks_sub)
                    batch_momasks = batch_momasks
                    #
                    # NOTE(review): bare except silently skips batches where the
                    # iou() helper fails; consider narrowing the exception type.
                    try:
                        iou_value, gt_conf, cur_avg_recall = iou(seg_pred, batch_momasks, batch_conf)
                    except:
                        print(seg_pred)
                        continue
                    # print(iouvalue)
                    iou_value = iou_value.mean()
                    seg_loss = None
                avg_recall.append(cur_avg_recall * bz)
                neg_iou_loss = -1.0 * iou_value
                loss = gt_l + neg_iou_loss
                # seg_loss is always None above, so this branch is currently dead.
                if seg_loss is not None and self.args.with_conf_loss:
                    loss += seg_loss.mean()
                loss_list += [loss.detach().cpu().item() * bz]
                loss_nn.append(bz)
                # iou_value may be a tensor or a plain float depending on the
                # branch taken above; handle both.
                try:
                    iouvalue.append(-neg_iou_loss.detach().cpu().item() * bz)
                except:
                    iouvalue.append(-neg_iou_loss * bz)
                test_bar.set_description(
                    # 'Test_{} Epoch: [{}/{}] Loss:{:.3f} GT_L:{:.3f} Losses:{} Iou:{:.3f} AvgRecall:{:.3f} Recode Iou: {} '.format(
                    'Test_{} Epoch: [{}/{}] Loss:{:.3f} GT_L:{:.3f} Iou:{:.3f} AvgRecall:{:.3f} AvgSegNN:{:.3f}'.format(
                        desc,
                        epoch + 1, 200, float(sum(loss_list) / sum(loss_nn)),
                        float(sum(gt_loss)) / float(sum(loss_nn)),
                        # str(losses),
                        float(sum(iouvalue) / sum(loss_nn)),
                        float(sum(avg_recall) / sum(loss_nn)),
                        float(float(sum(tot_seg_nns)) / float(sum(loss_nn))),
                        # str(thr_to_recall)
                    ))
            import random
            # if len(test_bar) > 100:
            #     tot_poses = random.shuffle(tot_poses)
            tot_poses = torch.cat(tot_poses, dim=0)
            tot_gt_segs = torch.cat(tot_gt_segs, dim=0)
            tot_pred_segs = torch.cat(tot_pred_segs, dim=0)
            # Shuffle sample order before dumping when there are many samples.
            if tot_poses.size(0) > 100:
                # tmp_idx = range(tot_poses.size(0))
                tmp_idx = [iii for iii in range(tot_poses.size(0))]
                random.shuffle(tmp_idx)
                # NOTE(review): np.long was deprecated in NumPy 1.20 and removed
                # in 1.24 -- this line breaks on modern NumPy (use np.int64).
                selected_idx = torch.from_numpy(np.array(tmp_idx, dtype=np.long)).cuda()
                tot_poses = tot_poses[selected_idx]
                tot_gt_segs = tot_gt_segs[selected_idx]
                tot_pred_segs = tot_pred_segs[selected_idx]
            # Dump positions / GT / predictions for offline visualization.
            sv_dir = "inst_inference_saved"
            if not os.path.exists(sv_dir):
                os.mkdir(sv_dir)
            np.save(os.path.join(sv_dir, f"{cur_test_type}_pos.npy"), tot_poses.detach().cpu().numpy())
            np.save(os.path.join(sv_dir, f"{cur_test_type}_gt_seg.npy"), tot_gt_segs.detach().cpu().numpy())
            np.save(os.path.join(sv_dir, f"{cur_test_type}_pred_seg.npy"), tot_pred_segs.detach().cpu().numpy())
            # Sample-weighted epoch averages (gt_loss is always 0; see above).
            avg_loss = float(sum(loss_list)) / float(sum(loss_nn))
            avg_gt_loss = float(sum(gt_loss)) / float(sum(loss_nn))
            avg_iou = float(sum(iouvalue)) / float(sum(loss_nn))
            avg_recall = float(sum(avg_recall)) / float(sum(loss_nn))
            avg_seg_nns = float(sum(tot_seg_nns)) / float(sum(loss_nn))
            with open(os.path.join(self.model_dir, "logs.txt"), "a") as wf:
                wf.write("Test_{} Epoch: {:d}, loss: {:.4f} GT_L: {:.4f} Iou: {:.3f} AvgRecall: {:.3f} AvgSegNN:{:.3f}".format(
                    desc,
                    epoch + 1,
                    avg_loss,
                    avg_gt_loss,
                    avg_iou,
                    avg_recall,
                    avg_seg_nns
                ) + "\n")
                wf.close()
            logging.info("Test_{} Epoch: {:d}, loss: {:.4f} GT_L: {:.4f} Iou: {:.3f} AvgRecall: {:.3f} AvgSegNN:{:.3f}".format(
                desc,
                epoch + 1,
                avg_loss,
                avg_gt_loss,
                avg_iou,
                avg_recall,
                avg_seg_nns
            ))
            return avg_iou, avg_recall
def pure_test_all(self):
#### LOAD model ####
if self.args.resume != "":
logging.info(f"Loading model from {self.args.resume}")
ori_dict = torch.load(os.path.join(self.args.resume, "REIN_best_saved_model.pth"), map_location='cpu')
part_dict = dict()
model_dict = self.model.state_dict()
for k in ori_dict:
if k in model_dict:
v = ori_dict[k]
part_dict[k] = v
model_dict.update(part_dict)
self.model.load_state_dict(model_dict)
self.model.add_intermediat_loss = False
#### LOAD model ####
#### SET model architecture & loss dicts ####
baseline_value = torch.tensor([2, 0, 0], dtype=torch.long)
test_type_to_mrecall = {}
test_type_to_miou = {}
#### START test ####
for i, test_type in enumerate(self.pure_test_types):
cur_test_set = PartNetInsSeg(
root_dir=self.dataset_root, split='test', normalize=True, transform=None, shape=[test_type],
level=3, cache_mode=False
)
cur_test_loader = data.DataLoader(
cur_test_set, batch_size=self.batch_size,
shuffle=False, **self.kwargs)
# Classification based
# test_iou, test_recall = self._test(
# 1, desc="test",
# conv_select_types=baseline_value.tolist(),
# loss_selection_dict=[],
# cur_loader=cur_test_loader,
# cur_test_type=test_type
# )
# Clustering based
test_iou, test_recall = self._clustering_test(1, conv_select_types=baseline_value.tolist(),
loss_selection_dict=[],
cur_loader=cur_test_loader,
cur_test_type=test_type)
test_type_to_mrecall[test_type] = test_recall
test_type_to_miou[test_type] = test_iou
logging.info(f"{i}-th test type ({test_type}), avg_recall = {test_recall}, avg_iou = {test_iou}")
#### START test ####
# if hvd.rank() == 0:
print(test_type_to_mrecall)
print(test_type_to_mrecall.values())
print(test_type_to_miou)
print(test_type_to_miou.values())
    def train_all(self):
        """Entry point: despite the name, this configuration only runs the
        pure-test sweep; a resume checkpoint is therefore mandatory."""
        #### IF `pure_test` is set, then only perform test on `args.pure_test_types` ####
        assert self.args.resume != ""
        self.pure_test_all()
|
#!/usr/bin/python3
from termcolor import colored
from tabulate import tabulate
from threading import Thread
import time
import argparse
import sys
import errno
import os
import shutil
import subprocess
import signal
# Output directory for the downloaded files. Cleaned (deleted) afterwards.
# Be extra careful when changing that value !
OUTPUT_DIR = "/tmp/suckit_bench"
# Set the running time for both benchmarks, in seconds
RUN_TIME = 120
# Keep track of the current PID to SIGINT it (written by bench_worker's thread)
CUR_PID = 0
# Path to the suckit binary
SUCKIT_CMD = "suckit"
# URL to start scraping from
URL = "http://books.toscrape.com"
def print_info():
    """Print the benchmark banner and the per-program run time."""
    info = """
    This benchmark aims to bench suckit against other, popular website
    downloaders such as httrack
    """
    duration_msg = f"""
    Each program will run for {RUN_TIME} seconds
    """
    print(colored(info, "blue"))
    print(duration_msg)
def bench_worker(dir_name, cmd):
    """Spawn `cmd URL` from inside `dir_name` (runs on a worker thread).

    Stores the spawned process's PID in the CUR_PID global so the main thread
    can SIGINT it once the timed run is over.  Note that os.chdir is
    process-wide: the whole program's cwd moves into `dir_name`.
    """
    global CUR_PID
    # Handle the case where the directory exists already
    try:
        os.mkdir(dir_name)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    os.chdir(dir_name)
    # Fix: use subprocess.DEVNULL instead of open("/dev/null", "w") -- the
    # original file handle was never closed (fd leak) and the literal path
    # is POSIX-only.
    CUR_PID = subprocess.Popen([cmd, URL],
                               stdout=subprocess.DEVNULL, shell=False).pid
def bench(dir_name, cmd):
    """Run `cmd` for RUN_TIME seconds inside `dir_name` and return the number
    of files it downloaded.

    The worker thread only spawns the process and returns, so join() completes
    quickly; the spawned process keeps running until the SIGINT below.
    """
    thread = Thread(target = bench_worker, args = (dir_name, cmd, ))
    thread.start()
    # Let the benched program run for a certain amount of time
    time.sleep(RUN_TIME)
    thread.join()
    # CUR_PID was set by bench_worker on the thread joined above.
    os.kill(CUR_PID, signal.SIGINT)
    # Count the number of files it downloaded.  The cwd is `dir_name` here:
    # bench_worker's os.chdir is process-wide, not thread-local.
    count = sum([len(files) for r, d, files in os.walk(".")])
    # Go back to the benchmark root directory
    os.chdir(OUTPUT_DIR)
    return count
def flush_output(res):
    """Pretty-print the benchmark results as a two-column table."""
    headers = ["name", "pages downloaded"]
    table = tabulate(res, headers=headers)
    print(table)
def main():
    """Parse CLI options, run suckit and httrack for RUN_TIME seconds each in
    a fresh output directory, print a comparison table, then clean up."""
    global OUTPUT_DIR
    global RUN_TIME
    global SUCKIT_CMD
    global URL
    parser = argparse.ArgumentParser(description = "SuckIT benchmark")
    parser.add_argument("-o", "--output", action = "store", type = str, help = f"benchmark output directory (default_value = '{OUTPUT_DIR}')")
    # Fix: the default was printed literally as "RUN_TIME" (missing f-string braces).
    parser.add_argument("-t", "--time", action = "store", type = int, help = f"time given to each binary in seconds (default_value = {RUN_TIME})")
    parser.add_argument("-s", "--suckit", action = "store", type = str, help = f"path to the suckit binary (default_value = '{SUCKIT_CMD}')")
    parser.add_argument("-u", "--url", action = "store", type = str, help = f"url to start scraping from (default_value = {URL})")
    args = parser.parse_args()
    if args.output:
        OUTPUT_DIR = args.output
    if args.time:
        RUN_TIME = args.time
    if args.suckit:
        SUCKIT_CMD = os.path.abspath(args.suckit)
    if args.url:
        # Fix: os.path.abspath() mangled the URL into a filesystem path
        # (e.g. "/home/user/http:/books.toscrape.com"); use it verbatim.
        URL = args.url
    print_info()
    # Refuse to run inside a pre-existing directory: its contents would be
    # counted in the results and deleted afterwards.
    try:
        os.mkdir(OUTPUT_DIR)
    except OSError as exc:
        # Only the "already exists" case gets the explanatory message; other
        # errors (permissions, missing parent, ...) are re-raised untouched.
        if exc.errno == errno.EEXIST:
            err = """
            You're trying to use an already existing directory as your
            output directory. Since the directory will be counted and
            removed after the benchmark, I can't let you do that !
            """
            print(f"{colored(err, 'red')}")
        raise
    os.chdir(OUTPUT_DIR)
    results = []
    results.append(["suckit", bench("suckit", SUCKIT_CMD)])
    results.append(["httrack", bench("httrack", "httrack")])
    flush_output(results)
    # Clean benchmark output
    shutil.rmtree(OUTPUT_DIR)
# Run the benchmark only when executed directly (not on import).
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
from pithy.xml import *
from utest import *
# Build a small HTML tree to exercise the Xml query API:
# html > head > title, and html > body with two identically-classed divs.
html = Xml(tag='html', lang='en-us')
head = html.append(Xml(tag='head', ch=[Xml(tag='title', ch=['TITLE'])]))
body = html.append(Xml(tag='body'))
div0 = body.append(Xml(tag='div', cl='DIV-CLASS', id='DIV-ID-0'))
p0 = div0.append(Xml(tag='p', ch=['Paragraph #0 text.\n\npost-newlines.\n']))
div1 = body.append(Xml(tag='div', cl='DIV-CLASS', id='DIV-ID-1'))
p1 = div1.append(Xml(tag='p', ch=['Paragraph #1 text. post double space.\n']))
# Attribute access and single-match queries (pick = direct children,
# find = recursive descent, *_first = first match wins).
utest('en-us', html.get, 'lang')
utest(head, html.pick, 'head')
utest(div0, body.pick_first, 'div')
utest(div0, html.find_first, 'div')
# Multi-match queries, by tag and by class attribute.
utest_seq([div0, div1], body.pick_all, 'div')
utest_seq([div0, div1], html.find_all, 'div')
utest_seq([div0, div1], body.pick_all, cl='DIV-CLASS')
utest_seq([div0, div1], html.find_all, cl='DIV-CLASS')
# Lookup by id attribute and by text substring.
utest(div1, body.pick_first, id='DIV-ID-1')
utest(div1, html.find_first, id='DIV-ID-1')
utest(p1, html.find, text='#1 text')
# pick/find (non-*_first) must raise when zero or multiple nodes match.
utest_exc(NoMatchError, html.pick, 'nonexistent')
utest_exc(NoMatchError, html.find, 'nonexistent')
utest_exc(MultipleMatchesError, body.pick, 'div')
utest_exc(MultipleMatchesError, html.find, 'div')
utest_exc(MultipleMatchesError, body.pick, cl='DIV-CLASS')
utest_exc(MultipleMatchesError, html.find, cl='DIV-CLASS')
# Traversable results wrap the original node and expose sibling navigation.
sd0 = body.pick(id='DIV-ID-0', traversable=True)
sd1 = html.find(id='DIV-ID-1', traversable=True)
utest_val(div0, sd0.orig, 'sd0.orig')
utest_val(div1, sd1.orig, 'sd1.orig')
utest(div1, sd0.next)
utest(div0, sd1.prev)
|
#!/usr/bin/env python3
# -*- mode: python ; coding: utf-8 -*-
"""Tracks and analyses writing progress"""
import argparse
import datetime
import os.path
import sys
import uuid
from pathlib import Path
from tabulate import tabulate
from src.trak import core_methods
from src.trak.project import Project
from src.trak.session import Session
#from project import Project
#from session import Session
# All persistent state lives in flat text files under data/ next to this
# script; each line is a comma+space-separated record.
projects_path = Path(__file__).parent / "data/projects.dat"
trackdata_path = Path(__file__).parent / "data/trackdata.dat"
sessions_path = Path(__file__).parent / "data/sessions.dat"
ignore_path = Path(__file__).parent / "data/ignore.dat"
# tmp.dat doubles as the "tracking is ON" marker file -- see checks().
tmp_path = Path(__file__).parent / "data/tmp.dat"
def checks(trackstatus):
    """Start or stop a tracking session based on the presence of tmp.dat.

    When tmp.dat is absent, a new Session is created and its start data
    written into tmp.dat; when it is present, the session recorded there is
    finalised and tmp.dat removed.

    Args:
        trackstatus: initial tracking flag; main() always passes False.
    """
    tracking = trackstatus
    # The tmp.dat file is used to determine if tracking is on or off
    # If it exists, tracking is set to 'off'; otherwise it is 'on'.
    if not os.path.exists(tmp_path):
        # Fix: create the marker file via a context manager so the handle is
        # not leaked (it was previously opened and closed around the Session
        # construction without ever being written to here).
        with open(tmp_path, 'w+'):
            pass
        tracking = True
        uid = uuid.uuid1()
        new_session = Session(uid)
    # NOTE(review): if trackstatus were ever passed as True while tmp.dat
    # already exists, `new_session` below would be unbound.  The module only
    # calls checks(False), so this cannot currently happen.
    # Test to make sure there's something to track
    # Fix: use a context manager; the previous open() leaked the file handle.
    with open(projects_path) as proj:
        number_of_projects = len(proj.readlines())
    if number_of_projects == 0:
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        print("Nothing to track! Use the -n option to track a project.")
        sys.exit()
    # If TRACKING == True, record initial data to a temporary file;
    # otherwise, record the tracking session to tracking.dat
    if tracking:
        # Write session info and start time at the top of tmp.dat
        new_session.timestamp_tmp()
        # Store tracked files & initial word counts in tmp.dat
        new_session.write_tmp()
        # Display a message indicating that tracking has begun
        print(new_session.start_msg())
    else:
        # Retrieve the session ID from the top of tmp.dat
        with open(tmp_path, 'r') as tmp:
            dat = tmp.readline()
            session_ID = dat.strip().split(', ')[0]
        # Create a new session object
        old_session = Session(session_ID)
        # Write session data to trackdata.dat
        core_methods.write_trackdata()
        # Write the session details to file
        old_session.write_session()
        # Display a closing message
        # NOTE(review): start_msg is *called* above but end_msg is not -- this
        # prints the bound method object unless end_msg is a property; confirm.
        print(old_session.end_msg)
    # tmp.dat only pre-exists when a previous 'trak' run started a session, so
    # tracking == False here means that session was just closed out above;
    # delete the marker so the next run starts fresh.
    if not tracking:
        os.remove(tmp_path)
def main():
    """Dispatch on the CLI options; with no option given, fall through to
    checks(False) which toggles a tracking session on/off.

    Each option branch is self-contained and calls sys.exit() when done.
    """
    # Create files where data will be stored if they don't exist already.
    # NOTE(review): these handles are opened for append only to create the
    # files and are never closed; Path.touch() would avoid the leak.
    if not os.path.exists(projects_path):
        projects = open(projects_path, 'a')
    if not os.path.exists(trackdata_path):
        trackdata = open(trackdata_path, 'a')
    if not os.path.exists(sessions_path):
        sessions = open(sessions_path, 'a')
    if not os.path.exists(ignore_path):
        ignore = open(ignore_path, 'a')
    # Parse program options
    parser = argparse.ArgumentParser(description="Track and analyse writing progress")
    parser.add_argument("-d", "--delete",
                        help="remove file or project from further tracking; remove ignored file to resume tracking")
    parser.add_argument("-e", "--export",
                        help="export data to stdout")
    parser.add_argument("-l", "--list",
                        help="list ignores, files, sessions, or projects")
    parser.add_argument("-n", "--new",
                        help="track a new writing project",
                        action="store_true")
    parser.add_argument("-r", "--record",
                        help="display all-time tracking records",
                        action="store_true")
    parser.add_argument("-s", "--summary",
                        help="display summary of selected session",
                        action="store_true")
    parser.add_argument("-x", "--expunge",
                        help="delete all data; restore factory defaults",
                        action='store_true')
    parser.add_argument("-z", "--status",
                        help="determine whether tracking is on or off",
                        action='store_true')
    args = parser.parse_args()
    #####################################################
    # Process user options ##
    #####################################################
    # -x: wipe every data file after an explicit confirmation.
    if args.expunge:
        MSG = "This option will remove all data and return to factory defaults." + \
            "\n" + "WARNING: This cannot be undone.\n"
        print(MSG)
        response = input('Are you sure you want to proceed with reset (y/n): ')
        if response == 'y':
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
            os.remove(ignore_path)
            os.remove(projects_path)
            os.remove(sessions_path)
            os.remove(trackdata_path)
            print('Tracking has been restored to factory defaults')
        else:
            print('Action cancelled by user. Nothing removed.')
        sys.exit()
    # -e: dump one of the data files (or the expanded file list) to stdout.
    if args.export:
        if args.export == 'tracks':
            with open(trackdata_path, 'r') as f:
                t_data = f.readlines()
            print('track_id, project_name, file_name, start_wc, end_wc')
            for line in t_data:
                print(line.strip())
        elif args.export == 'files':
            with open(projects_path, 'r') as f:
                p_data = f.readlines()
            for line in p_data:
                (p_name, p_path) = line.strip().split(', ')
                project = Project(p_name, p_path)
                p_list = project.filepath_list()
                for file in p_list:
                    print(file)
        elif args.export == 'projects':
            print('project_name, project_path')
            with open(projects_path, 'r') as f:
                p_data = f.readlines()
            for line in p_data:
                print(line.strip())
        elif args.export == 'sessions':
            print('session_id, start_time, end_time')
            with open(sessions_path, 'r') as f:
                s_data = f.readlines()
            for session in s_data:
                print(session.strip())
        else:
            print("Invalid entry. Valid entries include: tracks, files, projects, or sessions")
            sys.exit()
        sys.exit()
    # -r: aggregate all-time records from the track data.
    if args.record:
        # Test to make sure there are data for which records can be estblished
        number_of_tracks = len(open(trackdata_path).readlines())
        if number_of_tracks == 0:
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
            print("No tracks have been recorded; no records can be displayed.")
            sys.exit()
        r_list = [['Total number of words tracked', core_methods.n_words()],
                  ['Total number of sessions tracked', core_methods.n_sessions()],
                  ['Total number of modified files', core_methods.n_files()]]
        l_session = Session(core_methods.longest_session())
        duration = l_session.total_duration()
        # Drop sub-second precision for display.
        duration = duration - datetime.timedelta(microseconds=duration.microseconds)
        session_time = l_session.start_time()
        session_date = session_time.date()
        r_list.append(['Longest session', str(session_date) + " " + str(duration)])
        w_session = Session(core_methods.most_words())
        max_wc = w_session.total_wc()
        s_start = w_session.start_time()
        s_date = s_start.date()
        r_list.append(['Most words written in a session', str(s_date) + ' ' + str(max_wc) + ' words'])
        r_list.append(['Longest writing streak', core_methods.longest_streak()])
        r_list.append(['Current streak', '{} days'.format(core_methods.current_streak())])
        table = tabulate(r_list)
        print(table)
        sys.exit()
    # -z: report whether a session is currently being tracked (tmp.dat exists).
    if args.status:
        MSG = ''
        if os.path.exists(tmp_path):
            with open(tmp_path, 'r') as tmp:
                tmp_data = tmp.readline()
            s_id = tmp_data.strip().split(', ')[0]
            s_start = tmp_data.strip().split(', ')[1]
            MSG = 'Tracking is ON\n' + \
                  'This tracking session began at ' + s_start + '\n' + \
                  'Current writing streak is ' + str(core_methods.current_streak()) + ' days'
        else:
            MSG = 'Tracking is OFF\n' + \
                  'Current writing streak is ' + str(core_methods.current_streak()) + ' days'
        print(MSG)
        sys.exit()
    # -s: interactively pick a recorded session and print its summary.
    if args.summary:
        # Display a selection list of available sessions
        sessions_list = []
        with open(sessions_path, 'r') as s:
            if os.stat(sessions_path).st_size == 0:
                print('No writing sessions recorded. Process halted.')
                sys.exit()
            sessions_data = s.readlines()
            for i, line in enumerate(sessions_data):
                s_time = line.strip().split(', ')[1]
                e_time = line.strip().split(', ')[2]
                print('[' + str(i) + '] ' + s_time + ' to ' + e_time)
                sessions_list.append(line)
        # Get input from user
        try:
            selection = int(input('Select session to summarise [0-' +
                                  str(len(sessions_list) - 1) + ']: '))
        except ValueError:
            print('Invalid input. Integer expected.')
            sys.exit()
        while len(sessions_list) > 1 and selection not in range(len(sessions_list)):
            try:
                selection = int(input('Invalid input. Please select valid session [0-' +
                                      str(len(sessions_list) - 1) + ']: '))
            except ValueError:
                print('Invalid input. Integer expected')
                sys.exit()
        session_ID = sessions_list[selection].strip().split(', ')[0]
        session = Session(session_ID)
        print(session.summary())
        print('Modified Files')
        print(session.modified_files())
        sys.exit()
    # -d: stop tracking a file, an ignored file, or a whole project.
    if args.delete:
        if args.delete == 'file':
            # Get input from the user, and check it
            proj_name = input('Project in which file resides: ')
            # NOTE(review): Project is constructed with only a name here
            # (trailing comma, no path) -- presumably the path parameter is
            # optional and looked up internally; confirm against Project.
            project = Project(proj_name, )
            while not project.is_name_taken():
                print('No project is named {}: please try again'.format(proj_name))
                proj_name = input('Project in which file resides: ')
                if proj_name == "":
                    print('File deletion halted by user. Nothing deleted.')
                    sys.exit()
                project = Project(proj_name, )
            proj_path = project.get_path()
            # If proj_path is already a file, just add it to ignore.dat
            path_exp = os.path.expanduser(proj_path)
            if os.path.isfile(path_exp):
                # NOTE(review): this writes to 'ignored_files.dat' in the cwd,
                # while every other branch uses ignore_path -- looks like a bug.
                with open('ignored_files.dat', 'a') as ignore:
                    ignore.write(proj_name + ', ' + str(proj_path) + '\n')
                print('The file {} was successfully ignored'.format(proj_path))
                sys.exit()
            # Present the user with a numbered list of candidate files to delete
            # from this project.
            project = Project(proj_name, proj_path)
            f_names = project.filepath_list()
            print('Files belonging to project {}'.format(proj_name))
            for i, file in enumerate(f_names):
                print('[' + str(i) + '] ' + file)
            try:
                f_select = int(input('Choose a file to ignore [0-' + str(len(f_names) - 1) + ']: '))
            except ValueError:
                print('Invalid selection. Expected an integer.')
                sys.exit()
            # Write the path to the ignore file, and it will no longer be tracked.
            path_select = f_names[f_select]
            with open(ignore_path, 'a') as ignored:
                ignored.write(proj_name + ', ' + path_select + '\n')
            print('The file {} will no longer be tracked'.format(path_select))
        elif args.delete == 'ignore':
            # NOTE(review): os.stat() returns a stat_result, so `== 0` is
            # always False; this was probably meant to be .st_size == 0 (the
            # len(i_list) check below compensates).
            if os.stat(ignore_path) == 0:
                print('No files are currently being ignored. Nothing to display.')
                sys.exit()
            i_list = []
            max_i = 0
            with open(ignore_path, 'r') as iggy:
                i_data = iggy.readlines()
                for i, line in enumerate(i_data):
                    (project_name, file_path) = line.strip().split(', ')
                    i_list.append([project_name, file_path])
                    print('[' + str(i) + ']', project_name, file_path)
            if len(i_list) > 0:
                max_i = len(i_list) - 1
            else:
                print('No files are currently being ignored. Action halted.')
                sys.exit()
            # Get input from user
            try:
                selection = int(input('Select a file to stop ignoring [0-' + str(max_i) + ']: '))
            except ValueError:
                print('Invalid input. Integer expected.')
                sys.exit()
            while len(i_list) > 1 and selection not in range(len(i_list)):
                try:
                    selection = int(
                        input('Invalid input. Please select valid ignore file [0-' + str(max_i) + ']: '))
                except ValueError:
                    print('Invalid input. Integer expected')
                    sys.exit()
            sel_proj = i_list[selection][0]
            sel_file = i_list[selection][1]
            # Rewrite ignore.dat without the selected entry.
            with open(ignore_path, 'w') as iggy:
                for item in i_list:
                    project_name = item[0]
                    file_path = item[1]
                    if not (project_name == sel_proj and file_path == sel_file):
                        iggy.write(project_name + ', ' + file_path + '\n')
            print('The file {} in project {} will now be tracked'.format(sel_file, sel_proj))
        elif args.delete == 'project':
            proj_name = input('Project name to stop tracking: ')
            with open(projects_path, 'r') as proj:
                lines = proj.readlines()
            project = Project(proj_name, )
            while not project.is_name_taken():
                print('No project is named {}: please try again'.format(proj_name))
                proj_name = input('Project name to stop tracking: ')
                if proj_name == "":
                    print('File deletion halted by user. Nothing deleted.')
                    sys.exit()
            # Rewrite projects.dat keeping every project but the named one.
            with open(projects_path, 'w') as proj:
                for line in lines:
                    (p_name, p_path) = line.strip('\n').split(', ')
                    if p_name != proj_name:
                        proj.write(p_name + ', ' + p_path + '\n')
            print('Project {} will no longer be tracked.'.format(proj_name))
        else:
            print("Acceptable arguments are: ignore, file, or project. Nothing removed")
        sys.exit()
    # -l: list files / ignores / sessions / projects.
    if args.list:
        if args.list == 'files':
            FILES = 0
            if os.stat(projects_path).st_size == 0:
                print("Nothing is currently being tracked. No files to display.")
            else:
                with open(projects_path, 'r') as proj:
                    p_data = proj.readlines()
                for line in p_data:
                    (p_name, p_path) = line.strip().split(', ')
                    this_project = Project(p_name, p_path)
                    print('Project {}:\n'.format(p_name))
                    f_list = this_project.filepath_list()
                    FILES += len(f_list)
                    for file in f_list:
                        print(file)
                print('Total: {} files tracked'.format(FILES))
        elif args.list == 'ignores':
            i_list = []
            if os.stat(ignore_path).st_size == 0:
                print("No files are currently being ignored. Nothing to display.")
                sys.exit()
            else:
                with open(ignore_path) as iggy:
                    i_data = iggy.readlines()
                for line in i_data:
                    i_list.append(line.strip().split(', '))
                table = tabulate(i_list, headers=['Project', 'Ignored Files'])
                print(table)
        elif args.list == 'sessions':
            s_list = []
            if os.stat(sessions_path).st_size == 0:
                print("No sessions have been recorded. Nothing to display.")
                sys.exit()
            else:
                with open(sessions_path, 'r') as ses:
                    s_data = ses.readlines()
                for line in s_data:
                    s_list.append(line.strip().split(', '))
                table = tabulate(s_list, headers=['Session ID', 'Start time', 'End time'])
                print(table)
        elif args.list == 'projects':
            p_list = []
            if os.stat(projects_path).st_size == 0:
                print("No projects are currently being tracked. Nothing to display")
            else:
                with open(projects_path, 'r') as proj:
                    p_data = proj.readlines()
                for line in p_data:
                    p_list.append(line.strip().split(', '))
                table = tabulate(p_list, headers=['Project name', 'Project path'])
                print(table)
        else:
            print("Acceptable arguments are: ignores, files, projects, sessions.")
        sys.exit()
    # -n: register a new project (name must be unique, path must exist and
    # not already be tracked).
    elif args.new:
        # User wants to add a new project
        project_name = input("Name of new project: ")
        with open(projects_path, 'r') as proj:
            p_data = proj.readlines()
        for line in p_data:
            this_line = line.strip().split(', ')
            if this_line[0] == project_name:
                # NOTE(review): re-prompts once per clashing line but does not
                # re-validate the replacement name against earlier lines.
                project_name = input("Name of new project: ")
                if project_name == "":
                    print("New project tracking halted by user. Nothing tracked.")
                    sys.exit()
        # User inputs the path of the new project
        project_path = input("Path of new project: ")
        # Make sure that the given path actually exists
        while not os.path.exists(os.path.expanduser(project_path)):
            print('\nPath does not exist. Please try again.')
            project_path = input("Path of new project: ")
            if project_path == "":
                print("Tracking halted by user. Nothing tracked.")
                sys.exit()
        # Make sure that the path that was provided isn't already being tracked.
        with open(projects_path, 'r') as f:
            for line in f.readlines():
                (line_proj, line_path) = line.strip('\n').split(', ')
                if project_path == line_path:
                    print("\nThe path {} is already being tracked as {}".format(line_path, line_proj))
                    print("Nothing was added to the projects catalogue.")
                    sys.exit()
        # Once all the checks have passed, create a new project
        new_project = Project(project_name, project_path)
        # Display a message once project's been written to file
        print(new_project.write_project())
        # Exit once the new project has been recorded
        sys.exit()
    # No option given: toggle a tracking session on/off.
    checks(False)
# Script entry point; propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
|
#!/usr/bin/env python
# coding=utf-8
"""
Ant Group
Copyright (c) 2004-2021 All Rights Reserved.
------------------------------------------------------
File Name : homo_enc.py
Author : Qizhi Zhang
Email: qizhi.zqz@antgroup.com
Create Time : 2021/9/29 下午3:19
Description : TensorFlow wrappers around the configured homomorphic-encryption module (key generation, encryption/decryption, and ciphertext-to-share conversion)
"""
from stensorflow.global_var import StfConfig
import tensorflow as tf
def homo_init(sess=None):
    """Build the initializer op for all homomorphic-encryption variables.

    If `sess` is given, run the op in that session; otherwise return the op
    for the caller to run.
    """
    vars_to_init = tf.compat.v1.get_collection(StfConfig.coll_name_vars_homo)
    init_op = tf.compat.v1.initialize_variables(vars_to_init)
    if sess is None:
        return init_op
    sess.run(init_op)
def gene_key():
    """Generate the homomorphic key set via the configured homo module.

    Returns the (sk, pk, gk) triple produced by the backend -- presumably
    secret/public keys plus an auxiliary key; confirm against the backend.
    The dummy zero variable is registered in the homo-vars collection so
    homo_init() initializes it.
    """
    zero_var = tf.Variable(initial_value=[0], trainable=False)
    tf.compat.v1.add_to_collection(StfConfig.coll_name_vars_homo, zero_var)
    return StfConfig.homo_module.gen_key(zero_var)
def enc(pk, x):
    """Encrypt plaintext tensor `x` under public key `pk`."""
    return StfConfig.homo_module.enc(pk, x)
def mat_mul_vec_to_share(pk, gk, mat, cipher_vec):
    """Multiply plaintext matrix `mat` by ciphertext vector `cipher_vec`,
    returning the backend's share representation of the product."""
    return StfConfig.homo_module.mat_mul_vec_to_share(pk, gk, mat, cipher_vec)
def dec(sk, size, cipher_vec):
    """Decrypt `cipher_vec` with secret key `sk` and reshape to `size`."""
    plain = StfConfig.homo_module.dec(sk, size, cipher_vec)
    return tf.reshape(plain, size)
def vec_mul_vec(pk, plain_vec, cipher_vec):
    """Elementwise product of a plaintext vector and a ciphertext vector."""
    return StfConfig.homo_module.vec_mul_vec(pk, plain_vec, cipher_vec)
def cipher_to_share(size, pk, cipher):
    """Split a ciphertext into a (ciphertext share, plaintext share) pair,
    reshaping the plaintext share to a length-`size` vector."""
    cipher_part, plain_part = StfConfig.homo_module.cipher_to_share(size, pk, cipher)
    return cipher_part, tf.reshape(plain_part, [size])
def vec_mul_vec_to_share(pk, plain_vec, cipher_vec):
    """Elementwise-multiply plain and cipher vectors, then convert the result
    ciphertext into shares sized by the plaintext vector's length."""
    product = vec_mul_vec(pk, plain_vec, cipher_vec)
    length = tf.cast(tf.shape(plain_vec)[0], "int64")
    return cipher_to_share(length, pk, product)
|
# Count of divisors and addition (Programmers problem: 약수의 개수와 덧셈)
def solution(left, right):
    """Programmers problem "count of divisors and addition".

    For each num in [left, right]: add num when its divisor count is even,
    subtract it when odd.  A positive integer has an odd number of divisors
    iff it is a perfect square, so the O(num) trial-division inner loop is
    replaced by an integer square-root test (assumes left, right >= 1 per the
    problem constraints).

    Args:
        left: inclusive lower bound of the range (>= 1).
        right: inclusive upper bound of the range (>= left).

    Returns:
        The signed sum described above.
    """
    import math
    answer = 0
    for num in range(left, right + 1):
        if math.isqrt(num) ** 2 == num:
            # Perfect square -> odd divisor count -> subtract.
            answer -= num
        else:
            answer += num
    return answer
'''
테스트 1 〉 통과 (19.56ms, 10.3MB)
테스트 2 〉 통과 (5.09ms, 10.1MB)
테스트 3 〉 통과 (4.68ms, 10.1MB)
테스트 4 〉 통과 (2.26ms, 10.2MB)
테스트 5 〉 통과 (17.77ms, 10.2MB)
테스트 6 〉 통과 (1.52ms, 10.3MB)
테스트 7 〉 통과 (0.66ms, 10.1MB)
''' |
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# Python
import codecs
import contextlib
import json
import os
import stat
import sys
import uuid
from copy import copy
# Ansible
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule
# AWX Display Callback
from .events import event_context
from .minimal import CallbackModule as MinimalCallbackModule
CENSORED = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" # noqa
class BaseCallbackModule(CallbackBase):
    '''
    Callback module for logging ansible/ansible-playbook events.
    '''
    # Ansible callback-plugin API version and plugin type.
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    # These events should never have an associated play.
    EVENTS_WITHOUT_PLAY = [
        'playbook_on_start',
        'playbook_on_stats',
    ]
    # These events should never have an associated task.
    EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [
        'playbook_on_setup',
        'playbook_on_notify',
        'playbook_on_import_for_host',
        'playbook_on_not_import_for_host',
        'playbook_on_no_hosts_matched',
        'playbook_on_no_hosts_remaining',
    ]
    def __init__(self):
        """Initialize the base callback state."""
        super(BaseCallbackModule, self).__init__()
        # UUIDs of tasks seen so far -- empty here; presumably populated by
        # subclasses / other methods to de-duplicate task events (confirm).
        self.task_uuids = set()
    @contextlib.contextmanager
    def capture_event_data(self, event, **event_data):
        """Attach `event_data` to the event context around the wrapped call.

        Ensures every event carries a uuid, censors results marked with
        _ansible_no_log (both the whole result and per-item loop results),
        and serializes the dump_begin/dump_end stdout framing under the shared
        display lock.  Context is always removed in the finally block, even if
        the wrapped callback raises.
        """
        event_data.setdefault('uuid', str(uuid.uuid4()))
        # Task-capable events carry their task separately so it can be pushed
        # as thread-local context below.
        if event not in self.EVENTS_WITHOUT_TASK:
            task = event_data.pop('task', None)
        else:
            task = None
        if event_data.get('res'):
            if event_data['res'].get('_ansible_no_log', False):
                event_data['res'] = {'censored': CENSORED}
            if event_data['res'].get('results', []):
                # Copy before mutating so the caller's result list is untouched.
                event_data['res']['results'] = copy(event_data['res']['results'])
                for i, item in enumerate(event_data['res'].get('results', [])):
                    if isinstance(item, dict) and item.get('_ansible_no_log', False):
                        event_data['res']['results'][i] = {'censored': CENSORED}
        with event_context.display_lock:
            try:
                event_context.add_local(event=event, **event_data)
                if task:
                    self.set_task(task, local=True)
                event_context.dump_begin(sys.stdout)
                yield
            finally:
                # Unwind in reverse order of setup.
                event_context.dump_end(sys.stdout)
                if task:
                    self.clear_task(local=True)
                event_context.remove_local(event=None, **event_data)
def set_playbook(self, playbook):
# NOTE: Ansible doesn't generate a UUID for playbook_on_start so do it for them.
self.playbook_uuid = str(uuid.uuid4())
file_name = getattr(playbook, '_file_name', '???')
event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid)
self.clear_play()
def set_play(self, play):
if hasattr(play, 'hosts'):
if isinstance(play.hosts, list):
pattern = ','.join(play.hosts)
else:
pattern = play.hosts
else:
pattern = ''
name = play.get_name().strip() or pattern
event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern)
self.clear_task()
def clear_play(self):
event_context.remove_global(play=None, play_uuid=None, play_pattern=None)
self.clear_task()
def set_task(self, task, local=False):
# FIXME: Task is "global" unless using free strategy!
task_ctx = dict(
task=(task.name or task.action),
task_uuid=str(task._uuid),
task_action=task.action,
task_args='',
)
try:
task_ctx['task_path'] = task.get_path()
except AttributeError:
pass
if C.DISPLAY_ARGS_TO_STDOUT:
if task.no_log:
task_ctx['task_args'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
else:
task_args = ', '.join(('%s=%s' % a for a in task.args.items()))
task_ctx['task_args'] = task_args
if getattr(task, '_role', None):
task_role = task._role._role_name
else:
task_role = getattr(task, 'role_name', '')
if task_role:
task_ctx['role'] = task_role
if local:
event_context.add_local(**task_ctx)
else:
event_context.add_global(**task_ctx)
def clear_task(self, local=False):
task_ctx = dict(task=None, task_path=None, task_uuid=None, task_action=None, task_args=None, role=None)
if local:
event_context.remove_local(**task_ctx)
else:
event_context.remove_global(**task_ctx)
def v2_playbook_on_start(self, playbook):
self.set_playbook(playbook)
event_data = dict(
uuid=self.playbook_uuid,
)
with self.capture_event_data('playbook_on_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_start(playbook)
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
encrypt=None, confirm=False, salt_size=None,
salt=None, default=None):
event_data = dict(
varname=varname,
private=private,
prompt=prompt,
encrypt=encrypt,
confirm=confirm,
salt_size=salt_size,
salt=salt,
default=default,
)
with self.capture_event_data('playbook_on_vars_prompt', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_vars_prompt(
varname, private, prompt, encrypt, confirm, salt_size, salt,
default,
)
def v2_playbook_on_include(self, included_file):
event_data = dict(
included_file=included_file._filename if included_file is not None else None,
)
with self.capture_event_data('playbook_on_include', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_include(included_file)
def v2_playbook_on_play_start(self, play):
self.set_play(play)
if hasattr(play, 'hosts'):
if isinstance(play.hosts, list):
pattern = ','.join(play.hosts)
else:
pattern = play.hosts
else:
pattern = ''
name = play.get_name().strip() or pattern
event_data = dict(
name=name,
pattern=pattern,
uuid=str(play._uuid),
)
with self.capture_event_data('playbook_on_play_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_play_start(play)
def v2_playbook_on_import_for_host(self, result, imported_file):
# NOTE: Not used by Ansible 2.x.
with self.capture_event_data('playbook_on_import_for_host'):
super(BaseCallbackModule, self).v2_playbook_on_import_for_host(result, imported_file)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
# NOTE: Not used by Ansible 2.x.
with self.capture_event_data('playbook_on_not_import_for_host'):
super(BaseCallbackModule, self).v2_playbook_on_not_import_for_host(result, missing_file)
def v2_playbook_on_setup(self):
# NOTE: Not used by Ansible 2.x.
with self.capture_event_data('playbook_on_setup'):
super(BaseCallbackModule, self).v2_playbook_on_setup()
def v2_playbook_on_task_start(self, task, is_conditional):
# FIXME: Flag task path output as vv.
task_uuid = str(task._uuid)
if task_uuid in self.task_uuids:
# FIXME: When this task UUID repeats, it means the play is using the
# free strategy, so different hosts may be running different tasks
# within a play.
return
self.task_uuids.add(task_uuid)
self.set_task(task)
event_data = dict(
task=task,
name=task.get_name(),
is_conditional=is_conditional,
uuid=task_uuid,
)
with self.capture_event_data('playbook_on_task_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
# NOTE: Not used by Ansible 2.x.
self.set_task(task)
event_data = dict(
task=task,
name=task.get_name(),
uuid=str(task._uuid),
is_conditional=True,
)
with self.capture_event_data('playbook_on_task_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_cleanup_task_start(task)
def v2_playbook_on_handler_task_start(self, task):
# NOTE: Re-using playbook_on_task_start event for this v2-specific
# event, but setting is_conditional=True, which is how v1 identified a
# task run as a handler.
self.set_task(task)
event_data = dict(
task=task,
name=task.get_name(),
uuid=str(task._uuid),
is_conditional=True,
)
with self.capture_event_data('playbook_on_task_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_handler_task_start(task)
    def v2_playbook_on_no_hosts_matched(self):
        """Capture playbook_on_no_hosts_matched (carries no task data)."""
        with self.capture_event_data('playbook_on_no_hosts_matched'):
            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_matched()
    def v2_playbook_on_no_hosts_remaining(self):
        """Capture playbook_on_no_hosts_remaining (carries no task data)."""
        with self.capture_event_data('playbook_on_no_hosts_remaining'):
            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_remaining()
def v2_playbook_on_notify(self, handler, host):
# NOTE: Not used by Ansible < 2.5.
event_data = dict(
host=host.get_name(),
handler=handler.get_name(),
)
with self.capture_event_data('playbook_on_notify', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_notify(handler, host)
    '''
    ansible_stats was, retroactively, added in 2.2
    '''
def v2_playbook_on_stats(self, stats):
self.clear_play()
# FIXME: Add count of plays/tasks.
event_data = dict(
changed=stats.changed,
dark=stats.dark,
failures=stats.failures,
ok=stats.ok,
processed=stats.processed,
skipped=stats.skipped
)
# write custom set_stat artifact data to the local disk so that it can
# be persisted by awx after the process exits
custom_artifact_data = stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
if custom_artifact_data:
# create the directory for custom stats artifacts to live in (if it doesn't exist)
custom_artifacts_dir = os.path.join(os.getenv('AWX_PRIVATE_DATA_DIR'), 'artifacts')
os.makedirs(custom_artifacts_dir, mode=stat.S_IXUSR + stat.S_IWUSR + stat.S_IRUSR)
custom_artifacts_path = os.path.join(custom_artifacts_dir, 'custom')
with codecs.open(custom_artifacts_path, 'w', encoding='utf-8') as f:
os.chmod(custom_artifacts_path, stat.S_IRUSR | stat.S_IWUSR)
json.dump(custom_artifact_data, f)
with self.capture_event_data('playbook_on_stats', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_stats(stats)
@staticmethod
def _get_event_loop(task):
if hasattr(task, 'loop_with'): # Ansible >=2.5
return task.loop_with
elif hasattr(task, 'loop'): # Ansible <2.4
return task.loop
return None
def v2_runner_on_ok(self, result):
# FIXME: Display detailed results or not based on verbosity.
# strip environment vars from the job event; it already exists on the
# job and sensitive values are filtered there
if result._task.action in ('setup', 'gather_facts'):
result._result.get('ansible_facts', {}).pop('ansible_env', None)
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
task=result._task,
res=result._result,
event_loop=self._get_event_loop(result._task),
)
with self.capture_event_data('runner_on_ok', **event_data):
super(BaseCallbackModule, self).v2_runner_on_ok(result)
def v2_runner_on_failed(self, result, ignore_errors=False):
# FIXME: Add verbosity for exception/results output.
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
res=result._result,
task=result._task,
ignore_errors=ignore_errors,
event_loop=self._get_event_loop(result._task),
)
with self.capture_event_data('runner_on_failed', **event_data):
super(BaseCallbackModule, self).v2_runner_on_failed(result, ignore_errors)
def v2_runner_on_skipped(self, result):
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
task=result._task,
event_loop=self._get_event_loop(result._task),
)
with self.capture_event_data('runner_on_skipped', **event_data):
super(BaseCallbackModule, self).v2_runner_on_skipped(result)
def v2_runner_on_unreachable(self, result):
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_on_unreachable', **event_data):
super(BaseCallbackModule, self).v2_runner_on_unreachable(result)
def v2_runner_on_no_hosts(self, task):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
task=task,
)
with self.capture_event_data('runner_on_no_hosts', **event_data):
super(BaseCallbackModule, self).v2_runner_on_no_hosts(task)
def v2_runner_on_async_poll(self, result):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
jid=result._result.get('ansible_job_id'),
)
with self.capture_event_data('runner_on_async_poll', **event_data):
super(BaseCallbackModule, self).v2_runner_on_async_poll(result)
def v2_runner_on_async_ok(self, result):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
jid=result._result.get('ansible_job_id'),
)
with self.capture_event_data('runner_on_async_ok', **event_data):
super(BaseCallbackModule, self).v2_runner_on_async_ok(result)
def v2_runner_on_async_failed(self, result):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
jid=result._result.get('ansible_job_id'),
)
with self.capture_event_data('runner_on_async_failed', **event_data):
super(BaseCallbackModule, self).v2_runner_on_async_failed(result)
def v2_runner_on_file_diff(self, result, diff):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
diff=diff,
)
with self.capture_event_data('runner_on_file_diff', **event_data):
super(BaseCallbackModule, self).v2_runner_on_file_diff(result, diff)
def v2_on_file_diff(self, result):
# NOTE: Logged as runner_on_file_diff.
event_data = dict(
host=result._host.get_name(),
task=result._task,
diff=result._result.get('diff'),
)
with self.capture_event_data('runner_on_file_diff', **event_data):
super(BaseCallbackModule, self).v2_on_file_diff(result)
def v2_runner_item_on_ok(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_item_on_ok', **event_data):
super(BaseCallbackModule, self).v2_runner_item_on_ok(result)
def v2_runner_item_on_failed(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_item_on_failed', **event_data):
super(BaseCallbackModule, self).v2_runner_item_on_failed(result)
def v2_runner_item_on_skipped(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_item_on_skipped', **event_data):
super(BaseCallbackModule, self).v2_runner_item_on_skipped(result)
def v2_runner_retry(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_retry', **event_data):
super(BaseCallbackModule, self).v2_runner_retry(result)
class AWXDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule):
    """Event-capturing wrapper around Ansible's default stdout callback."""
    CALLBACK_NAME = 'awx_display'
class AWXMinimalCallbackModule(BaseCallbackModule, MinimalCallbackModule):
    """Event-capturing wrapper around Ansible's minimal stdout callback."""
    CALLBACK_NAME = 'minimal'

    def v2_playbook_on_play_start(self, play):
        # The minimal callback emits nothing for play starts.
        pass

    def v2_playbook_on_task_start(self, task, is_conditional):
        # Record task context but do not capture a task-start event.
        self.set_task(task)
|
__all__ = ['SolverOptions',
'SolverResultsOptions', 'SolverOdeOptions',
'McOptions']
from ..optionsclass import optionsclass
import multiprocessing
@optionsclass("solver")
class SolverOptions:
    """
    Class of options for evolution solvers such as :func:`qutip.mesolve` and
    :func:`qutip.mcsolve`. Options can be specified either as arguments to the
    constructor::
        opts = SolverOptions(progress_bar='enhanced', ...)
    or by changing the class attributes after creation::
        opts = SolverOptions()
        opts['progress_bar'] = 'enhanced'
    Returns options class to be used as options in evolution solvers.
    The default can be changed by::
        qutip.settings.solver['progress_bar'] = 'enhanced'
    Options
    -------
    progress_bar : str {'text', 'enhanced', 'tqdm', ''}
        How to present the solver progress.
        True will result in 'text'.
        'tqdm' uses the python module of the same name and raise an error if
        not installed.
        Empty string or False will disable the bar.
    progress_kwargs : dict
        kwargs to pass to the progress_bar. Qutip's bars use `chunk_size`.
    """
    # Default option values; see the class docstring for their meaning.
    options = {
        # How to present the solver progress ('text', 'enhanced', 'tqdm', '')
        "progress_bar": "text",
        # kwargs forwarded to the progress bar; qutip's bars use `chunk_size`
        "progress_kwargs": {"chunk_size":10},
    }
@optionsclass("ode", SolverOptions)
class SolverOdeOptions:
    """
    Class of options for the ODE integrator of solvers such as
    :func:`qutip.mesolve` and :func:`qutip.mcsolve`. Options can be
    specified either as arguments to the SolverOptions constructor::
        opts = SolverOptions(method=bdf, ...)
    or by changing the class attributes after creation::
        opts = SolverOptions()
        opts.ode['method'] = 'bdf'
    Returns options class to be used as options in evolution solvers.
    The default can be changed by::
        qutip.settings.solver.ode['method'] = 'bdf'
    Options
    -------
    method : str {'adams', 'bdf', 'dop853', 'lsoda', 'vern7', 'vern9', 'diag'}
        Integration method.
    atol : float {1e-8}
        Absolute tolerance.
    rtol : float {1e-6}
        Relative tolerance.
    order : int {12}
        Order of integrator (<=12 'adams', <=5 'bdf')
    nsteps : int {2500}
        Max. number of internal steps/call.
    first_step : float {0}
        Size of initial step (0 = automatic).
    min_step : float {0}
        Minimum step size (0 = automatic).
    max_step : float {0}
        Maximum step size (0 = automatic)
    tidy: bool {True}
        tidyup Hamiltonian before calculation
    operator_data_type: type or str {""}
        Data type of the operator used during the ODE evolution, as a subclass
        of :class:`qutip.data.Data` or an alias of such. Use an empty string to
        keep the input state type. Most solver can only work with `Dense`.
    state_data_type: type or str {'dense'}
        Data type of the state used during the ODE evolution, as a subclass of
        :class:`qutip.data.Data` or an alias of such. Use an empty string to
        keep the input state type. Most solver can only work with `Dense`.
    feedback_normalize: bool
        Normalize the state before passing it to coefficient when using
        feedback.
    """
    options = {
        # Integration method (default = 'adams', for stiff 'bdf')
        "method": 'adams',
        # NOTE(review): 'rhs' is not described in the docstring above;
        # '' appears to select the default right-hand-side -- confirm.
        "rhs": '',
        # Absolute tolerance (default = 1e-8)
        "atol": 1e-8,
        # Relative tolerance (default = 1e-6)
        "rtol": 1e-6,
        # Maximum order used by integrator (<=12 for 'adams', <=5 for 'bdf')
        "order": 12,
        # Max. number of internal steps/call
        "nsteps": 2500,
        # Size of initial step (0 = determined by solver)
        "first_step": 0,
        # Max step size (0 = determined by solver)
        "max_step": 0,
        # Minimal step size (0 = determined by solver)
        "min_step": 0,
        # tidyup Hamiltonian before calculation (default = True)
        "tidy": True,
        "operator_data_type": "",
        "state_data_type": "dense",
        # Normalize the states received in feedback_args
        "feedback_normalize": True,
    }
    # Extra option names accepted beyond `options`; presumably populated by
    # individual solvers at runtime -- TODO confirm.
    extra_options = set()
@optionsclass("results", SolverOptions)
class SolverResultsOptions:
    """
    Class of options for Results of evolution solvers such as
    :func:`qutip.mesolve` and :func:`qutip.mcsolve`.
    Options can be specified when constructing SolverOptions::
        opts = SolverOptions(store_final_state=True, ...)
    or by changing the class attributes after creation::
        opts = SolverOptions()
        opts.results["store_final_state"] = True
    Returns options class to be used as options in evolution solvers.
    The default can be changed by::
        qutip.settings.solver.result['store_final_state'] = True
    Options
    -------
    store_final_state : bool {False, True}
        Whether or not to store the final state of the evolution in the
        result class.
    store_states : bool {False, True, None}
        Whether or not to store the state vectors or density matrices.
        On `None` the states will be saved if no expectation operators are
        given.
    normalize_output : str {"", "ket", "all"}
        normalize output state to hide ODE numerical errors.
        "all" will normalize both ket and dm.
        On "ket", only 'ket' output are normalized.
        Leave empty for no normalization.
    """
    # Default option values; see the class docstring for their meaning.
    options = {
        # store final state?
        "store_final_state": False,
        # store states even if expectation operators are given?
        "store_states": None,
        # normalize output ('' off, 'ket' kets only, 'all' kets and dms)
        "normalize_output": "ket",
    }
@optionsclass("mcsolve", SolverOptions)
class McOptions:
    """
    Class of options specific for :func:`qutip.mcsolve`.
    Options can be specified either as arguments to the constructor of
    SolverOptions::
        opts = SolverOptions(norm_tol=1e-3, ...)
    or by changing the class attributes after creation::
        opts = SolverOptions()
        opts.mcsolve['norm_tol'] = 1e-3
    Returns options class to be used as options in evolution solvers.
    The default can be changed by::
        qutip.settings.options.mcsolve['norm_tol'] = 1e-3
    Options
    -------
    norm_tol : float {1e-4}
        Tolerance used when finding wavefunction norm in mcsolve.
    norm_t_tol : float {1e-6}
        Tolerance used when finding wavefunction time in mcsolve.
    norm_steps : int {5}
        Max. number of steps used to find wavefunction norm to within norm_tol
        in mcsolve.
    keep_runs_results: bool
        Keep all trajectories results or save only the average.
    map : str {'parallel', 'serial', 'loky'}
        How to run the trajectories.
        'parallel' use python's multiprocessing.
        'loky' use the pyhon module of the same name (not installed with qutip).
    map_options: dict
        keys:
            'num_cpus': number of cpus to use.
            'timeout': maximum time for all trajectories. (sec)
            'job_timeout': maximum time per trajectory. (sec)
        Only finished trajectories will be returned when timeout is reached.
    mc_corr_eps : float {1e-10}
        Arbitrarily small value for eliminating any divide-by-zero errors in
        correlation calculations when using mcsolve.
    """
    options = {
        # Tolerance for wavefunction norm (mcsolve only)
        "norm_tol": 1e-4,
        # Tolerance for collapse time precision (mcsolve only)
        "norm_t_tol": 1e-6,
        # Max. number of steps taken to find wavefunction norm to within
        # norm_tol (mcsolve only)
        "norm_steps": 5,
        # NOTE(review): docstring documents {'parallel', 'serial', 'loky'}
        # but the default here is "parallel_map" -- confirm which is correct.
        "map": "parallel_map",
        "keep_runs_results": False,
        "mc_corr_eps": 1e-10,
        # Options forwarded to the trajectory map function.
        "map_options": {
            'num_cpus': multiprocessing.cpu_count(),
            'timeout':1e8,
            'job_timeout':1e8
        },
    }
|
import json
import random
import uuid
from time import sleep
import pandas as pd
from kafka import KafkaProducer
from config import config
def start_producing():
    """Send 200 random rows from the adult test set to the app_messages topic.

    Reads the silver test parquet, then every 2 seconds publishes one random
    row as ``{"request_id": <uuid>, "data": <row as dict>}``. The producer is
    always flushed and closed, even if the loop raises (the old code leaked
    the connection on error).
    """
    # init data loader
    numerical_features = ["age", "fnlwgt", "capital_gain", "capital_loss", "hours_per_week"]
    categorical_features = [
        "workclass",
        "education",
        "marital_status",
        "occupation",
        "relationship",
        "race",
        "gender",
        "native_country",
    ]
    feature_names = numerical_features + categorical_features
    test_silver_path = "data/silver/adult.test.parquet"
    df_test = pd.read_parquet(test_silver_path)
    X_test = df_test[feature_names]
    adult_data_loader = AdultDataLoader(X_test)
    producer = KafkaProducer(bootstrap_servers=config.KAFKA_HOST)
    try:
        for _ in range(200):
            message_id = str(uuid.uuid4())
            message_content = next(adult_data_loader.generate_random_single_message())
            message = {"request_id": message_id, "data": json.loads(message_content.to_json())}
            producer.send("app_messages", json.dumps(message).encode("utf-8"))
            producer.flush()
            print("\033[1;31;40m -- PRODUCER: Sent message with id {}".format(message_id))
            sleep(2)
    finally:
        # Release the Kafka connection even on interrupt/error.
        producer.close()
class AdultDataLoader:
    """Serves uniformly random single rows from a fixed dataset."""

    def __init__(
        self,
        dataset: pd.DataFrame,
    ) -> None:
        # Rows are sampled by positional index via .iloc.
        self.dataset_ = dataset

    def generate_random_single_message(self) -> pd.Series:
        """Yield one uniformly random row of the dataset.

        BUGFIX: random.randint is inclusive on both ends, so the upper bound
        must be len - 1; the previous bound of len could raise IndexError.
        """
        n_rows = len(self.dataset_)
        idx = random.randint(0, n_rows - 1)
        yield self.dataset_.iloc[idx]
|
"""
Utitility functions for working with operators
"""
__all__ = [
'latex_align',
'collect_by_nc',
'collect_by_order',
'extract_operators',
'extract_operator_products',
'extract_all_operators',
'operator_order',
'operator_sort_by_order',
'drop_higher_order_terms',
'drop_terms_containing',
'drop_c_number_terms',
'lindblad_dissipator',
'master_equation',
'operator_lindblad_dissipator',
'operator_master_equation',
]
from collections import defaultdict
from sympy import (Basic, Add, Mul, Pow, exp, I, S, factor,
diff, Function, Eq, latex)
from sympy.physics.quantum import Operator, Commutator, Dagger
debug = False
# -----------------------------------------------------------------------------
# IPython notebook related functions
#
from IPython.display import Latex
def latex_align(data, env="align*", delim=None, breaks=None):  # or set col_delim="&" for auto align
    """Render *data* as a LaTeX environment for IPython display.

    data : list of rows, or a sympy Basic expression (split on Add terms).
    env : LaTeX environment name.
    delim : column delimiter (defaults to " " for lists, "& " for expressions).
    breaks : term indices at which to break expression lines (default every 4).

    Raises TypeError for unsupported input; the old code fell through with
    `body` unbound and raised a confusing NameError.
    """
    if isinstance(data, list):
        delim = " " if delim is None else delim
        body = " \\\\\n".join([delim.join([latex(col) for col in row])
                               for row in data])
    elif isinstance(data, Basic):
        args = Add.make_args(data)
        delim = "& " if delim is None else delim
        breaks = range(4, len(args)-3, 4) if breaks is None else breaks
        breaks = zip([0] + list(breaks), list(breaks) + [len(args)])

        def fmt_line(i, j):
            line = latex(Add(*args[i:j]))
            # Prefix '+' on continuation lines that do not already start '-'.
            if i != 0 and line[0] != '-':
                line = "+" + line
            return delim + line
        body = "\\\\\n".join([fmt_line(i, j) for i, j in breaks])
    else:
        raise TypeError("expected a list of rows or a sympy expression, "
                        "got %s" % type(data).__name__)
    return Latex("\\begin{{{0}}}\n{1}\n\\end{{{0}}}".format(env, body))
# -----------------------------------------------------------------------------
# Utility functions for manipulating operator expressions
#
def collect_by_nc(expr, evaluate=True):
    """Group the terms of expr by their noncommutative factor.

    Returns the re-assembled Add when evaluate is True, otherwise a dict
    mapping each noncommutative product (S.One for c-number terms) to its
    collected commutative coefficient.
    """
    groups = defaultdict(list)
    c_number_part = S.Zero
    for term in Add.make_args(expr):
        c_factors, nc_factors = term.args_cnc()
        if nc_factors:
            groups[Mul(*nc_factors)].append(Mul(*c_factors))
        else:
            c_number_part += Mul(*c_factors)
    grouped = {nc: Add(*coeffs) for nc, coeffs in groups.items()}
    if c_number_part is not S.Zero:
        grouped[S.One] = c_number_part
    if not evaluate:
        return grouped
    return Add(*[nc * coeff for nc, coeff in grouped.items()])
def collect_by_order(expr, evaluate=True):
    """
    return dict d such that expr == Add(*[d[n] for n in d])
    where Expr d[n] contains only terms with operator order n

    When evaluate is True, instead return the Add of the factored groups
    (built with evaluate=False so the groups stay visually separated).
    """
    args = Add.make_args(expr)
    d = {}
    # Bucket terms by operator order.
    for arg in args:
        n = operator_order(arg)
        if n in d: d[n] += arg
        else: d[n] = arg
    # Collect and factor each bucket by its noncommutative parts.
    d = {n : factor(collect_by_nc(arg)) for n, arg in d.items()}
    if evaluate:
        return Add(*[arg for arg in d.values()], evaluate=False)
    else:
        return d
def extract_operators(e, independent=False):
    """Return the unique Operator instances appearing anywhere in e.

    BUGFIX: the previous version traversed the undefined name `O` (NameError
    at call time) and shadowed `e` inside the comprehension. `independent`
    is kept for interface compatibility but is unused.
    """
    from sympy import preorder_traversal
    return list(set(node for node in preorder_traversal(e)
                    if isinstance(node, Operator)))
def extract_operator_products(expr):
    """
    Return a list of unique quantum operator products in the expression expr.
    """
    if isinstance(expr, Operator):
        return [expr]
    if isinstance(expr, Add):
        found = set()
        for term in expr.args:
            found.update(extract_operator_products(term))
        return list(found)
    _, nc_factors = expr.args_cnc()
    return [Mul(*nc_factors)] if nc_factors else []
def extract_operator_subexprs(expr):
    """Return every contiguous sub-product of the factors of expr."""
    factors = Mul.make_args(expr)
    n = len(factors)
    subexprs = []
    for start in range(n + 1):
        for stop in range(start + 1, n + 1):
            subexprs.append(Mul(*factors[start:stop]))
    return subexprs
def extract_all_operators(expr):
    """
    Extract all unique operators in the normal ordered form of a given
    operator expression, including composite operators (every contiguous
    sub-product of each operator product).
    """
    subexprs = set()
    for op in extract_operator_products(expr):
        subexprs.update(extract_operator_subexprs(op))
    return list(subexprs)
def operator_order(op):
    """Return the operator order (degree in noncommuting operators) of op."""
    if isinstance(op, Operator):
        return 1
    if isinstance(op, Pow):
        return operator_order(op.base) * op.exp
    if isinstance(op, Mul):
        return sum(operator_order(factor) for factor in op.args)
    # c-numbers and anything else contribute order zero
    return 0
def operator_sort_by_order(ops):
    """Sort operators by operator order, breaking ties by their repr."""
    return sorted(ops, key=lambda op: (operator_order(op), repr(op)))
def drop_higher_order_terms(e, order):
    """
    Drop any terms with operator order greater than order arg
    """
    if not isinstance(e, Add):
        return e
    return Add(*(term for term in e.args if operator_order(term) <= order))
def drop_terms_containing(e, e_drops):
    """
    Drop terms containing factors in the list e_drops.

    A term is dropped when an entry of e_drops appears among its factors, or
    when an entry is a Mul all of whose factors appear in the term. Non-Add
    expressions are returned unchanged.
    """
    if isinstance(e, Add):
        def _keep(term):
            # Early-exit on the first matching drop pattern (the old code
            # kept scanning and carried dead commented-out variants).
            for e_drop in e_drops:
                if e_drop in term.args:
                    return False
                if isinstance(e_drop, Mul) and all(f in term.args for f in e_drop.args):
                    return False
            return True
        e = Add(*(term for term in e.args if _keep(term)))
    return e
def drop_c_number_terms(e):
    """
    Drop commuting terms from the expression e
    """
    if not isinstance(e, Add):
        return e
    return Add(*(term for term in e.args if not term.is_commutative))
# ----------------------------------------------------------------------------
# Master equations and adjoint master equations
#
def lindblad_dissipator(a, rho):
    """Lindblad dissipator: a rho a+ - (rho a+ a + a+ a rho) / 2."""
    ad = Dagger(a)
    return a * rho * ad - rho * ad * a / 2 - ad * a * rho / 2
def operator_lindblad_dissipator(a, rho):
    """Adjoint Lindblad dissipator: a+ rho a - (rho a+ a + a+ a rho) / 2."""
    ad = Dagger(a)
    return ad * rho * a - rho * ad * a / 2 - ad * a * rho / 2
def master_equation(rho_t, t, H, a_ops, use_eq=True):
    """
    Lindblad master equation for rho_t under Hamiltonian H with collapse
    operators a_ops. Returns an Eq, or the (lhs, rhs) pair if use_eq=False.
    """
    lhs = diff(rho_t, t)
    dissipation = sum([lindblad_dissipator(a, rho_t) for a in a_ops])
    rhs = -I * Commutator(H, rho_t) + dissipation
    return Eq(lhs, rhs) if use_eq else (lhs, rhs)
def operator_master_equation(op_t, t, H, a_ops, use_eq=True):
    """
    Adjoint (Heisenberg-picture) master equation for operator op_t.
    Returns an Eq, or the (lhs, rhs) pair if use_eq=False.
    """
    lhs = diff(op_t, t)
    dissipation = sum([operator_lindblad_dissipator(a, op_t) for a in a_ops])
    rhs = I * Commutator(H, op_t) + dissipation
    return Eq(lhs, rhs) if use_eq else (lhs, rhs)
|
# uncompyle6 version 3.7.0
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.18 (v2.7.18:8d21aa21f2, Apr 20 2020, 13:19:08) [MSC v.1500 32 bit (Intel)]
# Embedded file name: scripts/common/battle_results/event.py
from battle_results_constants import BATTLE_RESULT_ENTRY_TYPE as ENTRY_TYPE
from DictPackers import ValueReplayPacker
# Each entry: (field name, type, default value, packer, aggregation rule,
# battle-result entry type). Decompiled data table -- do not hand-edit values.
BATTLE_RESULTS = [
 (
  'eventPoints', int, 0, None, 'sum', ENTRY_TYPE.VEHICLE_ALL),
 (
  'eventPointsLeft', int, 0, None, 'sum', ENTRY_TYPE.VEHICLE_ALL),
 (
  'eventPointsTotal', int, 0, None, 'sum', ENTRY_TYPE.VEHICLE_ALL),
 (
  'environmentID', int, 0, None, 'sum', ENTRY_TYPE.VEHICLE_ALL),
 (
  'eventLorePoints', int, 0, None, 'sum', ENTRY_TYPE.VEHICLE_ALL),
 (
  'commanderPoints', int, 0, None, 'sum', ENTRY_TYPE.VEHICLE_ALL),
 (
  'commanderLevelReached', bool, False, None, 'any', ENTRY_TYPE.VEHICLE_ALL),
 (
  'difficultyLevel', int, 0, None, 'any', ENTRY_TYPE.VEHICLE_ALL),
 (
  'eventAFKViolator', bool, False, None, 'skip', ENTRY_TYPE.VEHICLE_ALL),
 (
  'boosterApplied', bool, False, None, 'any', ENTRY_TYPE.VEHICLE_ALL),
 (
  'commanderTokenDelta', int, 0, None, 'any', ENTRY_TYPE.VEHICLE_ALL),
 (
  'commanderTokenCount', int, 0, None, 'any', ENTRY_TYPE.VEHICLE_ALL),
 (
  'commanderQuestBonusCount', int, 0, None, 'any', ENTRY_TYPE.VEHICLE_ALL),
 (
  'commanderCurrentLevel', int, 0, None, 'any', ENTRY_TYPE.VEHICLE_ALL),
 (
  'eventAFKBanned', bool, False, None, 'skip', ENTRY_TYPE.VEHICLE_ALL)]
import numpy as np
import cv2 as cv
import sys
def threshold(path, reverse=False):
    """Read the image at `path` and return a binary (0/255) threshold of it.

    Gray levels above 100 map to 255 (or to 0 when `reverse` is True).
    Returns None when the path cannot be read as an image.
    """
    img = cv.imread(path)
    if img is None:
        return None
    img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Idiomatic boolean test instead of `reverse == True`.
    mode = cv.THRESH_BINARY_INV if reverse else cv.THRESH_BINARY
    _, img_sh = cv.threshold(img_gray, 100, 255, mode)
    return img_sh
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("usage: python threshold.py <input-pic> <output-pic>")
        raise(NameError)
    img = threshold(sys.argv[1], False)
    # BUGFIX: `img == None` on a numpy array is an elementwise comparison
    # whose truth value raises ValueError; use an identity check instead.
    if img is None:
        print(sys.argv[1] + " is NOT a valid picture URI!")
        raise(NameError)
    cv.imwrite(sys.argv[2], img)
|
r"""
=========
liionpack
=========

liionpack is a tool for simulating battery packs with pybamm. It can design the
pack with a combination of batteries connected in series and parallel, or it
can read a netlist.
"""
from .simulations import *
from .utils import *
from .netlist_utils import *
from .sim_utils import *
from .solver_utils import *
from .protocols import *
from .plots import *
from .logger import *
__version__ = "0.0.1"
|
#! /usr/bin/env python3
#
# Copyright 2019 California Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ISOFIT: Imaging Spectrometer Optimal FITting
# Author: Winston Olson-Duvall, winston.olson-duvall@jpl.nasa.gov
import argparse
import numpy as np
import os
import scipy as s
import spectral.io.envi as envi
# Return the header associated with an image file
# Return the header associated with an image file
def find_header(imgfile):
    """Return the ENVI header path associated with an image file.

    Prefers `<imgfile>.hdr` when it exists on disk; otherwise replaces a
    trailing `.raw` or `.img` extension with `.hdr`. Raises IOError when no
    convention matches.
    """
    candidate = imgfile + '.hdr'
    if os.path.exists(candidate):
        return candidate
    for ext in ('.raw', '.img'):
        pos = imgfile.rfind(ext)
        if pos >= 0:
            return imgfile[0:pos] + '.hdr'
    raise IOError('No header found for file {0}'.format(imgfile))
# parse the command line (perform the correction on all command line arguments)
def main():
    """Upgrade AVIRIS-C radiances.

    Applies per-band gains (selected by the flight-year prefix of the file
    name) and optional per-band scaling, converting the cube from big-endian
    int16 BIP to float32 BIL with -9999 marking bad samples.
    """
    parser = argparse.ArgumentParser(description="Upgrade AVIRIS-C radiances")
    parser.add_argument('infile', type=str, metavar='INPUT')
    parser.add_argument('outfile', type=str, metavar='OUTPUT')
    parser.add_argument('--scaling', '-s', action='store')
    parser.add_argument('--verbose', '-v', action='store_true')
    args = parser.parse_args()
    hdrfile = find_header(args.infile)
    hdr = envi.read_envi_header(hdrfile)
    hdr['data type'] = '4'
    hdr['byte order'] = '0'
    if hdr['interleave'] != 'bip':
        raise ValueError('I expected BIP interleave')
    hdr['interleave'] = 'bil'
    hdr['data ignore value'] = '-9999'
    envi.write_envi_header(args.outfile + '.hdr', hdr)
    lines = int(hdr['lines'])
    samples = int(hdr['samples'])
    bands = int(hdr['bands'])
    frame = samples * bands
    if args.verbose:
        print('Lines: %i Samples: %i Bands: %i\n' % (lines, samples, bands))
    # BUG FIX: scipy no longer re-exports NumPy names (s.float32, s.loadtxt,
    # s.r_, s.int16 were deprecated and removed); use numpy directly.
    if args.scaling is None:
        scaling = np.ones(bands, dtype=np.float32)
    else:
        scaling = np.loadtxt(args.scaling)
    # Per-band radiometric gains depend on the flight-year prefix (e.g. 'f05').
    prefix = os.path.split(args.infile)[-1][:3]
    if prefix in ['f95', 'f96', 'f97', 'f98', 'f99', 'f00',
                  'f01', 'f02', 'f03', 'f04', 'f05']:
        gains = np.r_[50.0 * np.ones(160), 100.0 * np.ones(64)]
    elif prefix in ['f06', 'f07', 'f08', 'f09', 'f10', 'f11',
                    'f12', 'f13', 'f14', 'f15', 'f16', 'f17',
                    'f18', 'f19', 'f20', 'f21']:
        gains = np.r_[300.0 * np.ones(110), 600.0 * np.ones(50), 1200.0 * np.ones(64)]
    else:
        raise ValueError('Unrecognized year prefix "%s"' % prefix)
    with open(args.infile, 'rb') as fin:
        with open(args.outfile, 'wb') as fout:
            for line in range(lines):
                X = np.fromfile(fin, dtype=np.int16, count=frame)
                X.byteswap(True)  # on-disk data is opposite-endian
                X = X.flatten()
                bad = X < -49  # flag sentinel/bad samples before scaling
                X = np.array(X, dtype=np.float32)
                X = np.array(X.reshape((samples, bands)))
                X = scaling * X / gains
                X = X.flatten()
                X[bad] = -9999.0
                X = X.reshape((samples, bands))
                X = X.T.flatten()  # convert from BIP to BIL
                X = np.array(X, dtype=np.float32)
                X.tofile(fout)
# Script entry point.
if __name__ == "__main__":
    main()
|
""" This module contains the non-standard mass values for blocks in Starmade"""
#: A Python dict of block IDs with non standard mass
#: In the form of <block_id>: <mass>
#: To get the mass of a block ID in Python, you can do:
#: NON_STANDARD_MASS.get(block_id, 0.1)
NON_STANDARD_MASS = {
5: 0.15,
15: 0.05,
22: 0.05,
47: 0.05,
55: 0.025,
56: 0.05,
59: 0.25,
62: 0.025,
63: 0.05,
64: 0.025,
69: 0.15,
70: 0.15,
73: 0.025,
74: 0.025,
75: 0.15,
76: 0.15,
77: 0.15,
78: 0.15,
79: 0.15,
80: 0.025,
81: 0.15,
82: 0.025,
83: 0.025,
84: 0.025,
85: 0.01,
86: 0.025,
87: 0.025,
89: 0.01,
90: 0.025,
91: 0.025,
92: 0.025,
93: 0.01,
95: 0.01,
96: 0.01,
97: 0.01,
98: 0.01,
99: 0.01,
100: 0.01,
101: 0.01,
102: 0.01,
103: 0.01,
104: 0.01,
105: 0.01,
106: 0.01,
107: 0.01,
108: 0.01,
109: 0.01,
113: 0.05,
122: 0.15,
138: 0.025,
139: 0.025,
140: 0.025,
141: 0.025,
142: 0.01,
143: 0.025,
144: 0.01,
145: 0.025,
146: 0.01,
150: 0.01,
151: 0.025,
152: 0.01,
153: 0.025,
154: 0.01,
155: 0.025,
156: 0.01,
157: 0.025,
158: 0.01,
159: 0.025,
160: 0.01,
161: 0.025,
162: 0.01,
163: 0.025,
164: 0.01,
165: 0.025,
166: 0.01,
170: 0.01,
171: 0.025,
172: 0.01,
173: 0.025,
174: 0.01,
178: 0.01,
179: 0.025,
180: 0.01,
181: 0.025,
182: 0.01,
186: 0.01,
190: 0.01,
194: 0.01,
198: 0.01,
202: 0.01,
203: 0.025,
204: 0.01,
205: 0.025,
219: 0.01,
220: 0.01,
223: 0.01,
224: 0.01,
225: 0.01,
226: 0.01,
227: 0.01,
228: 0.01,
229: 0.01,
230: 0.01,
231: 0.01,
232: 0.01,
233: 0.01,
234: 0.01,
235: 0.01,
236: 0.01,
237: 0.01,
238: 0.01,
239: 0.01,
240: 0.01,
241: 0.01,
242: 0.01,
243: 0.01,
244: 0.01,
245: 0.01,
246: 0.01,
247: 0.01,
248: 0.01,
249: 0.01,
250: 0.01,
251: 0.01,
252: 0.01,
253: 0.01,
254: 0.01,
263: 0.25,
264: 0.25,
265: 0.25,
266: 0.25,
267: 0.25,
268: 0.25,
269: 0.25,
270: 0.25,
271: 0.25,
272: 0.01,
273: 0.01,
274: 0.025,
275: 0.025,
276: 0.025,
277: 0.01,
278: 0.01,
279: 0.01,
280: 0.01,
281: 0.01,
282: 0.025,
283: 0.025,
284: 0.025,
285: 0.025,
286: 0.01,
287: 0.025,
288: 0.01,
291: 0.25,
293: 0.15,
294: 0.15,
295: 0.15,
296: 0.15,
297: 0.15,
298: 0.15,
299: 0.15,
300: 0.15,
301: 0.15,
302: 0.15,
303: 0.15,
304: 0.15,
305: 0.15,
306: 0.15,
307: 0.15,
308: 0.15,
309: 0.15,
310: 0.15,
311: 0.25,
312: 0.25,
313: 0.25,
314: 0.25,
315: 0.25,
316: 0.25,
317: 0.25,
318: 0.25,
319: 0.25,
320: 0.25,
321: 0.25,
322: 0.25,
323: 0.25,
324: 0.25,
325: 0.25,
326: 0.25,
327: 0.25,
328: 0.25,
329: 0.05,
330: 0.05,
336: 0.025,
337: 0.025,
338: 0.025,
339: 0.025,
340: 0.01,
346: 0.05,
348: 0.15,
357: 0.15,
367: 0.05,
368: 0.05,
369: 0.25,
370: 0.25,
371: 0.25,
372: 0.25,
373: 0.25,
374: 0.25,
375: 0.25,
376: 0.25,
377: 0.25,
378: 0.25,
379: 0.25,
380: 0.25,
381: 0.25,
382: 0.25,
383: 0.25,
384: 0.25,
385: 0.15,
386: 0.15,
387: 0.15,
388: 0.15,
389: 0.15,
391: 0.15,
392: 0.15,
393: 0.15,
394: 0.15,
395: 0.15,
396: 0.15,
397: 0.15,
398: 0.15,
400: 0.15,
401: 0.25,
402: 0.25,
403: 0.15,
404: 0.15,
405: 0.01,
407: 0.01,
408: 0.01,
409: 0.01,
410: 0.01,
411: 0.01,
412: 0.01,
413: 0.01,
426: 0.15,
427: 0.15,
428: 0.15,
429: 0.15,
430: 0.15,
431: 0.25,
432: 0.25,
433: 0.25,
434: 0.25,
435: 0.25,
436: 0.15,
437: 0.15,
438: 0.15,
439: 0.15,
440: 0.01,
441: 0.01,
442: 0.01,
443: 0.01,
444: 0.01,
445: 0.025,
446: 0.025,
447: 0.025,
448: 0.025,
449: 0.025,
450: 0.025,
451: 0.025,
452: 0.01,
453: 0.01,
454: 0.01,
455: 0.01,
456: 0.01,
457: 0.01,
458: 0.01,
459: 0.01,
468: 0.05,
469: 0.05,
470: 0.05,
471: 0.05,
472: 0.05,
473: 0.05,
474: 0.05,
475: 0.05,
479: 0.025,
480: 0.01,
481: 0.01,
482: 0.01,
483: 0.01,
484: 0.01,
485: 0.01,
486: 0.01,
487: 0.01,
488: 0.01,
489: 0.01,
490: 0.01,
491: 0.01,
492: 0.01,
493: 0.01,
494: 0.01,
495: 0.01,
496: 0.025,
497: 0.025,
498: 0.025,
499: 0.01,
500: 0.01,
501: 0.01,
502: 0.01,
503: 0.01,
504: 0.01,
505: 0.01,
506: 0.01,
507: 0.25,
508: 0.25,
509: 0.25,
510: 0.25,
511: 0.25,
512: 0.25,
513: 0.25,
514: 0.25,
515: 0.25,
516: 0.25,
517: 0.25,
518: 0.25,
519: 0.25,
520: 0.25,
521: 0.25,
522: 0.25,
523: 0.25,
524: 0.25,
525: 0.25,
526: 0.25,
527: 0.25,
528: 0.25,
529: 0.25,
530: 0.25,
531: 0.25,
532: 0.25,
533: 0.25,
534: 0.25,
535: 0.25,
536: 0.25,
537: 0.25,
538: 0.25,
539: 0.25,
540: 0.25,
541: 0.25,
546: 0.01,
547: 0.01,
548: 0.01,
549: 0.01,
550: 0.01,
551: 0.01,
552: 0.01,
553: 0.01,
554: 0.01,
555: 0.01,
556: 0.05,
557: 0.05,
558: 0.05,
559: 0.05,
560: 0.05,
561: 0.05,
562: 0.05,
563: 0.05,
564: 0.01,
565: 0.01,
566: 0.01,
567: 0.01,
568: 0.01,
569: 0.01,
570: 0.01,
571: 0.01,
572: 0.01,
573: 0.01,
574: 0.01,
575: 0.01,
576: 0.01,
577: 0.01,
578: 0.01,
579: 0.01,
580: 0.01,
581: 0.01,
582: 0.01,
583: 0.01,
584: 0.01,
585: 0.01,
586: 0.01,
587: 0.01,
588: 0.15,
589: 0.05,
590: 0.05,
591: 0.25,
592: 0.25,
593: 0.25,
594: 0.25,
595: 0.25,
596: 0.25,
597: 0.25,
598: 0.05,
599: 0.05,
600: 0.05,
601: 0.05,
602: 0.05,
603: 0.05,
604: 0.05,
605: 0.05,
606: 0.05,
607: 0.05,
608: 0.05,
609: 0.05,
610: 0.05,
611: 0.05,
612: 0.05,
613: 0.05,
614: 0.05,
615: 0.05,
616: 0.05,
617: 0.05,
618: 0.05,
619: 0.05,
620: 0.05,
621: 0.05,
622: 0.05,
623: 0.05,
624: 0.05,
625: 0.05,
626: 0.05,
627: 0.05,
628: 0.05,
629: 0.05,
630: 0.05,
631: 0.05,
632: 0.05,
633: 0.05,
634: 0.05,
635: 0.05,
636: 0.05,
637: 0.05,
638: 0.05,
639: 0.05,
640: 0.05,
641: 0.05,
642: 0.05,
643: 0.05,
644: 0.05,
645: 0.05,
646: 0.05,
647: 0.05,
648: 0.15,
649: 0.15,
650: 0.15,
651: 0.15,
652: 0.15,
653: 0.15,
656: 0.01,
657: 0.025,
658: 0.025,
660: 0.25,
661: 0.25,
662: 0.05,
666: 0.01,
667: 0.01,
668: 0.01,
670: 0.01,
671: 0.05,
673: 0.25,
674: 0.25,
675: 0.25,
676: 0.01,
680: 0.025,
690: 0.25,
691: 0.25,
692: 0.25,
693: 0.25,
694: 0.25,
698: 0.05,
699: 0.05,
700: 0.05,
701: 0.15,
702: 0.15,
703: 0.15,
704: 0.25,
705: 0.25,
706: 0.25,
707: 0.05,
708: 0.05,
709: 0.05,
710: 0.15,
711: 0.15,
712: 0.15,
713: 0.25,
714: 0.25,
715: 0.25,
716: 0.25,
717: 0.25,
718: 0.25,
719: 0.05,
720: 0.05,
721: 0.05,
722: 0.15,
723: 0.15,
724: 0.15,
725: 0.25,
726: 0.25,
727: 0.25,
728: 0.25,
729: 0.25,
730: 0.25,
731: 0.05,
732: 0.05,
733: 0.05,
734: 0.15,
735: 0.15,
736: 0.15,
737: 0.25,
738: 0.25,
739: 0.25,
740: 0.25,
741: 0.25,
742: 0.25,
743: 0.05,
744: 0.05,
745: 0.05,
746: 0.15,
747: 0.15,
748: 0.15,
749: 0.25,
750: 0.25,
751: 0.25,
752: 0.25,
753: 0.25,
754: 0.25,
755: 0.05,
756: 0.05,
757: 0.05,
758: 0.15,
759: 0.15,
760: 0.25,
761: 0.25,
762: 0.25,
763: 0.25,
764: 0.25,
765: 0.25,
766: 0.25,
767: 0.05,
768: 0.05,
769: 0.05,
770: 0.15,
771: 0.15,
772: 0.15,
773: 0.25,
774: 0.25,
775: 0.25,
776: 0.25,
777: 0.25,
778: 0.25,
779: 0.05,
780: 0.05,
781: 0.05,
782: 0.15,
783: 0.15,
784: 0.15,
785: 0.25,
786: 0.25,
787: 0.25,
788: 0.25,
789: 0.25,
790: 0.25,
791: 0.05,
792: 0.05,
793: 0.05,
794: 0.15,
795: 0.15,
796: 0.15,
797: 0.25,
798: 0.25,
799: 0.25,
800: 0.25,
801: 0.25,
802: 0.25,
803: 0.05,
804: 0.05,
805: 0.05,
806: 0.15,
807: 0.15,
808: 0.15,
809: 0.25,
810: 0.25,
811: 0.25,
812: 0.25,
813: 0.25,
814: 0.25,
815: 0.05,
816: 0.05,
817: 0.05,
818: 0.15,
819: 0.15,
820: 0.15,
821: 0.15,
822: 0.15,
823: 0.25,
824: 0.25,
825: 0.25,
826: 0.25,
827: 0.25,
828: 0.05,
829: 0.05,
830: 0.05,
831: 0.05,
832: 0.05,
833: 0.05,
834: 0.05,
835: 0.05,
836: 0.15,
837: 0.15,
838: 0.15,
839: 0.25,
840: 0.25,
841: 0.25,
842: 0.15,
843: 0.15,
844: 0.15,
845: 0.05,
846: 0.05,
847: 0.05,
848: 0.25,
849: 0.25,
850: 0.25,
851: 0.15,
852: 0.15,
853: 0.15,
854: 0.25,
855: 0.25,
856: 0.25,
857: 0.25,
858: 0.25,
859: 0.25,
863: 0.15,
864: 0.15,
865: 0.15
}
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : container.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 01/18/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import copy
import collections
from .printing import kvformat, kvprint
__all__ = ['G', 'g', 'GView', 'SlotAttrObject', 'OrderedSet']
class G(dict):
    """A dict whose entries can also be read, written and deleted as
    attributes."""

    def __getattr__(self, k):
        # EAFP: a missing key must surface as AttributeError so the
        # attribute protocol (hasattr, getattr defaults) keeps working.
        try:
            return self[k]
        except KeyError:
            raise AttributeError

    def __setattr__(self, k, v):
        self[k] = v

    def __delattr__(self, k):
        del self[k]

    def format(self, sep=': ', end='\n'):
        """Render the mapping as text via kvformat."""
        return kvformat(self, sep=sep, end=end)

    def print(self, sep=': ', end='\n', file=None):
        """Pretty-print the mapping via kvprint."""
        return kvprint(self, sep=sep, end=end, file=file)


# Module-level shared "global namespace" instance.
g = G()
class GView(object):
    """Attribute/item view over a plain dict.

    The backing dict is stored with ``object.__setattr__`` so the attribute
    hooks below never recurse into themselves.
    """

    def __init__(self, dict_=None):
        backing = dict() if dict_ is None else dict_
        object.__setattr__(self, '_dict', backing)

    def raw(self):
        """Return the underlying dict object."""
        return object.__getattribute__(self, '_dict')

    def __getattr__(self, k):
        store = self.raw()
        if k in store:
            return store[k]
        raise AttributeError

    def __setattr__(self, k, v):
        self.raw()[k] = v

    def __delattr__(self, k):
        del self.raw()[k]

    def __getitem__(self, k):
        return self.raw()[k]

    def __setitem__(self, k, v):
        self.raw()[k] = v

    def __delitem__(self, k):
        del self.raw()[k]

    def __contains__(self, k):
        return k in self.raw()

    def update(self, other):
        """Merge ``other`` into the backing dict."""
        self.raw().update(other)

    def copy(self):
        """Return a new GView over a shallow copy of the backing dict."""
        return GView(self.raw().copy())
class SlotAttrObject(object):
    """Object whose assignable attributes are limited to those declared on
    its class; an optional ``_convert_<name>`` class hook transforms the
    value before it is stored."""

    def __init__(self, **kwargs):
        self.update(**kwargs)

    def update(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __setattr__(self, k, v):
        assert not k.startswith('_')
        if k not in type(self).__dict__:
            # do not use hasattr; since it may result in infinite recursion
            raise AttributeError(
                '{}: could not set non-existing attribute {}'.format(
                    self, k))
        converter = getattr(type(self), '_convert_{}'.format(k), None)
        if converter is not None:
            v = converter(v)
        super().__setattr__(k, v)

    def clone(self):
        """Return a deep copy of this object."""
        return copy.deepcopy(self)
class OrderedSet(object):
    """A set that remembers insertion order, backed by an OrderedDict
    (keys give O(1) membership, values are a dummy True)."""

    def __init__(self, initial_list=None):
        if initial_list is not None:
            self._dict = collections.OrderedDict([(v, True) for v in initial_list])
        else:
            self._dict = collections.OrderedDict()

    def append(self, value):
        """Add `value`, keeping its first-insertion position if present."""
        self._dict[value] = True

    def remove(self, value):
        """Remove `value`; raises KeyError if absent."""
        del self._dict[value]

    def __contains__(self, value):
        return value in self._dict

    def __iter__(self):
        # BUG FIX: __iter__ must return an *iterator*; the original returned
        # self._dict.keys() (a view, no __next__), so `for x in s` raised
        # "TypeError: iter() returned non-iterator of type 'dict_keys'".
        return iter(self._dict)

    def as_list(self):
        """Return the elements as a list, in insertion order."""
        return list(self._dict.keys())
|
# -*- coding: utf-8 -*-
import unittest
from context import conflict2dict
class TestConflictParser(unittest.TestCase):
    """Functional tests of conflict_parser."""

    def test_default_xml_contains_rules(self):
        """The default conflicts XML should yield at least one rule."""
        res = conflict2dict()
        # BUG FIX: assertIsNot(res, []) compared *identity* against a fresh
        # list literal, which is always true, so the test could never fail.
        # Compare by value instead.
        self.assertNotEqual(res, [])

    def test_conflict2dict_ouput_type(self):
        """conflict2dict returns a list."""
        res = conflict2dict('conflicts_test.xml')
        self.assertEqual(type(res), list)

    def test_conflict2dict_output_unit_dict_keys(self):
        """Each conflict dict carries the expected keys, in order."""
        res = conflict2dict('conflicts_test.xml')
        keys = [k for u in res for k in u]
        self.assertEqual(keys, ['tier', 'query', 'code', 'description', 'id'])

    def test_conflict2dict_output_values(self):
        """Spot-check the parsed values of the first conflict."""
        res = conflict2dict('conflicts_test.xml')
        c = res[0]
        self.assertEqual(c['id'], 1)
        self.assertEqual(c['code'], 'ErrA001')
        self.assertEqual(c['tier'], 'bib-ord')
        self.assertEqual(c['query'], 'SELECT * FROM bibs WHERE bibs.c_cutter = 0 AND bibs.c_type != "fea" AND (bibs.c_type != "eas" AND bibs.author IS NULL)')
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
import logging
from flask import jsonify, request
import flask_login
import mediacloud.error
from server import app, TOOL_API_KEY
from server.util.request import api_error_handler, argument_is_valid, arguments_required, json_error_response, \
form_fields_required
from server.views.topics.apicache import topic_story_count
from server.auth import user_mediacloud_key, user_mediacloud_client
from server.util.tags import tags_in_tag_set, TagSetDiscoverer, \
COLLECTION_SET_PARTISANSHIP_QUINTILES_2016, \
COLLECTION_SET_PARTISANSHIP_QUINTILES_2019
from server.views.topics.foci import FOCAL_TECHNIQUE_BOOLEAN_QUERY
logger = logging.getLogger(__name__)
YEAR_2016 = "2016"
YEAR_2019 = "2019"
VALID_YEARS = [YEAR_2016, YEAR_2019]
@app.route('/api/topics/<topics_id>/focal-sets/retweet-partisanship/preview/story-counts', methods=['GET'])
@flask_login.login_required
@arguments_required('year')
@argument_is_valid('year', VALID_YEARS)
@api_error_handler
def retweet_partisanship_story_counts(topics_id):
    """Preview endpoint: per-quintile story counts (and pct of the topic
    total) for the retweet-partisanship collections of the requested year."""
    # TODO: add in overall timespan id here so it works in different snapshots
    tag_story_counts = []
    year = request.args['year']
    partisanship_tags = _cached_partisanship_tags(year)
    # grab the total stories (an upstream API failure is treated as zero)
    try:
        total_stories = topic_story_count(user_mediacloud_key(), topics_id)['count']
    except mediacloud.error.MCException:
        total_stories = 0
    # make a count for each tag
    for tag in partisanship_tags:
        try:
            tagged_story_count = topic_story_count(user_mediacloud_key(), topics_id, q=tag['query'])['count']
            pct = float(tagged_story_count)/float(total_stories)
        except ZeroDivisionError:
            # NOTE(review): this branch also discards a successfully fetched
            # tagged_story_count when total_stories == 0 — confirm intended.
            tagged_story_count = 0
            pct = 0
        except mediacloud.error.MCException:
            tagged_story_count = 0
            pct = 0
        tag_story_counts.append({
            'label': tag['label'],
            'tags_id': tag['tags_id'],
            'count': tagged_story_count,
            'pct': pct
        })
    # order them in the way a person would expect ( left to center to right)
    collection_set = {
        YEAR_2016: COLLECTION_SET_PARTISANSHIP_QUINTILES_2016,
        YEAR_2019: COLLECTION_SET_PARTISANSHIP_QUINTILES_2019
    }.get(year)
    ordered_tag_story_counts = list()
    for quintile in collection_set:
        # NOTE(review): [0] raises IndexError if some quintile has no matching
        # tag — presumably guaranteed by the tag set; verify.
        ordered_tag_story_counts.append([t for t in tag_story_counts if t['tags_id'] == quintile][0])
    return jsonify({'story_counts': ordered_tag_story_counts})
@app.route('/api/topics/<topics_id>/focal-sets/retweet-partisanship/preview/coverage', methods=['GET'])
@flask_login.login_required
@arguments_required('year')
@argument_is_valid('year', VALID_YEARS)
@api_error_handler
def retweet_partisanship_coverage(topics_id):
    """Preview endpoint: how many of the topic's stories come from media
    tagged with any partisanship quintile for the requested year."""
    year = request.args['year']
    partisanship_tags = _cached_partisanship_tags(year)
    # grab the total stories (an upstream API failure is treated as zero)
    try:
        total_stories = topic_story_count(user_mediacloud_key(), topics_id)['count']
    except mediacloud.error.MCException:
        total_stories = 0
    # count the stories in any media in tagged as partisan
    tags_ids = " ".join([str(t['tags_id']) for t in partisanship_tags])
    tags_ids_query_clause = "tags_id_media:({})".format(tags_ids)
    try:
        tagged_story_count = topic_story_count(user_mediacloud_key(), topics_id, q=tags_ids_query_clause)['count']
    except mediacloud.error.MCException:
        tagged_story_count = 0
    return jsonify({'counts': {'count': tagged_story_count, 'total': total_stories}})
def _get_tag_sets_id(year):
    """Return the tag-sets id holding the partisanship collections for
    `year`, or None for an unrecognized year."""
    discoverer = TagSetDiscoverer()
    year_to_set = {
        YEAR_2016: discoverer.partisan_2016_collections_set,
        YEAR_2019: discoverer.partisan_2019_collections_set,
    }
    return year_to_set.get(year, None)
def _get_tag_description(year, quintile):
    """Fill in the per-year description template for one partisanship
    quintile (raises AttributeError for an unknown year)."""
    templates = {
        YEAR_2016: "Media sources that were retweeted more often during the {year} US election season by people on "
                   "the {quintile}.",
        YEAR_2019: "Media sources that were shared disproportionately by users on Twitter during {year} by people on "
                   "the {quintile}. Media source partisanship is determined by the average partisanship of the users "
                   "who share urls belonging to that media source."
    }
    template = templates.get(year)
    return template.format(year=year, quintile=quintile)
def _cached_partisanship_tags(year):
    """Fetch the partisanship tags for `year`, attaching to each one the
    media-tag query usable against the topic."""
    tags = tags_in_tag_set(TOOL_API_KEY, _get_tag_sets_id(year))
    for tag in tags:
        tag['query'] = "tags_id_media:{}".format(tag['tags_id'])
    return tags
@app.route('/api/topics/<topics_id>/focal-sets/retweet-partisanship/create', methods=['POST'])
@form_fields_required('focalSetName', 'focalSetDescription', 'year')
@flask_login.login_required
def create_retweet_partisanship_focal_set(topics_id):
    """Create a retweet-partisanship focal set with one focus per quintile."""
    year = request.form['year']
    if year not in VALID_YEARS:
        # BUG FIX: the message template was '"{} is invalid.' — the stray
        # opening quote produced an unbalanced quote in the user-facing error.
        return json_error_response('"{}" is invalid.'.format(year))
    # grab the focalSetName and focalSetDescription and then make one
    focal_set_name = request.form['focalSetName']
    focal_set_description = request.form['focalSetDescription']
    return _add_retweet_partisanship_to_topic(topics_id, focal_set_name, focal_set_description, year)
def _add_retweet_partisanship_to_topic(topics_id, focal_set_name, focal_set_description, year):
    """Create the focal-set definition, then one boolean-query focus per
    partisanship quintile tag.

    Returns {'success': True}, or a JSON error response as soon as one of
    the API calls fails.
    """
    user_mc = user_mediacloud_client()
    focal_technique = FOCAL_TECHNIQUE_BOOLEAN_QUERY
    new_focal_set = user_mc.topicFocalSetDefinitionCreate(topics_id, focal_set_name, focal_set_description,
                                                          focal_technique)
    if 'focal_set_definitions_id' not in new_focal_set:
        return json_error_response('Unable to create the subtopic set')
    # now make the foci in it - one for each partisanship quintile
    partisanship_tags = _cached_partisanship_tags(year)
    for tag in partisanship_tags:
        name = tag['label']
        description = _get_tag_description(year, tag['label'])
        query = tag['query']
        focal_set_definitions_id = new_focal_set['focal_set_definitions_id']
        # create a new boolean query subtopic based on the tag sets
        new_focus = user_mc.topicFocusDefinitionCreate(topics_id,
                                                       name=name, description=description, query=query,
                                                       focal_set_definitions_id=focal_set_definitions_id)
        if (len(new_focus) == 0) or ('focus_definitions_id' not in new_focus[0]):
            return json_error_response('Unable to create the {} subtopic'.format(name))
    return {'success': True}
|
#
# Lockstep Platform SDK for Python
#
# (c) 2021-2022 Lockstep, Inc.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
# @author Lockstep Network <support@lockstep.io>
# @copyright 2021-2022 Lockstep, Inc.
# @link https://github.com/Lockstep-Network/lockstep-sdk-python
#
from dataclasses import dataclass
@dataclass
class FinancialAccountBalanceHistoryModel:
    """
    Represents a balance for a financial account for a given period of
    time.
    """
    # Identity and grouping keys — presumably GUID strings; confirm with the
    # Lockstep API reference.
    financialAccountBalanceHistoryId: str | None = None
    groupKey: str | None = None
    financialAccountId: str | None = None
    appEnrollmentId: str | None = None
    # Fiscal period this balance belongs to.
    financialYear: int | None = None
    periodNumber: int | None = None
    # Period boundary dates — presumably ISO-8601 strings; TODO confirm format.
    periodStartDate: str | None = None
    periodEndDate: str | None = None
    status: str | None = None
    # Balance amount for the period.
    balance: float | None = None
    # Audit fields: creation/modification timestamps and the user ids involved.
    created: str | None = None
    createdUserId: str | None = None
    modified: str | None = None
    modifiedUserId: str | None = None
|
from tests.unit_tests.running_modes.transfer_learning .test_transfer_learning import TestTransferLearning |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
backup_restore_pattern.py
Pixelblaze based LED controller for APA102/SK9822 LED Strips
Working example using PixelblazeClient
NOTE: this implements a different way of using an async library!
MQTT interface for pixelblaze v3
N Waterton V 1.0 16th March 2021: Initial release
N Waterton V1.0.2 6th April 2021; moved valid_ip to utils
N Waterton V1.0.3 21st April 2021; fixed some bugs
'''
import sys, json
import logging
import asyncio
import argparse
import os.path
import ipaddress
from zipfile import ZipFile, is_zipfile
try:
from pixelblaze_async.PixelblazeClient import PixelblazeClient
except (ImportError, ModuleNotFoundError):
from PixelblazeClient import PixelblazeClient
__version__ = "1.0.3"
class PixelblazeBackup(PixelblazeClient):
    '''
    Backup/restore Pixelblaze patterns to/from a zip archive.
    NOTE this adds a file called 'index.txt' (self.index_file) to the zip archive created, this contains the list
    of pid and names backed up so there will always be one more file in the archive than there are patterns.
    You can retrieve and list from zip files missing `index.txt`, but the names of the patterns will not be displayed,
    just the PID's
    'index.txt' (self.index_file) is a text file that can be read as a reference for pid's to names in the archive.
    '''
    __version__ = "1.0.3"
    def __init__(self, pixelblaze_ip=None, filename=None, patterns=None, action='list'):
        super().__init__(pixelblaze_ip)
        self.filename = filename
        self.action = action
        # dispatch table: maps the action name to the coroutine that runs it
        self.actions = { 'backup' : self.backup_patterns,
                         'restore': self.restore_pattern,
                         'list'   : self.list_zip
                       }
        self.log.info('Action is: {}'.format(self.action))
        # name of the pid -> pattern-name index member inside the archive
        self.index_file = 'index.txt'
        # optional list of pattern names/pids to restrict backup/restore to
        self.patterns = patterns
    def __await__(self):
        # enables `await PixelblazeBackup(...)`: awaiting the instance runs
        # init(), which performs the selected action.
        return self.init().__await__()
    async def init(self):
        '''
        get filename and run the selected action
        '''
        self.filename = await self.get_filename(self.filename)
        if not self.filename: return
        await self.actions[self.action]()
    async def backup_patterns(self):
        '''
        backup a list of patterns or all patterns to zip file
        '''
        try:
            backup_patterns = await self.get_backup_patterns()
            with ZipFile(self.filename, 'w') as myzip:
                # write the pid -> name index first, then one member per pattern
                myzip.writestr(self.index_file, json.dumps({pid: value[0] for pid, value in backup_patterns.items()}, indent=2))
                for pid, value in backup_patterns.items():
                    myzip.writestr(pid, value[1])
                    self.log.info('Added {} {:30.30} to {}'.format(pid, value[0], self.filename))
            self.log.info('Backup done: {}'.format(self.filename))
            info = self.list_zip_contents()
            self.log.info('{} files backed up to {}'.format(len(info), self.filename))
        except Exception as e:
            self.log.error(e)
    async def restore_pattern(self):
        '''
        restore a list of patterns or all patterns from zip file
        '''
        await self.start_ws()
        await asyncio.sleep(1)
        try:
            restore_patterns = await self.get_patterns_to_restore()
            with ZipFile(self.filename) as myzip:
                for pid, name in restore_patterns.items():
                    # the index member is metadata, not a pattern — skip it
                    if pid != self.index_file:
                        self.log.info('Restoring {}, {:30.30} to {}'.format(pid, name, self.ip))
                        binary = myzip.read(pid)
                        await self.load_binary_file(pid, binary)
            self.log.info('{} files restored from {}'.format(len(restore_patterns), self.filename))
        except Exception as e:
            self.log.error(e)
        await self._stop()
    async def list_zip(self):
        '''
        list contents of zip file
        '''
        self.log.info('Contents of zip file: {}'.format(self.filename))
        info = self.list_zip_contents()
        self.log.info('{} files in file {}'.format(len(info), self.filename))
    async def get_backup_patterns(self):
        '''
        downloads patterns to back up.
        returns dictionary of binary files keyed by pid: {pid: (name, binary)}
        '''
        backup_patterns = {}
        await self.start_ws()
        #this takes a while (if it's a long list) so increase cache timeout (or we will retrieve the pattern list every 5 seconds)
        self.cache_timeout = 30
        self.log.info('Backing up {}({}) to {}'.format(self.name, self.ip, self.filename))
        await asyncio.sleep(1)
        try:
            if not self.patterns:
                self.patterns = await self._get_patterns()
            #backup_patterns = {pid_name[0]:(pid_name[1], await self.save_binary_file(pid_name[0])) for pid_name in[await self._get_pattern_id_and_name(p) for p in self.patterns] if all(pid_name)}
            #This does the same as below, but may be a dictionary comprehension too far...
            for pattern in self.patterns:
                pid, name = await self._get_pattern_id_and_name(pattern)
                if not all([pid, name]):
                    self.log.warning('pattern {} Not found'.format(pattern))
                    continue
                binary = await self.save_binary_file(pid)
                if binary:
                    backup_patterns[pid] = (name, binary)
        except Exception as e:
            self.log.error(e)
        await self._stop()
        return backup_patterns
    async def get_patterns_to_restore(self):
        '''
        looks up pattern pid and name to restore from index if there is one
        if patterns is [], just loads index into restore_patterns.
        returns restore_patterns
        '''
        restore_patterns = {}
        with ZipFile(self.filename) as myzip:
            try:
                patterns_txt = myzip.read(self.index_file)
                patterns = json.loads(patterns_txt)
                if not self.patterns:
                    restore_patterns = patterns.copy()
                else:
                    # keep index entries whose pid *or* name was requested
                    restore_patterns = {pid:name for p in self.patterns for pid, name in patterns.items() if p in [pid, name]}
            except Exception as e:
                self.log.error(e)
                # no index member: fall back to restoring every archive
                # member under an 'Unknown' name
                restore_patterns = {file.filename:'Unknown' for file in myzip.infolist() if file.filename != self.index_file}
        return restore_patterns
    async def get_filename(self, filename=None):
        '''
        check filename is valid.
        Starts websocket to get pb's name to generate a filename if it
        isn't defined.
        '''
        if self.filename:
            return self.check_filename(self.filename)
        await self.start_ws()
        await self._stop()
        return self.check_filename(self.name)
    def check_filename(self, filename):
        '''
        validate the filename: ensure a .zip suffix, and for list/restore
        require an existing, valid zip file (returns None otherwise)
        '''
        if not filename.endswith('.zip'):
            filename+='.zip'
        if (not os.path.isfile(filename) or not is_zipfile(filename)) and self.action in ['list', 'restore']:
            self.log.warning('{} is not a valid zip file'.format(filename))
            filename = None
        return filename
    def list_zip_contents(self):
        '''
        pretty-print the zip file contents
        uses 'index.txt' (self.index_file) to look up pattern names from pid
        '''
        with ZipFile(self.filename) as myzip:
            info = myzip.infolist()
            try:
                patterns_txt = myzip.read(self.index_file)
                patterns = json.loads(patterns_txt)
            except Exception as e:
                self.log.error(e)
                patterns = {}
            for file in info:
                name = patterns.get(file.filename, 'UNKNOWN')
                self.log.info('file: {:17}, name: {:30.30}, date: {} size: {}'.format(file.filename, name, self.format_date(file.date_time), file.file_size))
        return info
    def format_date(self, date):
        '''
        nice date formatting for a zipinfo date_time tuple
        '''
        return '{}/{:0>2}/{:0>2} {:0>2}:{:0>2}:{:0>2}'.format(*date)
def parse_args():
    """Parse command-line arguments for the backup/restore/list tool."""
    #-------- Command Line -----------------
    parser = argparse.ArgumentParser(
        description='Backup/Restore/List Pixelblaze Patterns to/from Zip file')
    parser.add_argument(
        'pixelblaze_ip',
        action='store',
        type=str,
        default=None,
        help='ipaddress of pixelblaze controller (default: %(default)s)')
    parser.add_argument(
        '-f', '--filename',
        action='store',
        type=str,
        default=None,
        help='filename to backup/restore from (default: %(default)s)')
    parser.add_argument(
        '-p', '--patterns',
        nargs='*',
        action='store',
        type=str,
        default=None,
        help='list of names or ids of patterns to backup/restore (None is All) (default: %(default)s)')
    parser.add_argument(
        '-a', '--action',
        nargs='?',
        default = 'list',
        const='list',
        choices=['backup', 'restore', 'list'],
        help='action: backup, restore or list (default: %(default)s)')
    parser.add_argument(
        '-l', '--log',
        action='store',
        type=str,
        default="./pixelblaze.log",
        help='path/name of log file (default: %(default)s)')
    parser.add_argument(
        '-D', '--debug',
        action='store_true',
        default = False,
        help='debug mode')
    parser.add_argument(
        '--version',
        action='version',
        version="%(prog)s ({})".format(__version__),
        help='Display version of this program')
    return parser.parse_args()
async def main():
    """CLI entry point: configure logging, then await the backup object,
    which runs the selected action."""
    from pixelblaze_async.utils import setup_logger, valid_ip
    arg = parse_args()
    if arg.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    # setup logging
    setup_logger('Pixelblaze', arg.log, level=log_level, console=True)
    global log
    log = logging.getLogger('Pixelblaze')
    log.info("*******************")
    log.info("* Program Started *")
    log.info("*******************")
    log.debug('Debug Mode')
    log.info("{} Version: {}".format(sys.argv[0], __version__))
    log.info("{} Version: {}".format(PixelblazeClient.__name__, PixelblazeClient.__version__))
    log.info("Python Version: {}".format(sys.version.replace('\n','')))
    loop = asyncio.get_event_loop()
    loop.set_debug(arg.debug)
    if not valid_ip(arg.pixelblaze_ip):  # if it's not an ip, must be a filename
        log.warning('{} is not a valid ip so using it as filename and action is list'.format(arg.pixelblaze_ip))
        arg.filename = arg.pixelblaze_ip
        arg.pixelblaze_ip = None
        arg.action = 'list'  # list is the only thing allowed if we don't have an ip address
    # BUG FIX: pre-bind pb so the except handler below cannot hit a NameError
    # when the interrupt arrives before/while PixelblazeBackup is constructed.
    pb = None
    try:
        pb = await PixelblazeBackup(arg.pixelblaze_ip, arg.filename, arg.patterns, arg.action)
    except (KeyboardInterrupt, SystemExit):
        log.info("System exit Received - Exiting program")
        if pb is not None:
            await pb._stop()
# Script entry point: run main() to completion on the default event loop.
if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())
import json
from . import config
def get_context_processor():
    """Build a template context processor that exposes the Sentry public DSN
    and, for authenticated users, a JSON blob identifying the user."""
    def sentry_contexts(request):
        # Only serialize identifying fields when a user is logged in;
        # anonymous requests get an empty string.
        user_context = ""
        if request.user.is_authenticated:
            user_context = json.dumps({
                'email': request.user.email,
                'id': request.user.id,
            })
        return {
            'SENTRY_PUBLIC_DSN': config.SENTRY_CONFIGS.public_dsn,
            'SENTRY_USER_CONTEXT_JSON': user_context,
        }
    return sentry_contexts
|
from util import *
import numpy as np
from functools import reduce
global zzl_solution_file_a
def ReadAppResources(app_resources_file):
    """Parse the app-resource CSV and populate the global ``Apps`` dict.

    Each line: appid, cpu-series ('|'-separated), mem-series, disk, P, M, PM.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(app_resources_file) as app_resfile:
        for line in app_resfile:
            line = line.strip('\n')
            vec_resource = line.split(',')
            appid = vec_resource[0]
            # BUG FIX: np.float was removed in NumPy 1.24; the builtin float
            # is the equivalent dtype.
            cpu = np.array(vec_resource[1].split('|')).astype(float)
            mem = np.array(vec_resource[2].split('|')).astype(float)
            disk = float(vec_resource[3])
            P = int(vec_resource[4])
            M = int(vec_resource[5])
            PM = int(vec_resource[6])
            Apps[appid] = App(appid, cpu, mem, disk, P, M, PM)
def ReadJobInformation(job_info_file_a):
    """Parse the job-information file and populate the global ``Jobs`` dict.

    Line format: "job_id,cpu,mem,instances,exec_time,dep1,...,|start,end".
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(job_info_file_a) as job_info_file:
        for line in job_info_file:
            line = line.strip('\n')
            part1, part2 = line.split('|')
            vec_resource = part1.split(',')
            job_id = vec_resource[0]
            cpu = float(vec_resource[1])
            mem = float(vec_resource[2])
            number_of_instances = int(vec_resource[3])
            execution_time = int(vec_resource[4])
            # NOTE(review): [5:-1] drops the final field (presumably the empty
            # string left by a trailing comma) — confirm against the data file.
            dependency_task_id = vec_resource[5:-1]
            starttime, endtime = part2.split(',')
            starttime = int(starttime)
            endtime = int(endtime)
            Jobs[job_id] = Job(job_id, cpu, mem, number_of_instances,
                               execution_time, dependency_task_id,
                               starttime, endtime)
def ReadMachineResources(machine_resources_file_a):
    """Parse the machine-resource CSV and populate the global ``Machines``
    dict.

    Each line: machineid, cpu-series ('|'-separated), mem-series, disk, P, M, PM.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(machine_resources_file_a) as machine_resfile:
        for line in machine_resfile:
            line = line.strip('\n')
            vec_resource = line.split(',')
            machineid = vec_resource[0]
            # BUG FIX: np.float was removed in NumPy 1.24; the builtin float
            # is the equivalent dtype.
            cpu = np.array(vec_resource[1].split('|')).astype(float)
            mem = np.array(vec_resource[2].split('|')).astype(float)
            disk = int(vec_resource[3])
            P = int(vec_resource[4])
            M = int(vec_resource[5])
            PM = int(vec_resource[6])
            Machines[machineid] = Machine(machineid, cpu, mem, disk, P, M, PM)
def ReadInferrence(app_interference_file):
    # Parse the app-interference file (lines: appa,appb,k).
    # NOTE(review): the statements that populated `Inferrences` are commented
    # out, so this function currently reads and discards every line — confirm
    # whether interference constraints are intentionally disabled.
    inferrence_file = open(app_interference_file)
    for line in inferrence_file:
        line = line.strip('\n')
        vec_resource = line.split(',')
        appa = vec_resource[0]
        appb = vec_resource[1]
        # k = int(vec_resource[2])
        # Inferrences[appa+" "+appb] = k
        # Inferrences[appb+" "+appa] = k
def ReadDeploy(inst_deploy_file_a):
    # Read the instance-deployment file and split instances into PreDeploy
    # (already placed on a machine) and NonDeploy (not yet placed).
    # NOTE(review): the two bare names below are expression statements that
    # merely evaluate the module-level lists (from `util`); they are NOT
    # `global` declarations. Appending mutates the shared lists in place.
    # Global variables indicate insts that have been deployed in advance
    PreDeploy
    # Global variables indicate insts that are not deployed in advance
    NonDeploy
    deploy_file = open(inst_deploy_file_a)
    for line in deploy_file:
        line = line.strip('\n')
        vec_resource = line.split(',')
        inst = vec_resource[0]
        app = vec_resource[1]
        machine = vec_resource[2]
        # a non-empty machine field means the instance is already placed
        if(len(machine) > 1):
            PreDeploy.append([inst, app, machine])
        else:
            NonDeploy.append([inst, app, ''])
        # Insts maps instance -> [app id, assigned machine (None for now)]
        Insts[inst] = [app, None]
        Apps[app].instance.append(inst)
def CheckConstraint():
    """Debug helper: verify the current ``Deployments`` satisfy every
    machine-capacity constraint (cpu, mem, disk, P, M, PM) and the
    app-interference limits.

    Returns
    -------
    bool
        True when every machine satisfies all constraints.
    """
    for machine in Deployments:
        localInsts = Deployments[machine]
        AppCounter = {}
        if not len(localInsts):
            continue
        # Accumulated resource usage of all insts on this machine.
        # cpu/mem are 98-slot time series; np.float was removed in
        # NumPy 1.24, so use the builtin float dtype.
        localCpu = np.zeros((98,), dtype=float)
        localMem = np.zeros((98,), dtype=float)
        localDisk = 0
        localP = 0
        localM = 0
        localPM = 0
        for inst in localInsts:
            app = Apps[Insts[inst][0]]
            localCpu += app.cpu
            localMem += app.mem
            localDisk += app.disk
            localP += app.P
            localM += app.M
            localPM += app.PM
        # cpu/mem must fit in every time slot; .all() replaces the
        # reduce(lambda x, y: x & y, ...) fold over the comparison vector.
        if not np.greater_equal(Machines[machine].cpu, localCpu).all():
            logger.debug("CPU fail on "+machine)
            return False
        if not np.greater_equal(Machines[machine].mem, localMem).all():
            logger.debug("Memory fail on "+machine)
            return False
        if Machines[machine].disk < localDisk:
            logger.debug("disk fail on "+machine)
            return False
        if Machines[machine].P < localP:
            logger.debug("P fail on "+machine)
            return False
        if Machines[machine].M < localM:
            # BUG FIX: the original built this message as a bare expression
            # and never logged it.
            logger.debug("M fail on "+machine)
            return False
        if Machines[machine].PM < localPM:
            logger.debug("PM fail on "+machine)
            return False
        # interference check
        for inst in localInsts:
            curApp = Insts[inst][0]
            AppCounter[curApp] = AppCounter.get(curApp, 0) + 1
            # BUG FIX: the original iterated the *characters* of the app-id
            # string (`for appa in curApp`), so the Inferrences lookup could
            # never match a real app pair; iterate the apps seen so far.
            for appa in AppCounter:
                for appb in AppCounter:
                    key = appa + " " + appb
                    if key in Inferrences and AppCounter[appb] > Inferrences[key]:
                        print("Inferrence between "+appa+" " +
                              appb+" broken "+"on "+machine)
                        return False
    # every constraint satisfied
    return True
def Loaddata(text):
    # Main data-loading entry point for this module.
    # text selects the dataset: 'olda', 'oldb', or a round id ('a'..'e')
    # interpolated into the generic file names.
    # Returns the populated global dicts (Apps, Machines, Insts).
    # NOTE(review): zzl_solution_file_a and sort_ins_list are assigned but
    # never used in this function -- presumably leftovers; confirm before
    # removing.
    if text == 'olda':
        olddata_app_interference_file = './../data/data-a/scheduling_preliminary_app_interference_20180606.csv'
        olddata_app_resources_file = './../data/data-a/scheduling_preliminary_app_resources_20180606.csv'
        olddata_machine_resources = './../data/data-a/scheduling_preliminary_machine_resources_20180606.csv'
        olddata_inst_deploy_file = './../data/data-a/scheduling_preliminary_instance_deploy_20180606.csv'
        zzl_solution_file_a = "./submit/solution_olda.csv"
        sort_ins_list = np.loadtxt('inssort/sort' + text + '.txt')
        ReadAppResources(olddata_app_resources_file)
        ReadMachineResources(olddata_machine_resources)
        ReadInferrence(olddata_app_interference_file)
        ReadDeploy(olddata_inst_deploy_file)
    elif text == 'oldb':
        olddata_app_interference_file = './../data/data-a/scheduling_preliminary_b_app_interference_20180726.csv'
        olddata_app_resources_file = './../data/data-a/scheduling_preliminary_b_app_resources_20180726.csv'
        olddata_machine_resources = './../data/data-a/scheduling_preliminary_b_machine_resources_20180726.csv'
        olddata_inst_deploy_file = './../data/data-a/scheduling_preliminary_b_instance_deploy_20180726.csv'
        zzl_solution_file_a = "./submit/solution_oldb.csv"
        sort_ins_list = np.loadtxt('inssort/sort' + text + '.txt')
        ReadAppResources(olddata_app_resources_file)
        ReadMachineResources(olddata_machine_resources)
        ReadInferrence(olddata_app_interference_file)
        ReadDeploy(olddata_inst_deploy_file)
        # ReadJobInformation()
    else:
        # NOTE(review): the interference file below points at the
        # instance_deploy pattern (same as olddata_inst_deploy_file) --
        # looks like a copy-paste slip; confirm the intended
        # app_interference file name.
        olddata_app_interference_file = './../data/instance_deploy.{}.csv'.format(
            text)
        olddata_app_resources_file = './../data/app_resources.csv'
        olddata_machine_resources = './../data/machine_resources.{}.csv'.format(
            text)
        olddata_inst_deploy_file = './../data/instance_deploy.{}.csv'.format(
            text)
        zzl_solution_file_a = "./submit/solution_oldb.csv"
        # sort_ins_list = np.loadtxt('inssort/sort' + text + '.txt')
        ReadAppResources(olddata_app_resources_file)
        ReadMachineResources(olddata_machine_resources)
        ReadInferrence(olddata_app_interference_file)
        ReadDeploy(olddata_inst_deploy_file)
        # ReadJobInformation()
    return Apps, Machines, Insts
|
# from __future__ import division
import numpy as np
from os import path as op
import gzip
import bz2
import glob
import json
import traceback # ,itertools
from ase.io import vasp as pv
# remove nested list by list(chain.from_iterable(list_of_lists))
from itertools import chain
import itertools as itr
from math import atan2,degrees
__author__ = "Ali Zendegani"

# Unit-conversion factors, keyed as "<from>__<to>".
_conv = {  # CONVERSION table
    "THzh__meV": 4.1356675,
    "eV__hart": 0.0367493,  # eV to hartree
    "hart__ev": 27.211385,  # hartree to eV
    "angst__bohr": 1.8897261,  # angstrom to bohrradius
    "bohr__angst": 0.5291772,  # bohrradius to angstrom
    "angst3__bohr3": 6.7483345,  # angstrom^3 to bohrradius^3
    "bohr3__angst3": 0.1481847,  # bohrradius^3 to angstrom^3
    "meV__J_mol": 96.485336,  # meV to J mol
    "kK_meV": 0.086173323,  # k_B K to meV
    "meV_ang2__mJ_m2": 16.021766,  # meV/angstrom^2 to mJ/m^2 SFE
    "meV_nm2__mJ_m2": 0.16021766,  # meV/nm^2 to mJ/m^2 SFE
    "GPa__hart_bohr3": 3.3989312e-05,
    "hart_bohr3__GPa": 29421.01  # Hartree / Bohr^3 = 29421.01 GPa
}
# Substrings searched for in OUTCAR lines to locate the quantities below.
_flag_OUTCAR = {  # flags which help extracting data from OUTCAR
    "energy": "energy without entropy",
    "ionsNum": "ions per type",
    "ionsName": "TITEL",
    "ionsName2": "VRHFIN",
    "volume": "volume of cell :"
}
# Substrings searched for in OSZICAR lines (constrained-moment penalty terms).
_flag_OSZICAR = {  # flags which help extracting data from OSZICAR
    "penalty": "E_p",
    "lambda": "lambda",
}
def savefig(fig, fig_name, path, dpi_hq=300, dpi_lq=72, fmt='png'):
    '''Save *fig* twice under *path*: a high-resolution copy named
    ``<fig_name>.<fmt>`` and a low-resolution copy named
    ``<fig_name>_LQ.<fmt>``.
    '''
    # The redundant function-local `from os import path as op` was removed:
    # op is already imported at module level. Trailing semicolons dropped.
    fig.tight_layout()
    # high-quality copy
    fig.savefig(op.join(path, fig_name + '.' + fmt), format=fmt, dpi=dpi_hq)
    # low-quality (preview) copy
    fig.savefig(op.join(path, fig_name + '_LQ' + '.' + fmt), format=fmt, dpi=dpi_lq)
def labelLine(line, x, label=None, align=True, **kwargs):
    '''Draw *label* (default: the line's own label) directly on *line* at
    abscissa *x*, optionally rotated to follow the local slope on screen.
    Extra kwargs are forwarded to ``ax.text``.
    '''
    ax = line.axes
    xdata = line.get_xdata()
    ydata = line.get_ydata()
    # Refuse label positions outside the data range.
    if x < xdata[0] or x > xdata[-1]:
        print('x label location is outside data range!')
        return
    # Index of the first data point strictly right of x (1 if none found).
    ip = next((i for i, xd in enumerate(xdata) if x < xd), 1)
    # Linearly interpolate the y coordinate on segment [ip-1, ip].
    y = ydata[ip-1] + (ydata[ip]-ydata[ip-1])*(x-xdata[ip-1])/(xdata[ip]-xdata[ip-1])
    if not label:
        label = line.get_label()
    if align:
        # Rotate the text to the segment's on-screen angle.
        ang = degrees(atan2(ydata[ip] - ydata[ip-1], xdata[ip] - xdata[ip-1]))
        pt = np.array([x, y]).reshape((1, 2))
        trans_angle = ax.transData.transform_angles(np.array((ang,)), pt)[0]
    else:
        trans_angle = 0
    # Fill in display defaults without clobbering caller-supplied kwargs.
    kwargs.setdefault('color', line.get_color())
    if 'horizontalalignment' not in kwargs and 'ha' not in kwargs:
        kwargs['ha'] = 'center'
    if 'verticalalignment' not in kwargs and 'va' not in kwargs:
        kwargs['va'] = 'center'
    kwargs.setdefault('backgroundcolor', ax.get_facecolor())
    kwargs.setdefault('clip_on', True)
    kwargs.setdefault('zorder', 2.5)
    ax.text(x, y, label, rotation=trans_angle, **kwargs)
def labelLines(lines, align=True, xvals=None, **kwargs):
    '''Label every line that carries an explicit (non-default) label,
    spacing the labels evenly over the x-range unless *xvals* is given.'''
    ax = lines[0].axes
    # Matplotlib's auto-generated labels contain '_line'; skip those.
    labeled = [(ln, ln.get_label()) for ln in lines
               if '_line' not in ln.get_label()]
    if xvals is None:
        xmin, xmax = ax.get_xlim()
        # Evenly spaced interior positions, excluding both axis limits.
        xvals = np.linspace(xmin, xmax, len(labeled) + 2)[1:-1]
    for (ln, lab), xv in zip(labeled, xvals):
        labelLine(ln, xv, lab, align, **kwargs)
def repListList(a, b):
    '''Repeat each element of list *a* the number of times given by the
    corresponding entry of list *b*.'''
    return [elem for elem, count in zip(a, b) for _ in range(count)]
def permute(item_list, length):
    '''Return all strings of the given *length* built from *item_list*
    (Cartesian product with repetition, in product order).'''
    combos = itr.product(item_list, repeat=length)
    return ["".join(combo) for combo in combos]
def dZ(number):
    '''Format a single z-magnetization as a " 0. 0. <z> " triplet string.'''
    return " 0. 0. {} ".format(number)
def dZlist(mags):
    '''Join many z-magnetizations into one "0. 0. z 0. 0. z ..." string.'''
    parts = [str(m) for m in mags]
    return '0. 0. ' + ' 0. 0. '.join(parts)
def dZlistFmt(mags, n):
    '''Like dZlist, but each magnetization is printed with *n* decimal places.

    n: floating point decimal precision
    '''
    formatted = ['{0:-1.{1}f}'.format(m, n) for m in mags]
    return '0. 0. ' + ' 0. 0. '.join(formatted)
def func2deg(x, a, b, c):
    '''Quadratic polynomial a*x^2 + b*x + c (curve-fitting model).'''
    quadratic = a * x**2
    linear = b * x
    return quadratic + linear + c
def func3deg(x, a, b, c, d):
    '''Cubic polynomial a*x^3 + b*x^2 + c*x + d (curve-fitting model).'''
    result = a * x**3
    result = result + b * x**2
    result = result + c * x
    return result + d
def func5deg(x, a, b, c, d, e, f):
    '''Quintic polynomial a*x^5 + ... + e*x + f (curve-fitting model).'''
    result = a * x**5
    result = result + b * x**4
    result = result + c * x**3
    result = result + d * x**2
    result = result + e * x
    return result + f
def rough_comparison(value, target, tol_percent):
    '''True when *value* lies within *tol_percent* of *target* in magnitude
    and carries the same sign.

    Bitwise & is used (instead of `and`) so the check also works
    elementwise on numpy arrays.
    '''
    upper_ok = abs(value) <= abs(target * (100 + tol_percent) / 100)
    lower_ok = abs(value) >= abs(target * (100 - tol_percent) / 100)
    same_sign = (value < 0) == (target < 0)
    return upper_ok & lower_ok & same_sign
def murnFit(v, e0, v0, b0, b0p):
    '''Energy at volume *v* from the Murnaghan equation of state
    (Wikipedia parametrization) given e0, v0, b0, b0p.'''
    ratio = v0 / v
    prefactor = b0 * v / (b0p * (b0p - 1))
    bracket = b0p * (1 - ratio) + ratio**b0p - 1
    return e0 + prefactor * bracket
def murnFitSPHINX(v, e0, v0, b0, b0p):
    '''Energy at volume *v* from the Murnaghan equation of state in the
    SPHInX-website parametrization, given e0, v0, b0, b0p.'''
    denom = b0p - 1
    term = b0 * v / b0p * (1 + (v0 / v)**b0p / denom)
    return e0 + term - (b0 * v0) / denom
def minEnV(murnData):
    '''Return, flattened into one list, the row(s) of *murnData* selected by
    the minimum of column 1 (the energy column).

    Note: rows are picked wherever ANY element equals that minimum -- the
    comparison runs over the whole array, matching the original behavior.
    '''
    lowest = min(murnData[:, 1])
    row_idx = np.nonzero(murnData == lowest)[0]
    selected = murnData[row_idx, :]
    return list(chain.from_iterable(selected))
def data_extractor_murn(murn_file):
    ''' Extract e0, v0, b0 and b0' from a SPHInX murn.dat file.

    Line layout (0-based): line 1 holds B0 (next-to-last token), line 2
    holds B0', line 3 holds E0 (hartree), line 4 holds V0 (bohrradius^3).

    Returns
    -------
    (murnE0hartree, murnV0bohr, murnB0, murnB0p) : tuple of float
    '''
    # The unused eV/angstrom^3 conversions the original computed and
    # discarded (murnE0eV, murnV0angst) were removed.
    with open(murn_file) as fp:
        for i, line in enumerate(fp):
            if i == 1:
                murnB0 = float(line.split()[-2])
            elif i == 2:
                murnB0p = float(line.split()[-1])
            elif i == 3:
                murnE0hartree = float(line.split()[-1])  # hartree
            elif i == 4:
                # bohrradius^3
                murnV0bohr = float(line.split()[-1])
            elif i > 4:
                # all required lines read -- stop early
                break
    return murnE0hartree, murnV0bohr, murnB0, murnB0p
def formE(E_tot, E_elms, n_elms):
    ''' Formation energy per atom of a compound.

    Parameters
    ----------
    E_tot : total energy of the compound per atom
    E_elms : list of total energies per atom of the constituents
    n_elms : list of numbers of atoms of each constituent
    '''
    counts = np.asarray(n_elms)
    n_tot = counts.sum()
    # Energy of the same atom counts in their elemental reference states.
    reference = (np.asarray(E_elms) * counts).sum()
    return (n_tot * E_tot - reference) / n_tot
# def formE(E,n_Fe,n_Nb):
# n_tot = n_Fe + n_Nb
# return (n_tot*E-(n_Fe*Etot_Fe_ea+n_Nb*Etot_Nb_NM_ea))/n_tot
def fitPol(data, degree):
    ''' Least-squares polynomial of the given *degree* through the points
    data[:, 0] -> data[:, 1]; returns a callable numpy poly1d.'''
    xs = data[:, 0]
    ys = data[:, 1]
    coeffs = np.polyfit(xs, ys, degree)
    return np.poly1d(coeffs)
def SFE_chu(E_C14, E_C15, a_C14):
    ''' Stacking-fault energy via the Chu approach.

    Parameters
    ----------
    E_C14, E_C15 : Energy (meV per atom)
    a_C14 : Lattice parameter of C14 (Angstrom) in basal plane

    Returns
    -------
    [SFE in mJ/m^2, energy difference E_C15 - E_C14 in meV]
    '''
    diff = E_C15 - E_C14
    sfe = 24 * diff / (3**.5 * a_C14**2) * _conv['meV_ang2__mJ_m2']
    return [sfe, diff]
def SFE_SC(E_C14, E_SC, A, n_atoms):
    ''' Stacking-fault energy via the supercell approach.

    Parameters
    ----------
    E_C14, E_SC : Energy (meV per atom)
    A : Area of fault in supercell (Angstrom^2)
    n_atoms : Number of atoms in the supercell

    Returns
    -------
    [SFE in mJ/m^2, total energy difference in meV]
    '''
    diff = E_SC - E_C14
    sfe = diff * n_atoms / A * _conv['meV_ang2__mJ_m2']
    return [sfe, diff * n_atoms]
def vec_a_hex(vol, c_a):
    ''' a-lattice vector of a hexagonal cell from its volume and c/a ratio.

    Parameters
    ----------
    vol : volume of the hexagonal cell (Angstrom^3)
    c_a : c/a ratio of the hexagonal cell

    Returns
    -------
    a-vector length (Angstrom)
    '''
    # V = sin(120 deg) * a^2 * c  =>  a = (V / (sin(120) * c/a))^(1/3)
    basal_factor = np.sin(np.radians(120)) * c_a
    return (vol / basal_factor)**(1/3)
def area_hex_basal(vol, c_a):
    ''' Basal-plane area of a hexagonal cell from its volume and c/a ratio.

    Parameters
    ----------
    vol : volume of the hexagonal cell (Angstrom^3)
    c_a : c/a ratio of the hexagonal cell

    Returns
    -------
    basal area (Angstrom^2)
    '''
    sin120 = np.sin(np.radians(120))
    vec_a = (vol / (sin120 * c_a))**(1/3)
    return vec_a**2 * sin120
class projects_path_keeper():
    '''Manage (show, add, remove, save, load) the path information for each
    project, plus a short comment for each.

    The data is a dict {project_name: project_path_data} persisted as JSON
    in ``database_filename`` inside ``work_directory``.
    '''
    def __init__(self, work_directory, database_filename):
        # op.join(..., '') normalizes the directory to end with a separator.
        self.wk_dir = op.join(work_directory, '')
        self.db_filename = database_filename
        self.data = {}
        if op.isdir(self.wk_dir):
            self.db_load()
        else:
            print("Working directory not found!")

    def _db_path(self):
        # Full path of the JSON database file.
        return op.join(self.wk_dir, self.db_filename)

    def db_create(self):
        '''Create an empty database file unless one already exists.'''
        if op.isfile(self._db_path()):
            print("Databse already exist! Load with .db_load()")
        else:
            # 'with' closes the handle; the original leaked open file objects.
            with open(self._db_path(), 'w') as fh:
                json.dump({}, fh)
            print("Empty database created. Add project data by .prj_add()")

    def db_load(self):
        '''Load the database into self.data (self.data stays {} on failure).'''
        try:
            with open(self._db_path()) as fh:
                self.data = json.load(fh)
        except IOError as e:
            # BUG FIX: the original `except (IOError, e)` referenced an
            # undefined name and raised NameError whenever the file was
            # missing (crashing __init__); `e.message` also does not exist
            # on Python 3 exceptions.
            print(("Database not found! Create a new one with .db_create()",
                   str(e)))
        else:
            print("database of projects' path loaded.")

    def db_save(self):
        '''Write self.data back to the JSON database file.'''
        with open(self._db_path(), 'w') as fh:
            json.dump(self.data, fh, sort_keys=True)

    def db_show(self):
        '''Return the list of known project names.'''
        return list(self.data.keys())

    def prj_add(self, project_name, project_path_data):
        '''Add a new project entry; refuses to overwrite an existing one.'''
        if project_name not in self.data:
            self.data[project_name] = project_path_data
        else:
            print((
                project_name +
                " is already in database, to modify it use .prj_modify command"
            ))

    def prj_modify(self, project_name, project_tag, new_value):
        '''Set one tag of an existing project to a new value.'''
        if project_name in self.data:
            self.data[project_name][project_tag] = new_value
        else:
            print((project_name +
                   " is not in the database, to add it use .prj_add command"))

    def prj_remove(self, project_name):
        '''Remove a project from the in-memory data (call db_save to persist).'''
        if project_name in self.data:
            self.data.pop(project_name)
            print((project_name + " removed"))
        else:
            print((
                project_name +
                " is not in the database, use .db_show() command to see list of projects"
            ))

    def prj_show(self, project_name):
        '''Return the stored path data for *project_name* (None if unknown).'''
        if project_name in self.data:
            return self.data[project_name]
        else:
            print((
                project_name +
                " is not in the database, use .db_show() command to see list of projects"
            ))
class MatPhaseObj():
    ''' Main object for one material phase.

    Holds e.g. composition, structure, the Birch-Murnaghan E-V curve and
    the vibrational (harmonic) contribution, located via the folder layout
    described by ``project_dict`` and loaded by verifyInput()/loadData().
    '''
    def __init__(self, name, project_dict, def_name=None):
        # Default def_name: recover the variable name on the left of the
        # assignment that created this object from the calling source line.
        if def_name == None:
            (filename, line_number, function_name,
             text) = traceback.extract_stack()[-2]
            def_name = text[:text.find('=')].strip()
        self.defined_name = def_name
        self.name = name
        self.project_dict = project_dict
        self.data = project_dict
        self.magnetic_calc = False
        # get as input not static
        # Conversion factors and OUTCAR/OSZICAR parse flags are loaded from
        # JSON files under the user's home db directory; the keys used later
        # must match whatever those files define.
        self.path_dbs = op.join(op.expanduser('~') + "/Git/AZscripts/db", "")
        self._conv = json.load(open(self.path_dbs + "_conv.txt"))
        self._flag_OUTCAR = json.load(open(self.path_dbs + "_flag_OUTCAR.txt"))
        self._flag_OSZICAR = json.load(
            open(self.path_dbs + "_flag_OSZICAR.txt"))
        # Expected file names inside the project folders.
        self._file_name = {
            "Harmonic": 'thermo.out',
            "Murn": 'murn.dat',
            "EV": 'Bohr3-Hartree.dat',
            "OUTCAR": 'OUTCAR',
            "OSZICAR": 'OSZICAR',
            "vasprun": 'vasprun.xml',
        }
        self._folder_name = {
            "thermo": 'thermo/',
            "background": 'forces_background/',
        }
        # Existence flags, filled in by verifyInput().
        self._file_exist = {
            "Harmonic":
            False,  # the file thermo.out which contains harmonic free energy
            "background": False,
            "Murn": False,
            "EV": False,
            "staticOUT": False,
        }
        self._folder_exist = {
            "harmonic":
            False,  # the main folder contains displaced structures and forces_background
            "thermo": False,
            "background": False,
            "Murn": False,
            "static": False,
        }
        self._status = {
            "input_verification": False,
            "data_loaded": False,
        }
    def __str__(self):
        # Compact status dump for debugging.
        return str([
            'Phase:', self.name, 'Status:', self._status, 'Folders:',
            self._folder_exist, 'Files:', self._file_exist
        ])
    @staticmethod
    def file_type(filename):
        '''Guess the compression type of *filename* from its magic bytes.

        NOTE(review): the file is opened in TEXT mode while the magic
        signatures are byte patterns; on Python 3 this may never match
        (and can raise UnicodeDecodeError on binary input) -- confirm and
        consider opening with "rb" and bytes keys.
        '''
        magic_dict = {
            "\x1f\x8b\x08": "gz",
            "\x42\x5a\x68": "bz2",
            "\x50\x4b\x03\x04": "zip",
        }
        max_len = max(len(x) for x in magic_dict)
        with open(filename) as f:
            file_start = f.read(max_len)
        for magic, filetype in list(magic_dict.items()):
            if file_start.startswith(magic):
                return filetype
        return "no match"
    @staticmethod
    def getFileName(path, pattern):
        '''Return the unique file in *path* whose name starts with *pattern*,
        or 0 when none (or more than one) is found.'''
        _ = glob.glob(op.join(path, pattern) + "*")
        if not _:
            print(("Error: File " + pattern + " not found"))
            return 0
        elif len(_) > 1:
            print(("Error: more than one file found starting wiht", pattern,
                   [i.split('/')[-1] for i in _]))
            return 0
        else:
            return _[0]
    @staticmethod
    def openFile(inputFile):
        '''Return the lines of *inputFile*, transparently handling gz/bz2.

        NOTE(review): a "zip" detection falls through every branch and
        leaves outputFile unbound (UnboundLocalError); the bz2 check should
        probably be elif-chained and a zip branch added -- confirm.
        '''
        if op.isfile(inputFile):
            checkType = MatPhaseObj.file_type(inputFile)
            if checkType == "gz":
                outputFile = gzip.open(inputFile, "rb").readlines()
            if checkType == "bz2":
                outputFile = bz2.BZ2File(inputFile, "rb").readlines()
            elif checkType == "no match":
                outputFile = open(inputFile, "r").readlines()
            return outputFile
        else:
            print("Error: File not found")
            return
    def data_extractor_vasprun(self, vasprun_path):
        '''Parse a vasprun.xml and keep the result on self.vasprun.

        NOTE(review): relies on ``ase.io.vasp`` exposing ``Vasprun`` --
        verify against the installed ase version.
        '''
        self.vasprun = pv.Vasprun(vasprun_path)
        print((self.name, " vasprun data extracted."))
    def data_extractor_OSZICAR(self, OSZICAR_path):
        '''Return (penalty, Lambda) from the last matching OSZICAR line,
        (0.0, 0.0) when no penalty entry is present, or None when the file
        cannot be located.'''
        penalty, Lambda = 0.0, 0.0
        _fileOSZICAR = MatPhaseObj.getFileName(OSZICAR_path,
                                               self._file_name["OSZICAR"])
        if _fileOSZICAR:
            _ = MatPhaseObj.openFile(_fileOSZICAR)
            for line in _:
                if self._flag_OSZICAR["penalty"] in line:
                    penalty = float(line.split()[2])  # eV per cell
                    Lambda = float(line.split()[-1])
            if penalty:
                return penalty, Lambda
            return 0.0, 0.0
        else:
            print("Error: File not found")
            return
    def data_extractor_murn(self):
        '''Load the E-V table and the Murnaghan fit parameters from the
        murn-folder files located by verifyInput().'''
        if self._file_exist["EV"]:
            self.Ene_Vol = np.loadtxt(self._fileEV)
        if self._file_exist["Murn"]:
            self.murnEV = np.loadtxt(self._fileMurn)
            fp = open(self._fileMurn)
            for i, line in enumerate(fp):
                if i == 1:
                    self.murnB0 = float(line.split()[-2])
                elif i == 2:
                    self.murnB0p = float(line.split()[-1])
                elif i == 3:
                    self.murnE0hartree = float(line.split()[-1])  # hartree
                    # NOTE(review): keys "hart2ev" / "vol_bohr2angst" must
                    # exist in the loaded _conv.txt JSON; they differ from
                    # the module-level _conv key names -- confirm.
                    self.murnE0eV = self.murnE0hartree * self._conv["hart2ev"]
                elif i == 4:
                    # bohrradius^3
                    self.murnV0bohr = float(line.split()[-1])
                    self.murnV0angst = self.murnV0bohr * \
                        self._conv["vol_bohr2angst"]
                elif i > 5:
                    break
            fp.close()
    def data_extractor_harmonic(self):
        '''Load the harmonic free-energy table (thermo.out) if present.'''
        if self._file_exist["Harmonic"]:
            self.Ene_harmonic = np.loadtxt(self._fileTherm)
    def data_extractor_OUTCAR(self, OUTCARfile):
        '''Extract energy, ion counts/names, cell volume and the final
        magnetization tables from an OUTCAR; sets self.magnetic_calc when
        any total moment is nonzero. Returns a dict of the parsed values.'''
        output = {}
        output['atoms'] = []
        _ = MatPhaseObj.openFile(OUTCARfile)
        for line in _:
            if self._flag_OUTCAR["energy"] in line:
                output['E_Sigma0_perCell_eV'] = float(
                    line.split()[-1])  # eV per cell
            if self._flag_OUTCAR["ionsNum"] in line:
                output['atomNum'] = list(map(int, line.split("=")[-1].split()))
            if self._flag_OUTCAR["ionsName"] in line:
                output['atoms'].append(line.split()[-2])
            if self._flag_OUTCAR["volume"] in line:
                output['vol'] = float(line.split()[-1])
        output['AtomTotalNum'] = sum(output['atomNum'])
        output['Atom_Num'] = {}
        output['Atom_Type_Num'] = len(output['atoms'])
        for i in range(output['Atom_Type_Num']):
            output['Atom_Num'][output['atoms'][i]] = output['atomNum'][i]
        output['vol_atom'] = output['vol'] / output['AtomTotalNum']
        output['E_Sigma0_perAtom_eV'] = output['E_Sigma0_perCell_eV'] / \
            output['AtomTotalNum']  # eV per atom
        counter = 0
        # Magnetization tables: one 5-column row per atom for x/y/z.
        output['MagX'] = np.zeros((output['AtomTotalNum'], 5))
        output['MagY'] = np.zeros((output['AtomTotalNum'], 5))
        output['MagZ'] = np.zeros((output['AtomTotalNum'], 5))
        # Scan from the end to find the LAST "magnetization (x)" block; the
        # y/z blocks, when present, follow at fixed offsets (4 header lines,
        # AtomTotalNum+9 lines per block) -- presumably matching this VASP
        # version's OUTCAR layout; confirm for other versions.
        for line in reversed(_):
            if "magnetization (x)" in line:
                is_ymag = 'magnetization (y)' in _[-counter - 1 +
                                                   output['AtomTotalNum'] + 9]
                is_zmag = 'magnetization (z)' in _[
                    -counter - 1 + 2 * (output['AtomTotalNum'] + 9)]
                for i in range(output['AtomTotalNum']):
                    output['MagX'][i] = np.asarray(
                        list(map(float, _[-counter - 1 + 4 + i].split())))  # x
                    if is_ymag:
                        output['MagY'][i] = np.asarray(
                            list(map(
                                float,
                                _[-counter - 1 + 4 + i +
                                  output['AtomTotalNum'] + 9].split())))  # y
                    if is_zmag:
                        output['MagZ'][i] = np.asarray(
                            list(map(
                                float,
                                _[-counter - 1 + 4 + i + 2 *
                                  (output['AtomTotalNum'] + 9)].split())))  # z
                break
            counter += 1
        # Any nonzero entry in the last (total-moment) column marks the
        # calculation as magnetic.
        if np.sum(np.nonzero(np.array(output['MagX'][:, -1]) != 0)) > 1 or\
           np.sum(np.nonzero(np.array(output['MagY'][:, -1]) != 0)) > 1 or \
           np.sum(np.nonzero(np.array(output['MagZ'][:, -1]) != 0)) > 1:
            self.magnetic_calc = True
        return output
    def loadData(self):
        '''Populate the d_* summary attributes from static, background or
        murn data (tried in that order of preference).'''
        self._status['data_loaded'] = True
        self.data_extractor_harmonic()
        self.data_extractor_murn()
        if self._file_exist["staticOUT"]:
            self.static = self.data_extractor_OUTCAR(self._fileOUTCAR)
            self.static['penalty'], self.static[
                'Lambda'] = self.data_extractor_OSZICAR(
                    self.project_dict['static']['path'])
        if self._file_exist["background"]:
            self.background = self.data_extractor_OUTCAR(
                self._fileOUTCARbackground)
            self.background['penalty'], self.background[
                'Lambda'] = self.data_extractor_OSZICAR(
                    op.join(self.project_dict['harmonic']['path'],
                            self._folder_name["background"]))
        # self.data={} why it is here???
        # NOTE(review): the nested bare excepts below silently fall through
        # on ANY error (not just a missing attribute); consider catching
        # AttributeError explicitly.
        try:
            self.d_tot_ene_per_atom_eV, self.d_tot_penalty_ene_eV, self.d_lambda,\
                self.d_tot_vol_atom_ang3, self.d_tot_atom_num,\
                self.d_MagX, self.d_MagY, self.d_MagZ =\
                self.static['E_Sigma0_perAtom_eV'], self.static['penalty'],\
                self.static['Lambda'], self.static['vol_atom'], self.static['AtomTotalNum'],\
                self.static['MagX'], self.static['MagY'], self.static['MagZ']
        except:
            try:
                self.d_tot_ene_per_atom_eV, self.d_tot_penalty_ene_eV, self.d_lambda,\
                    self.d_tot_vol_atom_ang3, self.d_tot_atom_num,\
                    self.d_MagX, self.d_MagY, self.d_MagZ =\
                    self.background['E_Sigma0_perAtom_eV'], self.background['penalty'],\
                    self.background['Lambda'], self.background['vol_atom'],\
                    self.background['AtomTotalNum'], self.background['MagX'],\
                    self.background['MagY'], self.background['MagZ']
            except:
                try:
                    self.d_tot_ene_per_atom_eV, self.d_tot_penalty_ene_eV, self.d_lambda,\
                        self.d_tot_vol_atom_ang3, self.d_tot_atom_num =\
                        self.murnE0eV, np.NaN, np.NaN, self.murnV0angst, np.NaN
                except:
                    print((
                        "At least provide one of the background, murn or static data! ",
                        self.defined_name))
    def verifyInput(self):
        '''Check which project folders/files exist and record the results in
        self._folder_exist / self._file_exist; also caches the resolved file
        paths (_fileTherm, _fileMurn, _fileEV, _fileOUTCAR, ...).'''
        self._status['input_verification'] = True
        if self.project_dict['harmonic']['path']:
            self.project_dict['harmonic']['path'] =\
                op.join(self.project_dict['harmonic']['path'], '')
            if op.isdir(self.project_dict['harmonic']['path']):
                self._folder_exist["harmonic"] = True
            if op.isdir(self.project_dict['harmonic']['path'] +
                        self._folder_name["thermo"]):
                self._folder_exist["thermo"] = True
            if op.isdir(self.project_dict['harmonic']['path'] +
                        self._folder_name["background"]):
                self._folder_exist["background"] = True
            self._fileTherm = self.project_dict['harmonic']['path']\
                + self._folder_name["thermo"]+self._file_name["Harmonic"]
            if op.isfile(self._fileTherm):
                self._file_exist["Harmonic"] = True
            self._fileOUTCARbackground = MatPhaseObj.getFileName(
                self.project_dict['harmonic']['path'] +
                self._folder_name["background"], self._file_name["OUTCAR"])
            if op.isfile(self._fileOUTCARbackground):
                self._file_exist["background"] = True
        if self.project_dict['murn']['path']:
            self.project_dict['murn']['path'] = op.join(
                self.project_dict['murn']['path'], '')
            if op.isdir(self.project_dict['murn']['path']):
                self._folder_exist["Murn"] = True
            self._fileMurn = self.project_dict['murn']['path'] + \
                self._file_name["Murn"]
            if op.isfile(self._fileMurn):
                self._file_exist["Murn"] = True
            self._fileEV = self.project_dict['murn']['path'] + \
                self._file_name["EV"]
            if op.isfile(self._fileEV):
                self._file_exist["EV"] = True
        # NOTE(review): the bare except hides every failure in the static
        # branch (e.g. a missing 'static' key); narrow it if possible.
        try:
            if self.project_dict['static']['path']:
                # will add the trailing slash if it's not already there
                self.project_dict['static']['path'] = op.join(
                    self.project_dict['static']['path'], '')
                if op.isdir(self.project_dict['static']['path']):
                    self._folder_exist["static"] = True
                self._fileOUTCAR = MatPhaseObj.getFileName(
                    self.project_dict['static']['path'],
                    self._file_name["OUTCAR"])
                if op.isfile(self._fileOUTCAR):
                    self._file_exist["staticOUT"] = True
        except:
            pass
from gevent import monkey
monkey.patch_all()
from flask import Flask
from TikTokApi import TikTokApi
# Shared TikTokApi client reused by every request handler.
api = TikTokApi.get_instance()
# NOTE(review): hard-coded verification fingerprint; these tokens expire --
# confirm it is still valid and consider loading it from configuration.
custom_verifyFp = "verify_kqv2svy0_gQARqKRo_fV40_4EW1_Bob0_RGCkXxCbfYTz"
app = Flask(__name__)
@app.route('/tiktok/<tiktok_id>', methods=['GET'])
def tiktok(tiktok_id):
    """Return the TikTok metadata for *tiktok_id* as the HTTP response body."""
    return api.get_tiktok_by_id(tiktok_id, custom_verifyFp=custom_verifyFp)
if __name__ == '__main__':
    # Listen on all interfaces; port 105 is privileged -- running this
    # requires root on Linux.
    app.run(host='0.0.0.0', port=105)
|
# -*- coding: utf-8 -*-
# @author: Alexander Pitchford
# @email1: agp1@aber.ac.uk
# @email2: alex.pitchford@gmail.com
# @organization: Aberystwyth University
# @supervisor: Daniel Burgarth
"""
Utility functions for symplectic matrices
"""
import numpy as np
def calc_omega(n):
    """
    Calculate the 2n x 2n Omega matrix
    Used as dynamics generator phase to calculate symplectic propagators

    Parameters
    ----------
    n : scalar(int)
        number of modes in oscillator system

    Returns
    -------
    array(float)
        Symplectic phase Omega
    """
    # Omega is block diagonal with 2x2 blocks [[0, 1], [-1, 0]]: +1 just
    # above the diagonal on even rows, -1 just below it on odd rows.
    # Vectorized index assignment replaces the original O(n^2) Python loop.
    omg = np.zeros((2*n, 2*n))
    even = np.arange(0, 2*n, 2)
    omg[even, even + 1] = 1.0
    omg[even + 1, even] = -1.0
    return omg
|
import pytest
import vaex
import numpy as np
import numpy.ma
# Left-hand fixture: key column 'a', plus float/masked-float/masked-int data.
df_a = vaex.from_arrays(a=np.array(['A', 'B', 'C']),
                        x=np.array([0., 1., 2.]),
                        y=np.ma.array([0., 9., 2.], mask=[False, True, False]),
                        m=np.ma.array([1, 2, 3], mask=[False, True, False])
                        )
# Right-hand fixture: key column 'b' overlaps df_a.a on 'A' and 'B' only.
df_b = vaex.from_arrays(b=np.array(['A', 'B', 'D']),
                        x=np.array([2., 1., 0.]),
                        y=np.ma.array([9., 1., 2.], mask=[True, False, False]),
                        m=np.ma.array([3, 1, 2], mask=[True, False, False])
                        )
# Fixture with a duplicated key ('A' twice) to exercise allow_duplication.
df_dup = vaex.from_arrays(b=np.array(['A', 'B', 'A']),
                          x=np.array([2., 1., 2.]),
                          y=np.ma.array([9., 1., 9.], mask=[True, False, False]),
                          m=np.ma.array([3, 1, 2], mask=[True, True, False])
                          )
# Small right-hand fixture keyed on 'c' (matches df_a on 'B' and 'C').
df_c = vaex.from_arrays(c=np.array(['B', 'C']),
                        z1=np.array([-1., -2.]),
                        z2=np.array([True, False]),
                        )
# Shares the key name 'a' with df_a; overlaps on 'B' and 'C'.
df_d = vaex.from_arrays(a=np.array(['B', 'C', 'D']),
                        x1=np.array(['dog', 'cat', 'mouse']),
                        x2=np.array([3.1, 25, np.nan]),
                        )
# Same columns as df_d but with keys that match nothing in df_a.
df_e = vaex.from_arrays(a=np.array(['X', 'Y', 'Z']),
                        x1=np.array(['dog', 'cat', 'mouse']),
                        x2=np.array([3.1, 25, np.nan]),
                        )
def test_no_on():
    # just adds the columns
    df = df_a.join(df_b, rsuffix='_r')
    # Without 'on', the join is positional: the right-hand column object is
    # shared by reference, not copied.
    assert df.columns['b'] is df_b.columns['b']
def test_join_masked():
    # Joining on a masked column: masked key values must not match anything.
    df = df_a.join(other=df_b, left_on='m', right_on='m', rsuffix='_r')
    assert df.evaluate('m').tolist() == [1, None, 3]
    assert df.evaluate('m_r').tolist() == [1, None, None]
def test_join_nomatch():
    # No keys overlap between df_a and df_e: joined columns are all missing.
    df = df_a.join(df_e, on='a', rprefix='r_')
    assert df.x2.tolist() == [None, None, None]
def test_left_a_b():
    # Left join on differently named key columns; 'C' has no match in df_b.
    df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    assert df.evaluate('a').tolist() == ['A', 'B', 'C']
    assert df.evaluate('b').tolist() == ['A', 'B', None]
    assert df.evaluate('x').tolist() == [0, 1, 2]
    assert df.evaluate('x_r').tolist() == [2, 1, None]
    assert df.evaluate('y').tolist() == [0, None, 2]
    assert df.evaluate('y_r').tolist() == [None, 1, None]
def test_join_indexed():
    # Joining against a frame that is itself the result of a join must still
    # resolve the (indexed) right-hand column correctly.
    df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    df_X = df_a.join(df, left_on='a', right_on='b', rsuffix='_r')
    assert df_X.evaluate('b').tolist() == ['A', 'B', None]
def test_left_a_b_filtered():
    # Left join where the left frame is filtered (not extracted).
    df_af = df_a[df_a.x > 0]
    df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    assert df.evaluate('a').tolist() == ['B', 'C']
    assert df.evaluate('b').tolist() == ['B', None]
    assert df.evaluate('x').tolist() == [1, 2]
    assert df.evaluate('x_r').tolist() == [1, None]
    assert df.evaluate('y').tolist() == [None, 2]
    assert df.evaluate('y_r').tolist() == [1, None]

    # actually, even though the filter is applied, all rows will be matched
    # since the filter can change
    df.set_selection(None, vaex.dataset.FILTER_SELECTION_NAME)
    assert df.evaluate('a').tolist() == ['A', 'B', 'C']
    assert df.evaluate('b').tolist() == ['A', 'B', None]
    assert df.evaluate('x').tolist() == [0, 1, 2]
    assert df.evaluate('x_r').tolist() == [2, 1, None]
    assert df.evaluate('y').tolist() == [0, None, 2]
    assert df.evaluate('y_r').tolist() == [None, 1, None]

    # if we extract, that shouldn't be the case
    df_af = df_a[df_a.x > 0].extract()
    df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    df.set_selection(None, vaex.dataset.FILTER_SELECTION_NAME)
    assert df.evaluate('a').tolist() == ['B', 'C']
    assert df.evaluate('b').tolist() == ['B', None]
    assert df.evaluate('x').tolist() == [1, 2]
    assert df.evaluate('x_r').tolist() == [1, None]
    assert df.evaluate('y').tolist() == [None, 2]
    assert df.evaluate('y_r').tolist() == [1, None]
def test_inner_a_b_filtered():
    # Inner join on a filtered left frame keeps only matching, unfiltered rows.
    df_a_filtered = df_a[df_a.x > 0]
    df = df_a_filtered.join(other=df_b, left_on='a', right_on='b', rsuffix='_r', how='inner')
    assert df.evaluate('a').tolist() == ['B']
    assert df.evaluate('b').tolist() == ['B']
    assert df.evaluate('x').tolist() == [1]
    assert df.evaluate('x_r').tolist() == [1]
    assert df.evaluate('y').tolist() == [None]
    assert df.evaluate('y_r').tolist() == [1]
def test_right_x_x():
    # Right join on the shared 'x' column: row order follows df_b, and the
    # right frame itself must not be mutated by the suffixing.
    df = df_a.join(other=df_b, on='x', rsuffix='_r', how='right')
    assert df.evaluate('a').tolist() == ['C', 'B', 'A']
    assert df.evaluate('b').tolist() == ['A', 'B', 'D']
    assert df.evaluate('x').tolist() == [2, 1, 0]
    assert df.evaluate('x_r').tolist() == [2, 1, 0]
    assert df.evaluate('y').tolist() == [2, None, 0]
    assert df.evaluate('y_r').tolist() == [None, 1, 2]
    assert 'y_r' not in df_b
def test_left_dup():
    # Duplicated right-hand keys with allow_duplication=True grow the result.
    df = df_a.join(df_dup, left_on='a', right_on='b', rsuffix='_r', allow_duplication=True)
    assert len(df) == 4
    # df = df_a.join(df_dup, on='x', rsuffix='_r')
    # df = df_a.join(df_dup, on='m', rsuffix='_r')
def test_left_a_c():
    # Left join against the smaller df_c: row 'A' gets missing values.
    df = df_a.join(df_c, left_on='a', right_on='c', how='left')
    assert df.a.tolist() == ['A', 'B', 'C']
    assert df.x.tolist() == [0, 1, 2]
    assert df.y.tolist() == [0., None, 2.]
    assert df.m.tolist() == [1, None, 3]
    assert df.c.tolist() == [None, 'B', 'C']
    assert df.z1.tolist() == [None, -1., -2.]
    assert df.z2.tolist() == [None, True, False]
def test_join_a_a_suffix_check():
    # Self-join: both lsuffix and rsuffix must be applied to every column.
    df = df_a.join(df_a, on='a', lsuffix='_left', rsuffix='_right')
    assert set(df.column_names) == {'a_left', 'x_left', 'y_left', 'm_left', 'a_right', 'x_right', 'y_right', 'm_right'}
def test_join_a_a_prefix_check():
    # Self-join: both lprefix and rprefix must be applied to every column.
    df = df_a.join(df_a, on='a', lprefix='left_', rprefix='right_')
    assert set(df.column_names) == {'left_a', 'left_x', 'left_y', 'left_m', 'right_a', 'right_x', 'right_y', 'right_m'}
def test_inner_a_d():
    # Inner join keeps only the keys present in both frames ('B' and 'C').
    df = df_a.join(df_d, on='a', right_on='a', how='inner', rsuffix='_r')
    assert df.a.tolist() == ['B', 'C']
    assert df.x.tolist() == [1., 2.]
    assert df.y.tolist() == [None, 2.]
    assert df.m.tolist() == [None, 3.]
    assert df.x1.tolist() == ['dog', 'cat']
    assert df.x2.tolist() == [3.1, 25.]
@pytest.mark.skip(reason='full join not supported yet')
def test_full_a_d():
    # Full outer join (expected behavior once implemented): union of keys.
    df = df_a.join(df_d, on='a', right_on='a', how='full')
    assert df.a.tolist() == ['A', 'B', 'C', 'D']
    assert df.x.tolist() == [0., 1., 2., None]
    assert df.y.tolist() == [0., None, 2., None]
    assert df.m.tolist() == [1, None, 3, None]
    assert df.x1.tolist() == [None, 'dog', 'cat', 'mouse']
    assert df.x2.tolist() == [None, 3.1, 25., np.nan]
    np.testing.assert_array_equal(np.array(df_d.x2.values), np.array([3.1, 25., np.nan]))
def test_left_virtual_filter():
    # A virtual column computed across joined columns, then filtered on.
    df = df_a.join(df_d, on='a', how='left', rsuffix='_b')
    df['r'] = df.x + df.x2
    df = df[df.r > 10]
    assert set(df[0]) == {'C', 2.0, 2.0, 3, 'C', 'cat', 25.0, 27.0}
def test_left_on_virtual_col():
    # Joining on a virtual column ('aa', mapped from x) must work like a
    # join on a real column. Note: this mutates the shared df_a fixture.
    mapper = {0: 'A', 1: 'B', 2: 'C'}
    df_a['aa'] = df_a.x.map(mapper=mapper)
    df = df_a.join(df_d, left_on='aa', right_on='a', rsuffix='_right')
    assert df.a.tolist() == ['A', 'B', 'C']
    assert df.aa.tolist() == ['A', 'B', 'C']
    assert df.x.tolist() == [0, 1, 2]
    assert df.y.tolist() == [0., None, 2.]
    assert df.m.tolist() == [1, None, 3]
    assert df.x1.tolist() == [None, 'dog', 'cat']
    assert df.x2.tolist() == [None, 3.1, 25.]
    assert df.a_right.tolist() == [None, 'B', 'C']
def test_join_filtered_inner():
    # Inner join of a filtered frame keeps its (filtered) length.
    df_a_filtered = df_a[df_a.y > 0]
    df_joined = df_a_filtered.join(other=df_b, on='x', how='inner', rsuffix='_', allow_duplication=True)
    assert len(df_joined) == len(df_a_filtered)

    # Self-join of a filtered frame; repr() used to trip the selection cache.
    x = np.arange(20)
    df = vaex.from_arrays(x=x, y=x**2)
    df = df[df.x > 5]
    dfj = df.join(df, on='x', rsuffix='right_', how='inner')
    repr(dfj)  # trigger issue with selection cache
def test_join_duplicate_column():
    """Joining on a column present in both frames must not duplicate it."""
    df_left = vaex.from_arrays(index=[1, 2, 3], x=[10, 20, 30])
    df_right = vaex.from_arrays(index=[1, 2, 3], y=[0.1, 0.2, 0.3])
    df = df_left.join(df_right, on='index')
    assert df.column_count() == 3
    assert set(df.column_names) == {'index', 'x', 'y'}
    # BUG FIX: `assert df['index'] == [1, 2, 3]` compared a vaex Expression
    # to a list, producing another (truthy) Expression and making the
    # assertion vacuous -- materialize with .tolist() first.
    assert df['index'].tolist() == [1, 2, 3]
    assert df.x.tolist() == [10, 20, 30]
    assert df.y.tolist() == [0.1, 0.2, 0.3]
|
# creating a text file
# 'with' closes the handle automatically even on errors, and the name 'f'
# avoids shadowing a builtin (the original used 'file').
# opening in write mode
with open("sam.txt", "w") as f:
    # writing some content
    f.write("hi")
    f.write("\n")
    f.write("We are participating in Py75 challenge")
    f.write("\n")
    f.write("It's so interesting")
# write using appending mode
with open("sam.txt", "a") as f:
    f.write("bye")
'''OUTPUT:stud@HP-246-Notebook-PC:~$ python abi_writefile.py
stud@HP-246-Notebook-PC:~$ python abi_readfile.py
hi
We are participating in Py75 challenge
It's so interesting
hi
We are participating in Py75 challenge
It's so interestingbye
'''
import os
import logging
import torch
import collections
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
# NOTE(review): getLogger(__file__) names the logger after the file path;
# getLogger(__name__) is the usual convention — confirm before changing.
logger = logging.getLogger(__file__)
def build_dataset(dataset, tokenizer):
    """Tokenize and encode (query, target) text pairs for seq2seq training.

    Parameters
    ----------
    dataset : iterable
        Each item is indexable: item[0] is the query text, item[1] the
        target text.
    tokenizer : tokenizer object
        Must provide ``tokenize``, ``convert_tokens_to_ids`` and an
        ``eos_token_id`` attribute.

    Returns
    -------
    collections.defaultdict(list)
        Keys ``pad_input``, ``pad_input_mask`` and ``pad_label``; the
        "pad" prefix tells :func:`collate` these sequences need padding.
    """
    # getLogger(__file__) returns the same instance as the module-level
    # `logger`; dead commented-out experiments were removed.
    logging.getLogger(__file__).info("Tokenize and encode the dataset")
    instances = collections.defaultdict(list)
    for line in dataset:
        # Prefix the query so the model sees an explicit task marker.
        input_seq = tokenizer.convert_tokens_to_ids(
            tokenizer.tokenize("query: " + line[0]))
        # Labels end with EOS so generation learns where to stop.
        label_seq = tokenizer.convert_tokens_to_ids(
            tokenizer.tokenize(line[1])) + [tokenizer.eos_token_id]
        input_mask = [1] * len(input_seq)
        instances["pad_input"].append(input_seq)
        instances["pad_input_mask"].append(input_mask)
        instances["pad_label"].append(label_seq)
    return instances
def collate(dataset, pad_id, batch_first=True):
    """Pad variable-length features and convert every feature to a tensor.

    Parameters
    ----------
    dataset : dict mapping feature name -> list of sequences (or values)
        Names containing "pad" are padded to the batch maximum; of those,
        names containing "label" use -100 (ignored by PyTorch losses)
        instead of ``pad_id``.
    pad_id : int
        Padding token id for non-label padded features.
    batch_first : bool
        Forwarded to :func:`pad_sequence`.

    Returns
    -------
    list of torch.Tensor, one per key of ``dataset`` in insertion order.
    """
    # Same logger instance as the module-level `logger`; the original
    # mixed `logger.info` and root-logger `logging.info` calls.
    log = logging.getLogger(__file__)
    log.info("Pad inputs and convert to Tensor")
    tensor_dataset = []
    for input_name, features in dataset.items():
        if "pad" in input_name:
            # BUG FIX: the original condition was the chained comparison
            # `"label" in input_name in input_name`, whose second test is
            # always true; the intent is simply `"label" in input_name`.
            padding_value = -100 if "label" in input_name else pad_id
            input_tensor = pad_sequence(
                [torch.tensor(feature, dtype=torch.long) for feature in features],
                batch_first=batch_first, padding_value=padding_value)
        else:
            input_tensor = torch.tensor(features, dtype=torch.long)
        tensor_dataset.append(input_tensor)
    log.info("Max len of input tensor is %d", tensor_dataset[0].shape[1])
    return tensor_dataset
class BertDataset(Dataset):
    """Dataset producing BERT-style inputs for character-level correction.

    Each row of ``dataset`` (a pandas DataFrame) provides ``random_text``
    (the possibly-corrupted input) and, in train mode, ``origin_text``
    (the clean target) plus ``label`` (a space-separated 0/1 string
    marking positions).
    """

    def __init__(self, tokenizer, dataset, max_len=512, pad_first=True, mode='train'):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.max_len = max_len      # all sequences padded/truncated to this length
        self.data_size = len(dataset)
        self.pad_first = pad_first  # True: left-pad, False: right-pad
        self.mode = mode            # 'train' additionally returns targets/labels

    def __len__(self):
        return self.data_size

    @staticmethod
    def _wrap_and_truncate(tokens, max_len):
        """Truncate to max_len - 2 items and add [CLS]/[SEP] markers."""
        return ['[CLS]'] + list(tokens)[:max_len - 2] + ['[SEP]']

    def _pad(self, seq, pad_len):
        """Pad ``seq`` with zeros, on the left or right per ``self.pad_first``."""
        padding = [0] * pad_len
        return padding + seq if self.pad_first else seq + padding

    def __getitem__(self, item):
        row = self.dataset.iloc[item]
        input_ids = self.tokenizer.convert_tokens_to_ids(
            self._wrap_and_truncate(row['random_text'], self.max_len))
        input_mask = [1] * len(input_ids)
        segment_ids = [0] * len(input_ids)
        pad_len = self.max_len - len(input_ids)
        output = {
            'input_ids': self._pad(input_ids, pad_len),
            'input_mask': self._pad(input_mask, pad_len),
            'segment_ids': self._pad(segment_ids, pad_len),
        }
        if self.mode == 'train':
            output_ids = self.tokenizer.convert_tokens_to_ids(
                self._wrap_and_truncate(row['origin_text'], self.max_len))
            label = [int(x) for x in row['label'] if x != ' ']
            label = [0] + label[:self.max_len - 2] + [0]
            # BUG FIX: the original padded output_ids with the *input*
            # pad length, so output_ids ended up mis-sized whenever
            # origin_text and random_text had different lengths; pad each
            # sequence by its own remaining length instead.
            output['output_ids'] = self._pad(output_ids, self.max_len - len(output_ids))
            output['label'] = self._pad(label, self.max_len - len(label))
        return {key: torch.tensor(value) for key, value in output.items()}
|
# pylint: disable=unused-argument, missing-function-docstring
from flask_discord_interactions import DiscordInteractionsBlueprint, Message, Embed
from flask_discord_interactions.models.command import ApplicationCommandType, CommandOptionType
from flask_discord_interactions.models.user import User
from flask_discord_interactions.models.embed import Author, Field, Footer, Media
import config
from resources import players
from resources import trucks
from resources import levels
from resources import companies
# Blueprint collecting all profile-related commands for registration.
profile_bp = DiscordInteractionsBlueprint()
# /profile command group; subcommands are registered via @profile.command.
profile = profile_bp.command_group(name="profile", description="Show and manage your profile")
@profile.command(name="register", description="Register yourself to the Truck Simulator")
def register_profile(ctx) -> Message:
    """Show the welcome message and create a player profile if needed."""
    with open("./messages/welcome.md", "r") as welcome_file:
        welcome_embed = Embed(
            title="Hey there, fellow Trucker,",
            description=welcome_file.read(),
            color=config.EMBED_COLOR,
            author=Author(
                name="Welcome to the Truck Simulator",
                icon_url=config.SELF_AVATAR_URL,
            ),
        )
    already_registered = players.registered(ctx.author.id)
    if not already_registered:
        new_player = players.Player(int(ctx.author.id), ctx.author.username, money=1000, gas=600)
        players.insert(new_player)
        welcome_embed.footer = Footer(text="Your profile has been created")
    return Message(embed=welcome_embed)
@profile_bp.command(name="Check Profile", type=ApplicationCommandType.USER)
def show_profile_context(ctx, user: User) -> Message:
    """Context-menu (right-click) variant of the profile view."""
    embed = get_profile_embed(user)
    return Message(embed=embed)
@profile.command(
    name="show",
    description="Look at your profile",
    options=[
        {"name": "user", "description": "The user whose profile you want to look at", "type": CommandOptionType.USER}
    ],
)
def show_profile(ctx, user: User = None):
    """Show the invoking user's profile, or another user's when given."""
    target = ctx.author if user is None else user
    return Message(embed=get_profile_embed(target))
def get_profile_embed(user: User) -> Embed:
    """Build the profile embed shown by /profile show and the context menu."""
    player: players.Player = players.get(int(user.id))
    truck: trucks.Truck = trucks.get(player.truck_id)
    # Keep the stored name in sync when the Discord user was renamed.
    if player.name != user.username:
        players.update(player, name=user.username)
    profile_embed = Embed(
        author=Author(name=f"{player.name}'s profile"),
        thumbnail=Media(url=user.avatar_url),
        color=config.EMBED_COLOR,
        fields=[],
        image=Media(url=truck.image_url),
    )
    fields = profile_embed.fields
    fields.append(Field(
        name="Level",
        value=f"{player.level} ({player.xp:,}/{levels.get_next_xp(player.level):,} xp)",
        inline=False,
    ))
    fields.append(Field(name="Money", value=f"${player.money:,}"))
    fields.append(Field(
        name="Miles driven",
        value=f"{player.miles:,}\n({player.truck_miles:,} with current truck)",
        inline=False,
    ))
    fields.append(Field(name="Gas left", value=f"{player.gas} l", inline=False))
    fields.append(Field(name="Current truck", value=truck.name))
    try:
        company = companies.get(player.company)
        fields.append(Field(name="Company", value=f"{company.logo} {company.name}"))
    except companies.CompanyNotFound:
        # players without a company simply have no Company field
        pass
    return profile_embed
@profile_bp.command(
    name="top",
    description="Have a look at several toplists",
    options=[
        {
            "name": "key",
            "description": "The list you desire to view",
            "type": CommandOptionType.STRING,
            "required": True,
            "choices": [
                {"name": "level", "value": "level"},
                {"name": "money", "value": "money"},
                {"name": "miles", "value": "miles"},
            ],
        }
    ],
)
def top(ctx, key) -> Message:
    """Render the toplist for ``key`` ("level", "money" or "miles").

    ``players.get_top(key)`` returns an indexable pair: [0] is the list
    of players, [1] a per-value suffix appended to each line.
    """
    result = players.get_top(key)
    top_players, suffix = result[0], result[1]
    top_embed = Embed(title="Truck Simulator top list", color=config.EMBED_COLOR, fields=[])
    # Loop-invariant hoisted: the footer was re-assigned on every
    # iteration of the original loop.
    top_embed.footer = Footer(text="You can also sort by money and miles", icon_url=config.SELF_AVATAR_URL)
    top_body = ""
    for rank, player in enumerate(top_players, start=1):
        if key == "money":
            val = "{:,}".format(player.money)
        elif key == "miles":
            val = "{:,}".format(player.miles)
        else:
            val = "{:,} ({}/{} xp)".format(player.level, player.xp, levels.get_next_xp(player.level))
        top_body += "**{}**. {} ~ {}{}\n".format(rank, player.name, val, suffix)
    top_embed.fields.append(Field(name=f"Top {key}", value=top_body))
    return Message(embed=top_embed)
|
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
opencl4py - OpenCL cffi bindings and helper classes.
URL: https://github.com/Samsung/opencl4py
Original author: Alexey Kazantsev <a.kazantsev@samsung.com>
"""
"""
Tests some of the api in opencl4py.blas._clBlas module.
"""
import unittest
import logging
import numpy
import opencl4py as cl
import opencl4py.blas as blas
import os
class Test(unittest.TestCase):
    """Tests for opencl4py.blas GEMM wrappers against numpy.dot."""

    def setUp(self):
        # Remember any pre-existing PYOPENCL_CTX so tearDown can restore
        # it; default to device "0:0" only when the variable is unset.
        self.old_env = os.environ.get("PYOPENCL_CTX")
        if self.old_env is None:
            os.environ["PYOPENCL_CTX"] = "0:0"
        self.blas = blas.CLBLAS()
    def tearDown(self):
        # Restore the environment exactly as it was before setUp ran.
        if self.old_env is None:
            del os.environ["PYOPENCL_CTX"]
        else:
            os.environ["PYOPENCL_CTX"] = self.old_env
        del self.old_env
    def _test_gemm(self, gemm, dtype):
        # Computes C = A * B^T on the device and compares with numpy.dot.
        # gemm is the sgemm/dgemm callable; dtype the matching numpy dtype.
        ctx = cl.Platforms().create_some_context()
        queue = ctx.create_queue(ctx.devices[0])
        a = numpy.zeros([127, 353], dtype=dtype)
        b = numpy.zeros([135, a.shape[1]], dtype=dtype)
        c = numpy.zeros([a.shape[0], b.shape[0]], dtype=dtype)
        # Fixed seed for reproducible random inputs.
        numpy.random.seed(numpy.array([123], dtype=numpy.int32)[0])
        a[:] = numpy.random.rand(a.size).astype(dtype).reshape(a.shape)
        b[:] = numpy.random.rand(b.size).astype(dtype).reshape(b.shape)
        gold_c = numpy.dot(a, b.transpose())
        a_buf = ctx.create_buffer(
            cl.CL_MEM_READ_WRITE | cl.CL_MEM_COPY_HOST_PTR, a)
        b_buf = ctx.create_buffer(
            cl.CL_MEM_READ_WRITE | cl.CL_MEM_COPY_HOST_PTR, b)
        c_buf = ctx.create_buffer(
            cl.CL_MEM_READ_WRITE | cl.CL_MEM_COPY_HOST_PTR, c)
        # Row-major, A not transposed, B transposed: alpha=1, beta=0.
        gemm([queue], blas.clblasRowMajor, blas.clblasNoTrans,
             blas.clblasTrans, a.shape[0], b.shape[0], a.shape[1],
             1.0, a_buf, b_buf, 0.0, c_buf)
        queue.flush()
        queue.read_buffer(c_buf, c)
        max_diff = numpy.fabs(c - gold_c).max()
        # Looser tolerance for single precision.
        self.assertLess(max_diff, 0.00001 if dtype == numpy.float64
                        else 0.00015)
    def test_sgemm(self):
        # Single-precision GEMM.
        logging.debug("ENTER: test_sgemm")
        self._test_gemm(self.blas.sgemm, numpy.float32)
        logging.debug("EXIT: test_sgemm")
    def test_dgemm(self):
        # Double-precision GEMM.
        logging.debug("ENTER: test_dgemm")
        self._test_gemm(self.blas.dgemm, numpy.float64)
        logging.debug("EXIT: test_dgemm")
# Allow running this test module directly with verbose logging.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
"""
The archive method here is based on the dataloc system.
This is a system where when code is run a json file with the output directory
path is written in the repo.
- Pros
- Can handle general paths
- Cons
- requires a link to files
- Has trouble if experiments mix their output folders
"""
from .utilities import git_status_check, git_commit
from datetime import datetime
from jinja2 import Template
import textwrap
import shutil
import click
import json
import re
import os
@click.command("gprune")
@click.argument("label")
@click.option("-m", "--msg", required=True,
              help="msg for first mutation")
@click.option("-b", "--branch", is_flag=True,
              help="whether to bud all sub-branches along with main")
@click.option("-a", "--archive_root",
              help="root directory where we should locate archive")
def cli(label, msg, branch, archive_root):
    """archives code and data files for a mutation and leaf-mutas

    Parameters
    ----------
    label : str
        label for current mutation
    msg : str
        msg to explain pruning/archiving
    branch : bool
        whether to prune all sub-branches as well
    archive_root : str, optional
        extra root directory considered when locating the archive
    """
    # candidate roots under which experiment output may live
    base_dirs = ["raw", "intermediate", "cache", "summaries", "results"]
    if archive_root:
        base_dirs.append(archive_root)
    # abort when the git working tree is dirty
    cwd = os.getcwd()
    if git_status_check(cwd):
        return None
    # collect every mutation directory to prune
    if branch:
        exp_dir_l = [os.path.join(cwd, entry) for entry in os.listdir(cwd)
                     if re.match("^%s" % label, entry)]
    else:
        exp_dir_l = [os.path.join(cwd, label)]
    for exp_dir in exp_dir_l:
        prune_mutation(exp_dir, label, msg, base_dirs)
    git_commit(cwd, "prune experiment %s\n\n%s" % (label, msg))
def prune_mutation(exp_dir, label, msg, base_dirs):
    """archives the code and data files for a mutation

    Parameters
    ----------
    exp_dir : str
        location for current mutation
    label : str
        current label for parent/root mutation
    msg : str
        message to explain the pruning
    base_dirs : list
        list of potential root locations
    """
    # archive dataloc and get common root
    dataloc_dir = os.path.join(exp_dir, ".dataloc")
    if os.path.exists(dataloc_dir):
        # every *.json under .dataloc maps names -> output directories
        dataloc_paths = [os.path.join(r, f)
                         for r, i, f_l in os.walk(dataloc_dir)
                         for f in f_l if ".json" in f]
    else:
        dataloc_paths = []
    # build paths containing output
    path_l = []
    for cf in dataloc_paths:
        with open(cf, "r") as fd:
            outdirs = json.load(fd)
        for d in outdirs:
            path = outdirs[d]
            path_l.append(path)
    # determine archive root directory: a base dir matches when it
    # appears as a path component ("/<dir>/") of some output path
    potential_archive_roots = []
    for p in path_l:
        for d in base_dirs:
            if ("/%s/" % d) in p:
                potential_archive_roots.append(d)
    potential_archive_roots = list(set(potential_archive_roots))
    if len(potential_archive_roots) > 1:
        # output folders from mixed experiments cannot be archived safely
        raise ValueError("Multiple potential archive roots detected")
    elif len(potential_archive_roots) == 1:
        archive_root = potential_archive_roots[0]
    else:
        archive_root = None
    if archive_root:
        # NOTE(review): archive_dir is resolved relative to the current
        # working directory when archive_root is a bare name like "raw";
        # confirm that is the intended location.
        dt = datetime.now().strftime("%Y%m%d")
        archive_dir = os.path.join(archive_root, "archive_%s" % dt, label)
        os.makedirs(archive_dir, exist_ok=True)
        # get top lvl dirs for tree move
        rel_path_l = [os.path.relpath(p, archive_root) for p in path_l]
        rel_path_l = list(set(rel_path_l))
        # add deprecated paths: also sweep copies named "<path>__depNN"
        nrel_path_l = []
        for p in rel_path_l:
            re_pattern = "%s__dep[0-9][0-9]" % p
            dep_paths = [f for f in os.listdir(archive_root)
                         if re.search(re_pattern, f)]
            nrel_path_l.append(p)
            nrel_path_l.extend(dep_paths)
        # move all paths (copy into the archive, then delete the source)
        for p in nrel_path_l:
            if os.path.exists(os.path.join(archive_root, p)):
                shutil.copytree(os.path.join(archive_root, p),
                                os.path.join(archive_dir, p))
                shutil.rmtree(os.path.join(archive_root, p))
    # move code
    code_dir = os.path.join(archive_dir, "code")
    shutil.copytree(exp_dir, code_dir)
    # write README explaining archive
    with open(os.path.join(archive_dir, "README.txt"), "w") as fd:
        fd.write(msg)
    # remove existing code
    shutil.rmtree(exp_dir)
# Entry point when executed directly; click parses sys.argv.
if __name__ == "__main__":
    cli()
|
from flask import make_response, send_file, g, Response
import traceback, json, time, bson, os
# from views import setup
# from lib.utils.config import Settings
# from lib.utils.db import Manager
from appinit_backend.lib.modules import Modules
#
# settings = Settings(path="/home/cee-tools/", verify=False, instance=os.environ['CEE_TOOLS_INSTANCE'])
# manager = modules.manager
from appinit_backend.lib.responses import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseInternalServerError
from appinit_backend.middleware.json import convert, convert_keys
from appinit_backend.middleware.timezone import check_user_timezone
from appinit.lib.db import Manager
def handle_response(session_mgr, modules, request, module_path, isFile, **kwargs):
    """Dispatch an API call and translate the result into an HTTP response.

    Resolves the module at ``module_path``, invokes it via check_errors,
    maps the outcome to a response object, and records the call through
    the local ``logging`` helper. ``isFile`` marks results that should be
    streamed back as attachments.
    """
    # BUG FIX: HttpResponseUnauthorized was referenced below but never
    # imported at module level, so an unauthenticated request raised
    # NameError. NOTE(review): confirm the name exists in lib.responses.
    from appinit_backend.lib.responses import HttpResponseUnauthorized
    session = session_mgr.get()
    if not session:
        return HttpResponseUnauthorized(json.dumps({"message": "Unauthorized to api. Either authenticate with saml or send an api token"}))
    check_user_timezone(session, request)
    # devel mode reports exception details as JSON instead of re-raising
    devel = "APPINIT_DEVEL" in os.environ
    result = check_errors(session_mgr, modules, request, module_path, **kwargs)
    response = None
    if result['error'] is None:
        c = result['data']
        if isFile:
            # stream the file back with its stored metadata
            response = Response(c.read(), mimetype=c.content_type)
            response.headers['Content-Length'] = c.length
            response.headers['Content-Disposition'] = "attachment; filename=%s" % c.filename
        else:
            response_data = json.dumps(c, default=convert, ensure_ascii=False).encode("utf-8")
            response = HttpResponse(response_data)
    elif result['error']['type'] == "forbidden":
        response = HttpResponseForbidden()
    elif result['error']['type'] == "bad-request":
        response = result['error']['response']
    elif result['error']['type'] == "module-exist":
        response = HttpResponseBadRequest(json.dumps({"message": "api module doesn't exist"}))
    elif result['error']['type'] == "exception":
        # NOTE(review): devel mode returns a friendly message while
        # non-devel re-raises — this looks inverted; confirm intent.
        if devel:
            if result['error']['exception_type'] == "KeyError":
                response = HttpResponse(json.dumps({"message": "Invalid parameters"}))
            else:
                response = HttpResponse(json.dumps({"message": "Server encountered an error. Admin has been notified of this error."}))
        else:
            raise result['error']['exception']
    else:
        response = HttpResponse(json.dumps({"message": "Server encountered an error. Admin has been notified of this error."}))
    # the raw response object must not be persisted with the log entry
    if result['error'] is not None and "response" in result['error']:
        del result['error']['response']
    logging(session_mgr, request, response, module_path, result, **kwargs)
    return response
def check_errors(session_mgr, modules, request, module_path, **kwargs):
    """Resolve and invoke an API module, capturing any failure.

    Returns a dict with keys 'module' (the resolved module or None),
    'error' (None on success, otherwise a typed error record) and
    'data' (the module's return value).
    """
    output = {
        "module": None,
        "error": None,
        "data": None,
    }
    # unknown dotted path -> "module-exist" error
    if module_path not in modules.get_all_modules():
        output['error'] = {"type": "module-exist"}
        return output
    module_data = modules.get(module_path, data=True)
    module = module_data['obj']
    output['module'] = module
    # permission check against the caller's session permissions
    access = modules.check_permissions(module_data, session_mgr.get_permissions())
    if not access:
        output['error'] = {"type": "forbidden"}
        return output
    try:
        data = module.call(**kwargs)
        # modules may return error responses directly; surface them as
        # typed errors while still keeping the response object around
        if isinstance(data, HttpResponseBadRequest):
            output['error'] = {"type": "bad-request", "response": data}
        elif isinstance(data, HttpResponseForbidden):
            output['error'] = {"type": "forbidden", "response": data}
        output['data'] = data
    except Exception as e:
        # capture full context so handle_response/logging can report it
        # (format_exc hoisted: the original formatted the trace twice)
        stack_trace = traceback.format_exc()
        print(stack_trace)
        output['error'] = {
            'type': 'exception',
            'exception_type': type(e).__name__,
            'stack_trace': stack_trace,
            'exception': e,
            'request': {
                'headers': dict(request.headers),
                'data': {
                    'form': request.form,
                    'args': request.args,
                    'data': request.data,
                },
                'kwargs': json.dumps(kwargs, default=convert, ensure_ascii=False).encode("utf-8"),
                'cookies': request.cookies,
            }
        }
    return output
def logging(session_mgr, request, response, module_path, result, **kwargs):
    """Persist one API call (request, response, outcome) to the logging DB.

    NOTE(review): this function name shadows the stdlib ``logging``
    module; safe here only because this module never imports it.
    """
    manager = Manager()
    session = session_mgr.get()
    # get list of parent module chains not including the full path of this one
    parent_modules = []
    module_chain = module_path.split('.')
    temp_parent_chain = module_chain[0]
    for module_name in module_chain[1:]:
        parent_modules.append(temp_parent_chain)
        temp_parent_chain += '.' + module_name
    # do not log if the API call is part of the logging API (search, etc)
    if 'logging' in parent_modules:
        return
    # convert(request.headers)
    log = {
        'timestamp': Manager.get_current_time(),
        'path': module_path,
        'parent_modules': parent_modules,
        'uid': session.uid,
        'source_ip': request.remote_addr,
        'method': request.method,
        # don't yet know whether a module exists to get an action from
        'action': None,
        # permissions is represented as a set, so convert it to a list
        # else the database can't encode it
        'permissions': list(session_mgr.get_permissions()),
        'request': {
            'headers': dict(request.headers),
            'data': {
                'form': request.form,
                'args': request.args,
                'data': request.data,
            },
            'kwargs': json.dumps(kwargs, default=convert, ensure_ascii=False).encode("utf-8"),
            'cookies': request.cookies,
        },
        'response': {
            'status': response.status,
            'headers': dict(response.headers),
            'data': response.get_data()
        }
    }
    if result['module'] is not None:
        # the module's declared action, when the module was resolved
        log['action'] = getattr(result['module'], 'action', None)
    if result['error'] is not None:
        # the actual exception, if present, can't and shouldn't be encoded into mongo
        # create a copy of result[error] with exception field explicitly left out
        log['failure'] = { key: result['error'][key] for key in result['error'] if key != 'exception' }
    # set up db
    db = manager.db("logging")
    db.logs.insert_one(log)
|
# Test slice assignment
# A slice assignment may replace a span with a differently-sized list,
# shrinking or growing the list in place.
l = list(range(6))
l[2:5] = ["x"]
assert l == [0, 1, 'x', 5]
l = list(range(7))
l[:5] = ["x"]
assert l == ['x', 5, 6]
l = list(range(6))
l[2:] = ["x"]
assert l == [0, 1, 'x']
l = list(range(6))
l[:] = ["x"]
assert l == ['x']
# Test slice deletion
# `del` with a slice removes the selected span in place.
l = list(range(6))
del l[2:5]
assert l == [0, 1, 5]
l = list(range(6))
del l[:5]
assert l == [5]
l = list(range(6))
del l[2:]
assert l == [0, 1]
l = list(range(6))
del l[:]
assert l == []
# An extended (stepped) slice deletes every second element here.
l = list(range(6))
del l[::2]
assert l == [1, 3, 5]
|
from akcrm.permissions import LazyPermissions


class PermissionsMiddleware(object):
    """Attach a lazily-evaluated PERMISSIONS object to every request."""

    def process_request(self, request):
        # Equivalent to setattr(request, 'PERMISSIONS', ...): the lazy
        # wrapper defers permission lookups until first access.
        request.PERMISSIONS = LazyPermissions(request)
|
#!/usr/bin/python
"""Collect reflection-model inputs from the command line.

argv[1]: space-separated list of source files; argv[2]: output header
path; argv[3]: optional temp directory (defaults to "CMakeFiles/").
"""
import sys

# Guard against missing arguments: the original `sys.argv[3] or ...`
# only handled an *empty* third argument and raised IndexError when it
# was absent entirely.
source_files = sys.argv[1].split(" ") if len(sys.argv) > 1 else []
output_header_path = sys.argv[2] if len(sys.argv) > 2 else ""
tmp_dir = sys.argv[3] if len(sys.argv) > 3 and sys.argv[3] else "CMakeFiles/"
timestamps_path = tmp_dir + "model_refl_timestamps"
print("Updating reflection model")
print("Source files", source_files)
print("Output header", output_header_path)
print("Timestamps", timestamps_path)
|
import streamlit as st
import tensorflow as tf
from PIL import Image, ImageOps
import numpy as np
# NOTE(review): this deprecation option was removed in newer Streamlit
# releases; the call may need to be dropped when upgrading.
st.set_option('deprecation.showfileUploaderEncoding', False)
# @st.cache(suppress_st_warning=True,allow_output_mutation=True)
def import_and_predict(image_data, model):
    """Resize and normalize an uploaded image, display it, and run the model.

    Returns the raw model prediction array (first axis is the batch).
    """
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
    # the long-standing alias for the same resampling filter.
    image = ImageOps.fit(image_data, (100, 100), Image.LANCZOS)
    image = image.convert('RGB')
    image = np.asarray(image)
    st.image(image, channels='RGB')
    # scale pixel values to [0, 1] as the model expects
    image = image.astype(np.float32) / 255.0
    img_reshape = image[np.newaxis, ...]
    return model.predict(img_reshape)
# Load the trained classifier once at startup.
model = tf.keras.models.load_model('my_model2.h5')
st.write("""
# ***Glaucoma detector***
"""
)
st.write("This is a simple image classification web app to predict glaucoma through fundus image of eye")
file = st.file_uploader("Please upload an image(jpg) file", type=["jpg"])
if file is None:
    st.text("You haven't uploaded a jpg image file")
else:
    imageI = Image.open(file)
    prediction = import_and_predict(imageI, model)
    pred = prediction[0][0]
    # sigmoid-style output: above 0.5 is reported healthy, otherwise
    # glaucoma is suspected
    if pred > 0.5:
        # Typo fix in the user-facing message: "You eye" -> "Your eye"
        st.write("""
        ## **Prediction:** Your eye is Healthy. Great!!
        """
        )
        st.balloons()
    else:
        st.write("""
        ## **Prediction:** You are affected by Glaucoma. Please consult an ophthalmologist as soon as possible.
        """
        )
|
from datetime import datetime
from twogplus import db
class User(db.Model):
    """A registered user with test/vaccination status flags."""

    __tablename__ = "users"
    # primary key, assigned by the database
    id: int = db.Column(db.Integer, primary_key=True)
    name: str = db.Column(db.Text, nullable=False)
    created_at: datetime = db.Column(db.Date, nullable=False, default=db.func.now())
    is_tested: bool = db.Column(db.Boolean, nullable=False)
    is_vaccinated: bool = db.Column(db.Boolean, nullable=False)

    def __init__(self, name: str, is_vaccinated: bool = False, is_tested: bool = False):
        self.name = name
        self.is_vaccinated = is_vaccinated
        self.is_tested = is_tested

    def __repr__(self):
        return f"<User id={self.id}, name={self.name}>"

    def get_capitalized_name(self) -> str:
        """Return the name with each space-separated word capitalized.

        BUG FIX: the original hard-coded exactly two words and raised
        IndexError for single-word names; this handles any word count
        and produces identical output for the two-word case.
        """
        return " ".join(part.capitalize() for part in self.name.split(" "))
|
import os
import sys
import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import warnings
warnings.filterwarnings('ignore')
class GenomeDataset(Dataset):
    """GWAS and nonGWAS SNP sequence dataset.

    Reads a tab-separated file with one SNP per row; sequence columns
    (``seq_name``) are one-hot encoded, and optional metadata columns
    are encoded with NaN mapped to -1.
    """

    def __init__(self, filename, seq_name=None, encode_mode="N_2_zero", metadata_cols=None):
        # seq_name defaults to None instead of a shared mutable list
        # (same effective default as before: ['seq_ref_1']).
        self.genomedata = pd.read_csv(filename, sep="\t")
        self.seq_name = ['seq_ref_1'] if seq_name is None else seq_name
        self.encode_mode = encode_mode
        self.metadata_cols = metadata_cols

    def __len__(self):
        return len(self.genomedata)

    def __getitem__(self, idx):
        # BUG FIX: DataFrame.ix was removed in pandas 1.0; use .iloc for
        # positional row access, then label-based access on the row.
        row = self.genomedata.iloc[idx]
        seq_encode = {sn: self.encode(row[sn], self.encode_mode) for sn in self.seq_name}
        label = row["label"]
        rsid = row['rsid']
        query_snp_rsid = row['query_snp_rsid']
        if self.metadata_cols is not None:
            metadata = self.encode_metadata(row[self.metadata_cols])
            seq_tuple = tuple([seq_encode[sn] for sn in self.seq_name]
                              + [label, rsid, query_snp_rsid, metadata])
        else:
            seq_tuple = tuple([seq_encode[sn] for sn in self.seq_name]
                              + [label, rsid, query_snp_rsid])
        return seq_tuple

    def encode(self, input, encode_mode='N_2_zero'):
        """ Encode string input to a numerical matrix. Sequence after encoding has two modes:
            N_2_zero: "N" encodes to [0,0,0,0]
            N_2_quarter: "N" encodes to [1/4, 1/4, 1/4, 1/4]
        """
        n = len(input)
        # 4 x n float matrix, rows in "A, C, G, T" order
        output = np.zeros((4, n), dtype="f")
        if encode_mode == "N_2_zero":
            # one-hot; "N" (or any unknown character) stays all-zero
            for i, base in enumerate(input):
                base = base.upper()
                if base == "A":
                    output[0, i] = 1.0
                elif base == "C":
                    output[1, i] = 1.0
                elif base == "G":
                    output[2, i] = 1.0
                elif base == "T":
                    output[3, i] = 1.0
        elif encode_mode == "N_2_quarter":
            # NOTE(review): values 2/3/4 for C/G/T look inconsistent with
            # the one-hot-style docstring; original behavior preserved.
            for i, base in enumerate(input):
                base = base.upper()
                if base == "A":
                    output[0, i] = 1.0
                elif base == "C":
                    output[1, i] = 2.0
                elif base == "G":
                    output[2, i] = 3.0
                elif base == "T":
                    output[3, i] = 4.0
                else:
                    output[0:4, i] = 0.25
        else:
            # Previously an unknown mode fell through and raised
            # UnboundLocalError at return; fail loudly instead.
            raise ValueError("unknown encode_mode: %s" % encode_mode)
        return output

    def encode_metadata(self, inputs):
        """Encode a metadata row to a 1 x n float array with NaN -> -1."""
        n = len(inputs)
        output = np.zeros((1, n), dtype='f')
        for i in range(n):
            # `is np.nan` only matches the np.nan singleton (what pandas
            # uses for missing values); a plain float('nan') would not be
            # caught — original semantics preserved.
            if inputs[i] is np.nan:
                output[0, i] = -1.0
            else:
                output[0, i] = inputs[i]
        return output
if __name__ == '__main__':
    filename = 'tmp.txt'
    # BUG FIX: the original passed has_metadata=True, which is not a
    # parameter of GenomeDataset and raised TypeError; metadata columns
    # are selected via metadata_cols instead.
    data = GenomeDataset(filename, metadata_cols=None)
    print(data.genomedata)
    # for d in data:
    # print(d)
    #break
|
class TrainingConfig(object):
    """Hyper-parameters controlling the training loop."""
    seed = 1
    use_gpu = True
    gpu_id = 0
    epoch = 30
    learning_rate = 1e-3
    decay_rate = 0.5
    decay_patience = 3
    batch_size = 64
    train_log = True
    log_interval = 10
    show_plot = False
    f1_norm = ['macro', 'micro']
class ModelConfig(object):
    """Embedding and feature dimensions shared by the models."""
    word_dim = 50
    pos_size = 102  # 2 * pos_limit + 2
    pos_dim = 5
    feature_dim = 60  # 50 + 5 * 2
    hidden_dim = 100
    dropout = 0.3
class CNNConfig(object):
    """CNN / PCNN model settings."""
    use_pcnn = True
    out_channels = 100
    kernel_size = [3, 5, 7]
class RNNConfig(object):
    """RNN (LSTM) model settings."""
    lstm_layers = 3
    last_hn = False
class GCNConfig(object):
    """GCN model settings."""
    num_layers = 3
class TransformerConfig(object):
    """Transformer model settings."""
    transformer_layers = 3
class CapsuleConfig(object):
    """Capsule-network model settings."""
    num_primary_units = 8
    num_output_units = 10 # relation_type
    primary_channels = 1
    primary_unit_size = 768
    output_unit_size = 128
    num_iterations = 3
class LMConfig(object):
    """Pretrained language-model settings."""
    # lm_name = 'bert-base-chinese' # download usage
    # cache file usage
    lm_file = 'bert_pretrained'
    # number of transformer layers; the base BERT model has 12, but with
    # smaller datasets fewer layers converge faster and work better
    num_hidden_layers = 2
class Config(object):
    """Top-level configuration aggregating all sub-configurations."""
    # location of the raw input data
    data_path = 'data/origin'
    # where preprocessed files are written
    out_path = 'data/out'
    # whether to replace entities in sentences with their entity types
    replace_entity_by_type = True
    # whether the data is Chinese
    is_chinese = True
    # whether word segmentation is required
    word_segment = True
    # number of relation classes
    relation_type = 10
    # minimum token frequency when building the vocab
    min_freq = 2
    # position limit
    pos_limit = 50 # [-50, 50]
    # which model to use: (CNN, RNN, GCN, Transformer, Capsule, LM)
    model_name = 'Capsule'
    training = TrainingConfig()
    model = ModelConfig()
    cnn = CNNConfig()
    rnn = RNNConfig()
    gcn = GCNConfig()
    transformer = TransformerConfig()
    capsule = CapsuleConfig()
    lm = LMConfig()
# Singleton configuration instance imported by the rest of the project.
config = Config()
|
# Read an integer amount and a float balance from one input line.
raw_amount, raw_balance = input().split()
amount = int(raw_amount)
balance = float(raw_balance)
# The withdrawal goes through only for multiples of 5 when the balance
# also covers the 0.50 fee; otherwise the balance is printed unchanged.
can_withdraw = amount % 5 == 0 and balance > amount + 0.5
if can_withdraw:
    print("%.2f" % (balance - amount - 0.5))
else:
    print("%.2f" % balance)
|
import os
# Directory where images are stored; BACKEND_IMG_PATH overrides the default.
images_dir = os.getenv("BACKEND_IMG_PATH", "images")
# Database connection URL; empty string when BACKEND_DB_URL is unset.
postgres_url = os.getenv("BACKEND_DB_URL", "")
|
def longestRun(L):
    """Return the length of the longest non-decreasing run in L.

    A run is a maximal stretch of consecutive elements where each item
    is >= its predecessor; a single element counts as a run of length 1.

    BUG FIX: the original reset its run markers *before* recording the
    candidate run and never recorded a run that reached the end of the
    list, so its result list stayed empty and it returned 1 for every
    input. An empty list now returns 0 (the original returned 1).
    """
    if not L:
        return 0
    best = 1
    current = 1
    # compare each adjacent pair; extend or restart the current run
    for prev, nxt in zip(L, L[1:]):
        if nxt >= prev:
            current += 1
            if current > best:
                best = current
        else:
            current = 1
    return best
|
#!/usr/bin/python
# NOTE: Python 2 script (print statements); drives an Arista switch via
# the eAPI JSON-RPC endpoint to create VLAN 222 named "green".
import jsonrpclib
import time
import ssl
from pprint import pprint
# Disable TLS certificate verification (switch uses a self-signed cert).
ssl._create_default_https_context = ssl._create_unverified_context
# SECURITY(review): credentials are hard-coded in source; move them to
# environment variables or a config file.
ip = '184.105.247.72'
port = '443'
username = 'admin1'
password = '99saturday'
# eAPI URL of the form https://user:pass@host:port/command-api
switch_url = 'https://{}:{}@{}:{}'.format(username, password, ip, port)
switch_url = switch_url + '/command-api'
remote_connect = jsonrpclib.Server(switch_url)
print remote_connect
# Build the command list: 'enable' and 'configure terminal' are inserted
# at the front so they execute before the vlan commands.
commands = []
commands.insert (0, 'configure terminal')
commands.insert (0, {'cmd': 'enable', 'input': ''})
commands.append('vlan 222')
commands.append('name green')
print commands
# Run all commands in a single eAPI call and show the structured response.
CommandsResponse = remote_connect.runCmds(1, commands)
pprint(CommandsResponse)
|
from bottle import Bottle, run, route, static_file, request, response, template, redirect
import json
import time
import os
# BUG FIX: bottle's Bottle() takes no application-name argument (that is
# a Flask convention); Bottle(__name__) bound __name__ to the first
# constructor parameter. Construct with defaults instead.
app = Bottle()

@app.route('/')
def root():
    """Serve the landing page."""
    return static_file('index.html', root='templates')

@app.route('/index1')
def index1():
    """Serve the first sub-page."""
    return static_file('index1.html', root='templates')

@app.route('/index2')
def index2():
    """Serve the second sub-page."""
    return static_file('index2.html', root='templates')
# Static Routes
# BUG FIX: three handlers were all named `javascripts`, and the *.html
# pattern was registered twice — the second registration (serving from
# 'static') was unreachable because earlier-added routes match first.
# Handlers are uniquely named and the dead route is removed.
@app.route('/<filename:re:.*\.html>')
def html_pages(filename):
    """Serve HTML pages from the templates directory."""
    return static_file(filename, root='templates')

@app.route('/<filename:re:.*\.js>')
def javascripts(filename):
    """Serve JavaScript assets."""
    return static_file(filename, root='static')

@app.route('/<filename:re:.*\.css>')
def stylesheets(filename):
    """Serve CSS assets."""
    return static_file(filename, root='static')

@app.route('/<filename:re:.*\.(jpg|png|gif|ico|svg)>')
def images(filename):
    """Serve image assets."""
    return static_file(filename, root='static')

@app.route('/<filename:re:.*\.(eot|ttf|woff|svg)>')
def fonts(filename):
    """Serve font assets."""
    return static_file(filename, root='static')
@app.hook('after_request')
def enable_cors():
    """Add permissive CORS headers to every outgoing response."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'PUT, GET, POST, DELETE, OPTIONS',
        'Access-Control-Allow-Headers': 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token',
    }
    for name, value in cors_headers.items():
        response.headers[name] = value
# Copyright 2013-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
# License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
from common.sge import check_sge_command_output, run_sge_command
log = logging.getLogger(__name__)
def hasJobs(hostname):
    """Return True when at least one SGE job is active on *hostname*.

    Uses `qstat -g t` so that slave tasks of parallel jobs count as well;
    any non-empty qstat output means the node is busy. A failing qstat is
    treated as "no jobs".
    """
    # -g t expands the parallel-job view; -l hostname=... restricts to this node.
    command = "qstat -g t -l hostname={0} -u '*'".format(hostname)
    # Example output (header, then one row per task/slot):
    #   job-ID  prior  name    user     state submit/start at     queue                          master ja-task-ID
    #   16 0.6 0500    job.sh  ec2-user r     02/06/2019 11:06:30 all.q@ip-172-31-68-26.ec2.inte SLAVE
    #   17 0.50500    STDIN   ec2-user r     02/06/2019 11:06:30 all.q@ip-172-31-68-26.ec2.inte MASTER 1
    try:
        return check_sge_command_output(command, log) != ""
    except subprocess.CalledProcessError:
        return False
def hasPendingJobs():
    """Return (has_pending, error) for the SGE pending-job queue.

    has_pending is True when `qstat -g d -s p` lists more than one non-empty
    line; error is True when the qstat invocation itself failed.
    """
    command = "qstat -g d -s p -u '*'"
    # Command outputs the pending jobs in the queue in the following format
    #   job-ID  prior  name   user     state submit/start at     queue  slots ja-task-ID
    #   70 0.55500 job.sh ec2-user qw    08/08/2018 22:37:24        1
    #   71 0.55500 job.sh ec2-user qw    08/08/2018 22:37:24        1
    try:
        output = check_sge_command_output(command, log)
        # BUG FIX: filter() is a lazy iterator on Python 3, so the previous
        # len(filter(...)) raised TypeError. Materialise the lines first.
        lines = list(filter(None, output.split("\n")))
        return len(lines) > 1, False
    except subprocess.CalledProcessError:
        return False, True
def lockHost(hostname, unlock=False):
    """Disable the all.q queue instance on *hostname* (or enable it with unlock=True).

    Errors from qmod are logged, not raised.
    """
    # qmod -e enables (unlocks) the queue instance, -d disables (locks) it.
    # Conditional expression instead of the fragile `cond and a or b` idiom.
    mod = "-e" if unlock else "-d"
    command = ["qmod", mod, "all.q@%s" % hostname]
    try:
        run_sge_command(command, log)
    except subprocess.CalledProcessError:
        log.error("Error %s host %s", "unlocking" if unlock else "locking", hostname)
|
# -*- coding: utf-8 -*-
"""
aux.plugin based on flask.ext
~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
def setup():
    """Install the plugin import hook, mirroring flask.ext's redirect trick."""
    from aux.internals.pluginhook import PluginImporter
    PluginImporter(['aux_protocol_%s'], __name__).install()


# Run once at import time, then drop the helper from the module namespace.
setup()
del setup
|
import setuptools
VERSION = 0.1
def get_description():
    """Return the full contents of README.md for the long description."""
    with open("README.md", "r") as md:
        return md.read()
def get_requirements():
    """Return the lines of requirements.txt with surrounding whitespace removed.

    BUG FIX: the previous code used i.replace(r"\n", ""), where the raw string
    is a literal backslash-n, so trailing newlines were never stripped and
    every requirement ended with "\n".
    """
    with open("requirements.txt") as f:
        return [line.strip() for line in f.readlines()]
# Distribution metadata. Reads README.md and requirements.txt from the
# current directory at build time (see the helpers above).
setuptools.setup(
    name="safe-regex",
    version=VERSION,
    author="Steven Ensslen",
    author_email="steven@claritycloudworks.com",
    description="Embeds unit tests with regular expressions",
    long_description=get_description(),
    long_description_content_type="text/markdown",
    url="https://github.com/ensslen/safe-regex",
    keywords=["test", "regular expression"],
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    install_requires=get_requirements(),
)
|
from __future__ import annotations
from typing import List, Dict
class Rule:
    """One grammar rule: either a terminal literal or alternatives of child-rule chains.

    Parsed from text of the form ``<index>: <description>`` where the
    description is either a quoted literal (``4: "a"``) or pipe-separated
    chains of rule indices (``0: 1 2 | 3``).
    """

    def __init__(self, text: str):
        parts = text.split(":")
        if len(parts) != 2:
            raise ValueError(f"cannot create Rule, syntax error (expected <index>: <description>) in {text}")
        self.index = int(parts[0])
        body = parts[1]
        self.literal: str = ""
        self.child_rules: List[List[int]] = []
        # A quoted description is a terminal literal; anything else is a set
        # of alternative child-rule chains.
        if '"' in body:
            self._parse_literal(body)
        else:
            self._parse_child_rules(body)

    def _parse_child_rules(self, description: str):
        """Parse alternatives like "1 2 | 3 4" into lists of rule indices."""
        for alternative in description.split("|"):
            chain = [int(token) for token in alternative.split(" ") if token.replace(" ", "")]
            if not chain:
                raise ValueError(f"cannot parse, one of alternatives in rule is empty: {description}")
            self.child_rules.append(chain)

    def _parse_literal(self, description: str):
        """Store the text between the first pair of double quotes."""
        start = description.find('"') + 1
        end = description.find('"', start)
        self.literal = description[start:end]

    def __str__(self):
        # for parsing tests only
        if self.literal:
            return f'{self.index}: "{self.literal}"'
        return f'{self.index}: {" | ".join(str(chain) for chain in self.child_rules)}'
class Parser:
    """Matches strings against a set of numbered grammar rules.

    Matching starts at rule 0 and is greedy per alternative: the first
    alternative chain that matches wins (no backtracking across siblings).
    """

    def __init__(self):
        self.rules: Dict[int, Rule] = {}

    def add_rule(self, rule: Rule):
        """Register *rule* by its index; duplicate indices are rejected."""
        if rule.index in self.rules:
            raise ValueError(f"cannot add rule {rule.index} twice")
        self.rules[rule.index] = rule

    def match(self, text: str) -> bool:
        """True when rule 0 consumes *text* completely."""
        ok, leftover = self._match(0, text)
        return ok and not leftover

    def _match(self, rulenum: int, text: str) -> (bool, str):
        """Try to match a prefix of *text* against rule *rulenum*.

        Returns (matched, remaining_text); remaining_text feeds subsequent
        _match calls along a chain.
        """
        rule = self.rules[rulenum]
        # Terminal rule: a plain prefix comparison, no recursion needed.
        if rule.literal:
            if not text.startswith(rule.literal):
                return False, text
            return True, text[len(rule.literal):]
        # Non-terminal: the first alternative chain that matches wins.
        for chain in rule.child_rules:
            ok, leftover = self._match_rule_chain(chain, text)
            if ok:
                return True, leftover
        return False, text

    def _match_rule_chain(self, rulenums: List[int], text) -> (bool, str):
        """Match every rule in *rulenums* consecutively against *text*."""
        remaining = text
        for num in rulenums:
            ok, remaining = self._match(num, remaining)
            if not ok:
                # Callers ignore the leftover on failure.
                return False, remaining
        return True, remaining
def main():
    """Read grammar rules and messages from data.txt, print the match count.

    File layout: rule lines, a blank separator line, then one message per
    line. Counts messages fully matched by rule 0.
    """
    parser = Parser()
    with open("data.txt") as f:
        # Rules come first; the blank separator line ends the loop.
        for raw in iter(lambda: f.readline().strip(), ""):
            parser.add_rule(Rule(raw))
        # Remaining non-blank lines are the messages to test.
        matches = sum(1 for msg in iter(lambda: f.readline().strip(), "") if parser.match(msg))
    print(f"matches: {matches}")


if __name__ == "__main__":
    main()
|
"""
Copyright 2019, Institute for Systems Biology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Used as a daemon process to generate logs of system activity using the Stackdriver monitoring
system on Google VMs. Note that project and VM must have Stackdriver monitoring enabled. Note
that this just writes out lines to stdout, and should be wrapped by idle_log_wrapper.sh to
route the output to a set of rolled logs with a maximum size limit that covers enough history
to decide if we are idle.
Function log_a_point() can also be called in single-shot mode to provide a list of entries for
recent activity (multilog appears to do buffering, some Stackdriver metrics take 240 seconds to
refresh, and history logs might be several minutes old).
"""
from google.cloud import monitoring_v3
import sys
from datetime import datetime
import time
def get_a_series(client, project_name, interval, instance, metric, print_it):
    """Fetch one compute-engine metric series for *instance* and render text lines.

    Returns a list of "instance_name metric_name date value" strings, one per
    data point, optionally echoing each line to stdout when *print_it* is true.
    NOTE(review): uses the legacy positional list_time_series signature of
    google-cloud-monitoring (pre-2.0) — confirm the pinned library version.
    """
    results = client.list_time_series(
        project_name,
        'metric.type = "compute.googleapis.com/instance/{}" AND '
        'metric.label.instance_name = "{}"'.format(metric, instance),
        interval,
        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
    series_lines = []
    for result in results:
        # Strip the common prefix so lines read e.g. "cpu/utilization".
        type_name = result.metric.type.replace("compute.googleapis.com/instance/", "")
        instance_name = result.metric.labels['instance_name']
        # Points carry either a double or an int64 payload depending on the metric.
        is_float = result.value_type == monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE
        for point in result.points:
            date_string = datetime.utcfromtimestamp(float(point.interval.end_time.seconds)).strftime('%Y-%m-%d-%H:%M:%S')
            value = point.value.double_value if is_float else point.value.int64_value
            out_line = "{} {} {} {}".format(instance_name, type_name, date_string, value)
            series_lines.append(out_line)
            if print_it:
                print(out_line)
    return series_lines
def log_a_point(gcp_project_id, instance_name, interval_sec, print_it):
    """Query Stackdriver for instance metrics over the last *interval_sec* seconds.

    Returns all rendered lines (see get_a_series); when *print_it* is true
    each line is also echoed to stdout. Can be called one-shot to obtain a
    recent-activity snapshot.
    """
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(gcp_project_id)
    interval = monitoring_v3.types.TimeInterval()
    now = time.time()
    interval.end_time.seconds = int(now)
    interval.end_time.nanos = int(
        (now - interval.end_time.seconds) * 10 ** 9)
    interval.start_time.seconds = int(now - interval_sec)
    interval.start_time.nanos = interval.end_time.nanos
    #
    # Some of these numbers are only visible again 240 seconds after last query! Poll
    # at an appropriate interval!
    #
    # Data-driven loop instead of nine copy-pasted calls; order is preserved.
    metrics = (
        "cpu/utilization",
        "cpu/reserved_cores",
        "cpu/usage_time",
        "disk/read_bytes_count",
        "disk/write_bytes_count",
        "disk/read_ops_count",
        "disk/write_ops_count",
        "network/sent_bytes_count",
        "network/received_bytes_count",
    )
    all_series = []
    for metric in metrics:
        all_series += get_a_series(client, project_name, interval, instance_name, metric, print_it)
    return all_series
def main(args):
    """Daemon entry point: poll Stackdriver forever, printing one batch per cycle.

    args: [prog, gcp_project_id, instance_name, interval_sec, sleep_secs]
    """
    gcp_project_id = args[1]
    instance_name = args[2]
    interval_sec = int(args[3])
    sleep_secs = int(args[4])
    # Infinite poll loop; output is expected to be wrapped by
    # idle_log_wrapper.sh for log rotation (see module docstring).
    while True:
        log_a_point(gcp_project_id, instance_name, interval_sec, True)
        time.sleep(sleep_secs)
if __name__ == "__main__":
    main(sys.argv)
from random import sample

# Read the four students' names. input() already returns str, so the
# previous str(...) wrappers were redundant; a loop removes the repetition.
prompts = ('Primeiro aluno: ', 'Segundo aluno: ', 'Terceiro aluno: ', 'Quarto aluno: ')
lista = [input(p) for p in prompts]
# sample() with k equal to the list length yields a random permutation.
escolhido = sample(lista, 4)
print('Os sorteados para apresentarem os trabalhos \n{}: Primeiro \n{}: Segundo \n{}: Terceiro\n{}: Quarto'.format(escolhido[0], escolhido[1], escolhido[2], escolhido[3]))
|
"""Test modules for nested-dict."""
|
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from src.mesh_file_manager import MeshStructureFileManager
from src.mesh_parser import get_parser
from src.mesh_tree_uploader import get_uploader
from src.ontology_type import OntologyType
def main():
    """Download a MeSH ontology structure, parse it, and upload the tree.

    Command-line arguments:
        --url       location of the MeSH structure file (required)
        --type      ontology type; must be in OntologyType.get_allowed() (required)
        --tmp_path  optional scratch directory for the download

    Raises RuntimeError for an unsupported ontology type; any download/parse/
    upload error propagates after cleanup.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", type=str, required=True)
    parser.add_argument("--type", type=str, required=True)
    parser.add_argument("--tmp_path", type=str, required=False)
    args = parser.parse_args()
    url_path = args.url
    tmp_path = args.tmp_path
    ontology_type = args.type
    if ontology_type not in OntologyType.get_allowed():
        raise RuntimeError("Unsupported ontology type '%s'. Allowed types: %s" %
                           (ontology_type, ", ".join(OntologyType.get_allowed())))
    file_manager = MeshStructureFileManager(tmp_path, ontology_type)
    # try/finally replaces the previous duplicated cleanup (delete() both in
    # the except-handler and after the try) and the `raise e` re-raise;
    # exceptions now propagate unchanged after cleanup.
    try:
        path = file_manager.download(url_path)
        print("Mesh structure successfully downloaded to path '%s'" % path)
        tree, root_id = get_parser(ontology_type).parse(path)
        print("Mesh structure successfully parsed. Found '%d' records" % len(tree.nodes))
        get_uploader(ontology_type, tree, root_id).upload_tree()
        print("Mesh structure successfully uploaded!")
    finally:
        file_manager.delete()
if __name__ == "__main__":
    main()
|
from typing import Dict
# Tally gene sequences from dane_geny.txt by their length and report how many
# distinct lengths occur plus the size of the biggest group.
with open("dane_geny.txt") as f:
    lines = [raw.strip() for raw in f]
genes: Dict[int, int] = {}
for sequence in lines:
    length = len(sequence)
    genes[length] = genes.get(length, 0) + 1
print(f"liczba gatunków: {len(genes)}")
# default=0 matches the old behaviour of starting the running maximum at 0.
maks = max(genes.values(), default=0)
print(f"{maks=}")
|
#!/usr/bin/env python3
from abc import (
ABCMeta as _ABCMeta,
abstractproperty as _abstractproperty,
)
class Event(metaclass=_ABCMeta):
    """Marker base class for all input events."""
    pass
class QuitEvent(Event):
    """Request to terminate the application."""
    pass
class NavigateEvent(Event):
    """Abstract movement event carrying an (x, y) direction.

    Subclasses supply `x` and `y` as class attributes. Instances reject any
    attribute assignment (see __setattr__), making them effectively immutable.
    NOTE(review): ``abstractproperty`` is deprecated since Python 3.3 in
    favour of stacking ``@property`` over ``@abstractmethod``.
    """
    @_abstractproperty
    def x(self) -> int:
        raise NotImplementedError
    @_abstractproperty
    def y(self) -> int:
        raise NotImplementedError
    def __setattr__(self, key, value):
        # Instances are read-only: every attribute assignment is rejected.
        raise AttributeError("not writable")
class VerticalNavEvent(NavigateEvent):
    # Vertical movement has no horizontal component.
    x = 0
class HorizontalNavEvent(NavigateEvent):
    # Horizontal movement has no vertical component.
    y = 0
class UpNavEvent(VerticalNavEvent):
    y = -1
class DownNavEvent(VerticalNavEvent):
    y = 1
class LeftNavEvent(HorizontalNavEvent):
    x = -1
class RightNavEvent(HorizontalNavEvent):
    x = 1
if __name__ == '__main__':
    def _main():
        # Smoke test: instantiate events and log their MROs and directions.
        import logging
        logging.basicConfig(level=logging.DEBUG)
        # Event declares no abstract methods itself, so direct instantiation
        # succeeds despite the ABCMeta metaclass.
        ev = Event()
        logging.debug("%r", ev)
        logging.debug("%r", ev.__class__.__mro__)
        ev = UpNavEvent()
        logging.debug("%r", ev)
        logging.debug("%r", ev.__class__.__mro__)
        logging.debug("(%d, %d)", ev.x, ev.y)
        ev = DownNavEvent()
        logging.debug("%r", ev)
        logging.debug("%r", ev.__class__.__mro__)
        logging.debug("(%d, %d)", ev.x, ev.y)
    _main()
|
import sys
import time
from PyQt5.QtWidgets import (QApplication, QWidget, QFrame, QGridLayout,
QProgressBar, QPushButton, QCalendarWidget,
QLabel)
from PyQt5.QtGui import QIcon, QFont, QColor
from PyQt5.QtCore import Qt, QBasicTimer, QDate
from Game_Run import Person, Game
class Windows(QWidget):
    """Main window for the 'Tom vs Tony' auto-battler demo.

    Two progress bars represent the fighters' health; a QBasicTimer drives
    one combat round every 500 ms via timerEvent.
    """
    def __init__(self):
        super().__init__()
        self.game = Game()
        self.initUI()
        # True once the current game has ended; controls the button behaviour.
        self.finished = False
    def initUI(self):
        """Build the static widget layout (bars, labels, start button)."""
        self.setWindowTitle('汤姆 vs 托尼')
        self.setWindowIcon(QIcon('./res/apaki.ico'))
        self.setGeometry(400, 300, 400, 300)
        # Two health bars, both starting full.
        self.pbar = QProgressBar(self)
        self.pbar2 = QProgressBar(self)
        self.pbar.setValue(100)
        self.pbar2.setValue(100)
        self.btn = QPushButton("开始游戏", self)
        self.btn.clicked.connect(self.doAction)
        self.timer = QBasicTimer()
        # Labels: fighter names plus a battle-status line.
        self.label = QLabel(self)
        self.label.setText('汤姆')
        self.label.setFont(QFont('华文行楷', 14))
        self.label2 = QLabel(self)
        self.label2.setText('托尼')
        self.label2.setFont(QFont('华文行楷', 14))
        self.label3 = QLabel(self)
        self.label3.setText('战斗情报')
        self.label3.setFont(QFont('宋体', 12))
        # -------------------------------------------------
        grid = QGridLayout(self)
        grid.addWidget(self.label, 0, 0, 1, 1)
        grid.addWidget(self.pbar, 0, 1, 1, 4)
        grid.addWidget(self.label3, 1, 1, 1, 1)
        grid.addWidget(self.btn, 1, 4, 1, 1)
        grid.addWidget(self.label2, 2, 0, 1, 1)
        grid.addWidget(self.pbar2, 2, 1, 1, 4)
        self.setLayout(grid)
    def timerEvent(self, event):
        """One battle tick: announce the winner, or run a round and update bars."""
        if self.game.isgame_over():
            self.timer.stop()
            self.finished = True
            if self.game.who_win() == 'Tom':
                self.label3.setText('托尼死亡 -- 汤姆胜利')
            else:
                self.label3.setText('汤姆死亡 -- 托尼胜利')
            self.btn.setText('重新开始')
            return
        # ret == 0 means Tom attacked Tony this round (per the labels below).
        ret = self.game.run()
        if 0 == ret:
            self.label3.setText('攻击:汤姆 -> 托尼')
        else:
            self.label3.setText('攻击:汤姆 <- 托尼')
        hp1 = self.game.get_blood('Tom')
        hp2 = self.game.get_blood('Tony')
        self.pbar.setValue(hp1)
        self.pbar2.setValue(hp2)
    def doAction(self):
        """Start / pause / resume the fight, or reset after a finished game."""
        if self.finished is not True:
            if self.timer.isActive():
                self.timer.stop()
                self.btn.setText('继续')
            else:
                self.timer.start(500, self)
                self.btn.setText('进行中')
        else:
            # Reset: fresh game object, full health bars, restart the timer.
            self.label3.setText('战斗情报')
            self.pbar.setValue(100)
            self.pbar2.setValue(100)
            del self.game
            self.game = Game()
            self.finished = False
            self.timer.start(500, self)
    def showData(self, date):
        # NOTE(review): not referenced within this class; presumably intended
        # for a QCalendarWidget selection handler — confirm before removing.
        self.label.setText(date.toString())
if __name__ == "__main__":
    # Launch the Qt event loop with a single game window.
    app = QApplication(sys.argv)
    w = Windows()
    w.show()
    sys.exit(app.exec_())
|
import matplotlib.pyplot as plt
from src.configuration import *
# Plot labels and one pastel colour per weekday for the timetable chart.
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
colors = ['pink', 'lightgreen', 'lightblue', 'wheat', 'salmon']
day_label = 'Timetable'
# Figure size in inches (matplotlib's figsize units).
fig_width = 10
fig_height = 6
def convert_hour(hour):
    # Map a class slot index to its starting hour on the y-axis. Relies on
    # STARTING_HOUR and DIFFERENCE_BETWEEN_STARTING_CLASSES_IN_HOURS from the
    # star-import of src.configuration.
    return STARTING_HOUR + hour * DIFFERENCE_BETWEEN_STARTING_CLASSES_IN_HOURS
def convert_hour_to_text(hour):
    # Map a slot index to an (hour, minute) pair for on-plot time labels.
    # NOTE(review): uses the minute-based DIFFERENCE_BETWEEN_STARTING_CLASSES
    # constant, while convert_hour uses the *_IN_HOURS variant — confirm the
    # two configuration values stay in sync.
    return (STARTING_HOUR + hour * DIFFERENCE_BETWEEN_STARTING_CLASSES // MINUTES_IN_HOUR,
            hour * DIFFERENCE_BETWEEN_STARTING_CLASSES % MINUTES_IN_HOUR)
def plot_timetable(student):
    """Render the student's weekly timetable as coloured blocks on a day/hour grid."""
    fig = plt.figure(figsize=(fig_width, fig_height))
    for classwork in student.timetable:
        classwork_name = classwork.classwork_name
        # 0.52 / 0.96 offsets position the block just inside the day column
        # (presumably to leave a small gutter between days).
        day = classwork.day + 0.52
        start_hour = convert_hour(classwork.hour)
        end_hour = start_hour + DURATION_OF_ONE_CLASSWORK_IN_HOURS
        # NOTE(review): colors[int(day)] indexes the 5-element colour list by
        # day number; if classwork.day can reach 5 (or is 1-based) this
        # overruns the list — confirm the day range.
        plt.fill_between([day, day + 0.96], [start_hour, start_hour], [end_hour, end_hour],
                         color=colors[int(day)], edgecolor='k', linewidth=0.5)
        timetable_hour = convert_hour_to_text(classwork.hour)
        plt.text(day + 0.02, start_hour + 0.05, '{0}:{1:0>2}'.format(int(timetable_hour[0]), int(timetable_hour[1])),
                 va='top', fontsize=7)
        plt.text(day + 0.48, (start_hour + end_hour) * 0.5, classwork_name, ha='center', va='center', fontsize=11)
    ax = fig.add_subplot(111)
    ax.yaxis.grid()
    ax.set_xlim(0.5, len(days) + 0.5)
    # Inverted y-axis: earlier hours appear at the top of the chart.
    ax.set_ylim(20, 8)
    ax.set_xticks(range(1, len(days) + 1))
    ax.set_xticklabels(days)
    ax.set_ylabel('Time')
    # Mirror the axes on the top/right edges for readability.
    ax2 = ax.twiny().twinx()
    ax2.set_xlim(ax.get_xlim())
    ax2.set_ylim(ax.get_ylim())
    ax2.set_xticks(ax.get_xticks())
    ax2.set_xticklabels(days)
    ax2.set_ylabel('Time')
    plt.title(day_label, y=1.07)
    plt.show()
|
"""The solaredge component."""
|
import procbridge.procbridge as procbridge
__all__ = ['ProcBridgeServer', 'ProcBridge']
|
from argparse import ArgumentParser
import torch
import torchmetrics
import pytorch_lightning as pl
from .model_selection import load_model
from .scheduler import WarmupCosineSchedule
class LitVisionTransformer(pl.LightningModule):
    """Lightning wrapper around a Vision Transformer classification backbone.

    Implements the train/val/test steps with cross-entropy loss and
    torchmetrics accuracy, plus optimizer and warmup-cosine scheduler
    construction from the parsed CLI arguments.
    """
    def __init__(self, args):
        super().__init__()
        self.args = args
        self.criterion = torch.nn.CrossEntropyLoss()
        # One Accuracy instance per stage so their internal state stays separate.
        self.train_acc = torchmetrics.Accuracy()
        self.val_acc = torchmetrics.Accuracy()
        self.test_acc = torchmetrics.Accuracy()
        self.backbone = load_model(args)
    def forward(self, x):
        # use forward for inference/predictions
        embedding = self.backbone(x)
        return embedding
    def training_step(self, batch, batch_idx):
        """Forward one batch, log loss/accuracy/LR, return the loss for backprop."""
        x, y = batch
        y_hat = self.backbone(x)
        loss = self.criterion(y_hat, y)
        self.log('train_loss', loss, on_epoch=True, on_step=True)
        self.train_acc(y_hat.softmax(dim=1), y)
        self.log('train_acc', self.train_acc, on_epoch=True, on_step=False)
        # Log the current learning rate so the schedule is visible in dashboards.
        curr_lr = self.optimizers().param_groups[0]['lr']
        self.log('learning_rate', curr_lr, on_epoch=False, on_step=True)
        return loss
    def validation_step(self, batch, batch_idx):
        """Compute and log validation loss and accuracy for one batch."""
        x, y = batch
        y_hat = self.backbone(x)
        loss = self.criterion(y_hat, y)
        self.val_acc(y_hat.softmax(dim=-1), y)
        metrics = {'val_acc': self.val_acc, 'val_loss': loss}
        self.log_dict(metrics, on_epoch=True, on_step=False)
    def test_step(self, batch, batch_idx):
        """Accumulate test accuracy over the test set."""
        x, y = batch
        y_hat = self.backbone(x)
        self.test_acc(y_hat.softmax(dim=-1), y)
        self.log('test_acc', self.test_acc, on_epoch=True, on_step=False)
    def configure_optimizers(self):
        """Build Adam or SGD plus a per-step warmup-cosine LR schedule.

        NOTE(review): relies on args.total_steps, which is not registered in
        add_model_specific_args below — presumably added by another parser;
        confirm.
        """
        if self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(self.parameters(),
                                         lr=self.args.learning_rate, weight_decay=self.args.weight_decay)
        else:
            optimizer = torch.optim.SGD(self.parameters(), lr=self.args.learning_rate,
                                        momentum=0.9, weight_decay=self.args.weight_decay)
        scheduler = {'scheduler': WarmupCosineSchedule(
            optimizer, warmup_steps=self.args.warmup_steps,
            t_total=self.args.total_steps),
            'name': 'learning_rate', 'interval': 'step', 'frequency': 1}
        return [optimizer], [scheduler]
    @staticmethod
    def add_model_specific_args(parent_parser):
        """Append model/optimizer CLI options to *parent_parser*; return the parser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--optimizer', choices=['sgd', 'adam'], default='sgd')
        parser.add_argument('--learning_rate', default=0.001, type=float,
                            help='Initial learning rate.')
        parser.add_argument('--weight_decay', type=float, default=0.00)
        parser.add_argument('--warmup_steps', type=int, default=1000, help='Warmup steps for LR scheduler.')
        parser.add_argument('--model_name',
                            choices=['Ti_4', 'Ti_8', 'Ti_16', 'Ti_32', 'S_4', 'S_8', 'S_16', 'S_32',
                                     'B_4', 'B_8', 'B_16', 'B_32', 'L_16', 'L_32', 'B_16_in1k'],
                            default='B_16_in1k', help='Which model architecture to use')
        parser.add_argument('--pretrained', action='store_true',
                            help='Loads pretrained model if available')
        parser.add_argument('--checkpoint_path', type=str, default=None)
        parser.add_argument('--transfer_learning', action='store_true',
                            help='Load partial state dict for transfer learning'
                                 'Resets the [embeddings, logits and] fc layer for ViT')
        parser.add_argument('--load_partial_mode', choices=['full_tokenizer', 'patchprojection',
                            'posembeddings', 'clstoken', 'patchandposembeddings',
                            'patchandclstoken', 'posembeddingsandclstoken', None], default=None,
                            help='Load pre-processing components to speed up training')
        parser.add_argument('--interm_features_fc', action='store_true',
                            help='If use this flag creates FC using intermediate features instead of only last layer.')
        parser.add_argument('--conv_patching', action='store_true',
                            help='If use this flag uses a small convolutional stem instead of single large-stride convolution for patch projection.')
        return parser
|
import torch
import torch.nn.functional as F
from torch import nn
class MLP_Net0(nn.Module):
    """Small two-layer MLP for flattened 28x28 inputs (784 -> 21 -> 10).

    NOTE(review): the output layer also passes through ReLU, clamping logits
    to be non-negative — verify this is intended before pairing with a
    softmax/cross-entropy loss.
    """

    def __init__(self):
        super(MLP_Net0, self).__init__()
        self.linear1 = nn.Linear(784, 21)
        self.linear2 = nn.Linear(21, 10)

    def forward(self, x):
        h = torch.flatten(x, start_dim=1)  # (N, *) -> (N, 784)
        h = self.linear1(h).relu()
        return self.linear2(h).relu()
class MLP_Net1(nn.Module):
    """Wider two-layer MLP for flattened 28x28 inputs (784 -> 85 -> 10).

    NOTE(review): as with MLP_Net0, the final layer output is ReLU-clamped.
    """

    def __init__(self):
        super(MLP_Net1, self).__init__()
        self.linear1 = nn.Linear(784, 85)
        self.linear2 = nn.Linear(85, 10)

    def forward(self, x):
        h = torch.flatten(x, start_dim=1)  # (N, *) -> (N, 784)
        h = self.linear1(h).relu()
        return self.linear2(h).relu()
class CNN_Net0(nn.Module):
    """LeNet-style CNN for single-channel 28x28 inputs (8/16 feature maps)."""

    def __init__(self):
        super(CNN_Net0, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, 5)
        self.conv2 = nn.Conv2d(8, 16, 5)
        self.fc1 = nn.Linear(16 * 4 * 4, 32)
        self.fc2 = nn.Linear(32, 10)

    def forward(self, x):
        h = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)  # 28 -> 24 -> 12
        h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)  # 12 -> 8 -> 4
        h = h.flatten(start_dim=1)                     # (N, 16*4*4)
        h = F.relu(self.fc1(h))
        return F.relu(self.fc2(h))
class CNN_Net1(nn.Module):
    """Wider LeNet-style CNN for single-channel 28x28 inputs (32/64 feature maps)."""

    def __init__(self):
        super(CNN_Net1, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.conv2 = nn.Conv2d(32, 64, 5)
        self.fc1 = nn.Linear(64 * 4 * 4, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        h = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)  # 28 -> 24 -> 12
        h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)  # 12 -> 8 -> 4
        h = h.flatten(start_dim=1)                     # (N, 64*4*4)
        h = F.relu(self.fc1(h))
        return F.relu(self.fc2(h))
class CNN_Net_Regularisation(nn.Module):
    """CNN_Net0 topology plus dropout: 2d dropout after the first pool,
    element-wise dropout before the output head."""

    def __init__(self):
        super(CNN_Net_Regularisation, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, 5)
        self.conv2 = nn.Conv2d(8, 16, 5)
        self.fc1 = nn.Linear(16 * 4 * 4, 32)
        self.fc2 = nn.Linear(32, 10)
        self.dropout = nn.Dropout(p=0.5)
        self.dropout2d = nn.Dropout2d(p=0.5)

    def forward(self, x):
        h = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)  # 28 -> 24 -> 12
        h = self.dropout2d(h)                          # drop whole feature maps
        h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)  # 12 -> 8 -> 4
        h = h.flatten(start_dim=1)
        h = self.dropout(F.relu(self.fc1(h)))
        return F.relu(self.fc2(h))
# namespace imports
import subprocess # noqa
# FIXME
#from gevent import subprocess
from gevent.queue import Queue # noqa
from gevent import Greenlet as Thread # noqa
from gevent import joinall, sleep # noqa
|
# -*- coding: utf-8 -*-
import unittest2
import redis
from apps.database.session import db, cache
from apps.controllers.router import app
from apps.database.models import Test
class TestDatabase(unittest2.TestCase):
    """Smoke tests for the SQL database (via SQLAlchemy) and Redis connections."""
    def setUp(self):
        # Flask test client plus one seeded Test row to query against.
        self.app = app.test_client()
        self.app.testing = True
        test = Test('test01')
        db.session.add(test)
        db.session.commit()
    def tearDown(self):
        # Remove the fixture row inserted in setUp.
        Test.query.filter_by(message='test01').delete()
        db.session.commit()
    def test_connect_db(self):
        # The seeded row should be retrievable -> DB connection works.
        rows = Test.query.filter_by(message='test01').all()
        self.assertEqual(len(rows), 1)
    def test_connect_redis(self):
        # Best-effort check: a reachable Redis returns a non-empty client list.
        # NOTE(review): a ConnectionError is only printed, so this test still
        # passes when Redis is down — confirm that is intended.
        try:
            client_list = cache.client_list()
            # BUG FIX: assertIsNot compared identity against a fresh [] literal
            # and could never fail; assertNotEqual checks the value.
            self.assertNotEqual(client_list, [])
        except redis.exceptions.ConnectionError as e:
            print(e)
if __name__ == '__main__':
    unittest2.main()
|
from flask import render_template, g, url_for, redirect, request, Response
from sqlalchemy import and_
from scrimmage import app, db
from scrimmage.models import User, Team, TeamJoinRequest, GameRequest, GameRequestStatus, Game, Announcement, Tournament
from scrimmage.decorators import login_required, team_required, set_flash, sponsor_or_team_required
@app.route('/')
def index():
    """Homepage with three variants: logged-out, no-team, and team dashboard.

    Branches:
      * anonymous      -> logged_out.html with public announcements
      * no team yet    -> no_team.html with requestable teams + pending request
      * member of team -> homepage.html with challengeable teams and requests
    """
    games_to_show = int(g.settings['recent_games_to_show'])
    recent_games = Game.query.order_by(Game.create_time.desc()).limit(games_to_show).all()
    if not g.is_logged_in:
        # `== True` / `== False` below are intentional SQLAlchemy column
        # comparisons, not Python boolean checks.
        announcements = Announcement.query.filter(Announcement.is_public == True).order_by(Announcement.create_time.desc()).all()
        return render_template('logged_out.html', recent_games=recent_games, announcements=announcements)
    announcements = Announcement.query.order_by(Announcement.create_time.desc()).limit(1).all()
    if not g.team:
        join_request = TeamJoinRequest.query.filter(TeamJoinRequest.kerberos == g.kerberos).one_or_none()
        joinable_teams = Team.query.filter(and_(Team.is_disabled == False, Team.must_autoaccept == False)).all()
        requestable_teams = [team for team in joinable_teams if team.can_be_requested()]
        return render_template('no_team.html', announcements=announcements, teams=requestable_teams, join_request=join_request)
    teams = Team.query.filter(Team.is_disabled == False).all()
    challengeable_teams = [team for team in teams if team.can_be_challenged()]
    # NOTE(review): the user's own team is re-added when it is NOT
    # challengeable — presumably so it still appears in the list; confirm.
    if not g.team.can_be_challenged():
        challengeable_teams.append(g.team)
    return render_template('homepage.html',
                           challengeable_teams=challengeable_teams,
                           pending_requests=g.team.pending_requests(),
                           recent_games=recent_games,
                           announcements=announcements)
@app.route('/request_team', methods=['POST'])
@login_required
def request_team():
    """Create a join request from the current user to the selected team."""
    team = Team.query.get(int(request.form['team_id']))
    # NOTE(review): assert disappears under `python -O`; an explicit
    # abort(400) would be safer for request validation.
    assert team.can_be_requested()
    join_request = TeamJoinRequest(g.kerberos, team)
    db.session.add(join_request)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/request_team/cancel', methods=['POST'])
@login_required
def cancel_team_request():
    """Withdraw the current user's pending team-join request."""
    pending = TeamJoinRequest.query.filter(TeamJoinRequest.kerberos == g.kerberos).one_or_none()
    # NOTE(review): assert is stripped under `python -O`; consider abort(404).
    assert pending is not None
    db.session.delete(pending)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/create_team', methods=['POST'])
@login_required
def create_team():
    """Create a new team with the current user as its first member.

    Returns a plain-text 400 when the requested name is already taken.
    """
    team_name = request.form['team_name']
    if Team.query.filter(Team.name == team_name).count() != 0:
        return Response("A team with that name already exists. Try again.", content_type="text/plain", status=400)
    team = Team(team_name)
    db.session.add(team)
    user = User(g.kerberos, team)
    db.session.add(user)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/announcements')
@login_required
def announcements():
announcements = Announcement.query.order_by(Announcement.create_time.desc()).all()
return render_template('announcements.html', announcements=announcements)
@app.route('/challenge', methods=['POST'])
@team_required
def challenge():
    """Issue a game request against another team, auto-accepting when allowed.

    Flow: validate settings and eligibility, create the GameRequest, then
    either auto-accept (spawning the game immediately) or leave it pending
    for the opponent to answer.
    """
    team_id = int(request.form['team_id'])
    team = Team.query.get(team_id)
    # NOTE(review): these asserts act as authorization checks but vanish under
    # `python -O`; explicit aborts would be safer.
    assert g.settings['challenges_enabled'].lower() == 'true'
    assert g.settings['challenges_only_reference'] == 'false' or team.must_autoaccept
    assert g.team.can_challenge()
    assert team.can_be_challenged()
    g_request = GameRequest(g.team, team)
    db.session.add(g_request)
    if g_request.should_autoaccept():
        if g.team.can_initiate():
            game = g_request.accept(was_automatic=True)
            db.session.add(game)
            set_flash("Challenged {}. The game is now in the queue".format(g_request.opponent.name), level='success')
            db.session.commit()
            # Spawn after the commit — presumably so the game row is already
            # persisted when the spawned worker looks for it; confirm.
            game.spawn()
        else:
            set_flash("Game could not be spawned since you have too many games currently running. Please wait a little bit.", level='warning')
    else:
        set_flash("Challenged {}. Since they are lower ELO, they must accept the request.".format(g_request.opponent.name), level='success')
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/answer_request/<int:request_id>', methods=['POST'])
@team_required
def answer_request(request_id):
    """Accept or reject an incoming game request addressed to the current team.

    The form's `action` field selects the branch; acceptance spawns the game
    only when the team is allowed to start another one.
    """
    g_request = GameRequest.query.get(request_id)
    # NOTE(review): assert vanishes under `python -O`; this is an
    # authorization check and would be safer as an explicit abort(403).
    assert g_request.opponent == g.team
    action = request.form['action']
    if action == 'reject':
        g_request.reject()
        db.session.commit()
        set_flash("Rejected {}'s game request.".format(g_request.challenger.name), level='success')
    elif action == 'accept':
        if g.team.can_initiate():
            game = g_request.accept(was_automatic=False)
            db.session.add(game)
            db.session.commit()
            # Spawn after the commit, mirroring the challenge() flow above.
            game.spawn()
            set_flash("Accepted {}'s game request. It is now in the game queue.".format(g_request.challenger.name), level='success')
        else:
            set_flash("Could not accept the game request, since you have too many games currently running. Please wait a little bit", level='warning')
    return redirect(url_for('index'))
@app.route('/tournaments')
@sponsor_or_team_required
def show_tournaments():
    """List tournaments, newest first; private ones only for admins/sponsors."""
    query = Tournament.query.order_by(Tournament.create_time.desc())
    is_privileged = g.is_admin or g.is_sponsor
    if not is_privileged:
        # `== False` is an intentional SQLAlchemy column comparison.
        query = query.filter(Tournament.is_private == False)
    return render_template('tournaments.html', tournaments=query.all())
|
from tensorflow.keras import callbacks as keras_callbacks
from glrec.utils import log
from glrec.train import utils as train_utils
class GsutilRsync(keras_callbacks.Callback):
    """Keras callback that syncs a local directory to GCS after every epoch."""
    def __init__(self, local_storage_dir, gs_storage_dir, **kwargs):
        super().__init__(**kwargs)
        self._local_storage_dir = local_storage_dir
        self._gs_storage_dir = gs_storage_dir
    def on_epoch_end(self, epoch, logs):
        # Mirror everything written locally this epoch up to the bucket.
        train_utils.cmdline_sync_dir_with_gcs(
            self._local_storage_dir, self._gs_storage_dir)
# Callbacks defined in this module, resolvable by name alongside tf.keras ones.
_local_callback_mapping = {
    'GsutilRsync': GsutilRsync,
}


def get_callback(callback, kwargs):
    """Instantiate a callback by name, from tf.keras or the local mapping.

    Arguments:
        callback: class name, e.g. 'EarlyStopping' or 'GsutilRsync'.
        kwargs: dict of constructor keyword arguments.
    Raises ValueError for an unknown callback name.
    """
    if hasattr(keras_callbacks, callback):
        log.info('Loading `{callback}` callback from tf.keras with '
                 'parameters {kwargs}'.format(
                     callback=callback, kwargs=kwargs))
        return getattr(keras_callbacks, callback)(**kwargs)
    if callback in _local_callback_mapping:
        # BUG FIX: this branch previously constructed the instance but fell
        # through without returning it, so callers received None.
        return _local_callback_mapping[callback](**kwargs)
    # BUG FIX: the message was never formatted (missing .format/f-string).
    raise ValueError('Callback `{callback}` is not supported.'.format(callback=callback))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The user's intents definitions. It serves as abstraction to separate NLU processing
logic from response processing routines. Allows to easy register in the system as many
intents as it needed.
"""
from chatbot1.command import GreetCommand, AddItemCommand, RemoveItemCommand, ShowItemsCommand, ClearListCommand, ShowStatsCommand,WishBackCommand,SuggestCorona
class Intent(object):
    """Base class for user intents.

    An intent owns an ordered list of commands; executing the intent runs
    each command against the chatbot and returns the last command's result.
    """

    def __init__(self, bot, intent_name, context):
        """
        Create a new intent for the specified chatbot.

        Arguments:
            bot          the chatbot
            intent_name  the intent name
            context      the execution context holding configuration parameters
        """
        self.chatbot = bot
        self.name = intent_name
        self.context = context
        self.commands = []
        self.initCommands()

    def execute(self, nlu_data):
        """
        Execute this intent by applying each registered command to the
        chatbot. Returns the result of the last command, or None when no
        commands are registered.
        """
        # Bug fix: `val` was previously unbound when `self.commands` was
        # empty, which made `return val` raise UnboundLocalError.
        val = None
        for command in self.commands:
            val = command.do(self.chatbot, None)
        return val

    def initCommands(self):
        """
        Hook for subclasses to register their specific commands.
        """
        pass
class AddItemsIntent(Intent):
    """Intent that adds an item to the list and can show the list."""

    def initCommands(self):
        # commands[0] adds an item, commands[1] shows the list.
        self.commands.append(AddItemCommand())
        self.commands.append(ShowItemsCommand())

    def execute(self, data):
        """Add the first recognised entity and report success or failure.

        Asks the user to paraphrase when the NLU confidence is below the
        configured threshold.
        """
        confidence = data['intent']['confidence']
        confidence_threshold = self.context['confidence_threshold']
        print(confidence)
        if confidence < confidence_threshold:
            s = 'I\'m sorry! Could you please paraphrase!'
            print(s)
            return s
        # NOTE: only the first entity is ever processed -- the loop returns
        # on its first iteration (original behaviour preserved).
        for entity in data['entities']:
            added = self.commands[0].do(self.chatbot, entity['value'])
            return "Done" if added == 1 else "Sorry! Can't add item!"
        # No entities recognised: just show the current list.
        self.commands[1].do(self.chatbot, None)
class RemoveItemsIntent(Intent):
    """Intent that removes an item from the list and can show the list."""

    def initCommands(self):
        # commands[0] removes an item, commands[1] shows the list.
        self.commands.append(RemoveItemCommand())
        self.commands.append(ShowItemsCommand())

    def execute(self, data):
        """Remove the first recognised entity and report success or failure."""
        confidence = data['intent']['confidence']
        confidence_threshold = self.context['confidence_threshold']
        print(confidence)
        if confidence < confidence_threshold:
            s = 'I\'m sorry! Could you please paraphrase!'
            print(s)
            return s
        # NOTE: only the first entity is ever processed -- the loop returns
        # on its first iteration (original behaviour preserved).
        for entity in data['entities']:
            removed = self.commands[0].do(self.chatbot, entity['value'])
            return "Done..." if removed == 1 else "Sorry! Can't remove item!"
        # No entities recognised: just show the current list.
        self.commands[1].do(self.chatbot, None)
class HelloIntent(Intent):
    """Intent that greets the user."""

    def initCommands(self):
        # Dead debug prints and the useless `return list.append(...)`
        # (always None) were removed; behaviour is unchanged.
        self.commands.append(GreetCommand())
class WishBackIntent(Intent):
    """Intent that wishes the user back (returns a greeting in kind)."""

    def initCommands(self):
        self.commands.append(WishBackCommand())
class ShowItemsIntent(Intent):
    """Intent that shows the current item list."""

    def initCommands(self):
        self.commands.append(ShowItemsCommand())
class ClearListIntent(Intent):
    """Intent that clears the list and then shows the (empty) list."""

    def initCommands(self):
        self.commands.append(ClearListCommand())
        self.commands.append(ShowItemsCommand())
class ShowStatsIntent(Intent):
    """Intent that shows usage statistics."""

    def initCommands(self):
        self.commands.append(ShowStatsCommand())
class GetCoronaUpdate(Intent):
    """Intent that reports a corona update for each recognised location."""

    def initCommands(self):
        self.commands.append(SuggestCorona())

    def execute(self, data):
        """Concatenate the corona update for every recognised entity.

        Asks the user to paraphrase when the NLU confidence is below the
        configured threshold.
        """
        confidence = data['intent']['confidence']
        confidence_threshold = self.context['confidence_threshold']
        if confidence < confidence_threshold:
            s = 'I\'m sorry! Could you please paraphrase!'
            print(s)
            return s
        # Build one reply from the update for each entity value.
        reply = ""
        for entity in data['entities']:
            reply = reply + self.commands[0].do(self.chatbot, entity['value'])
        return reply
|
import bpy
from bpy import context as context
# The ARKit-style facial blendshape (shape key) names, grouped by region:
# brows, eye gaze, lids, cheeks/nose, jaw/mouth, and tongue.
fc_skList = [
    'browInnerUp', 'browDown_L', 'browDown_R', 'browOuterUp_L', 'browOuterUp_R',
    'eyeLookUp_L', 'eyeLookUp_R', 'eyeLookDown_L', 'eyeLookDown_R', 'eyeLookIn_L', 'eyeLookIn_R', 'eyeLookOut_L', 'eyeLookOut_R',
    'eyeBlink_L', 'eyeBlink_R', 'eyeSquint_L', 'eyeSquint_R', 'eyeWide_L', 'eyeWide_R',
    'cheekPuff', 'cheekSquint_L', 'cheekSquint_R', 'noseSneer_L', 'noseSneer_R',
    'jawOpen', 'jawForward', 'jawLeft', 'jawRight', 'mouthFunnel', 'mouthPucker', 'mouthLeft', 'mouthRight',
    'mouthRollUpper', 'mouthRollLower', 'mouthShrugUpper', 'mouthShrugLower', 'mouthClose',
    'mouthSmile_L', 'mouthSmile_R', 'mouthFrown_L', 'mouthFrown_R', 'mouthDimple_L', 'mouthDimple_R',
    'mouthUpperUp_L', 'mouthUpperUp_R', 'mouthLowerDown_L', 'mouthLowerDown_R', 'mouthPress_L', 'mouthPress_R',
    'mouthStretch_L', 'mouthStretch_R', 'tongueOut']
def addPropsInPbone():
    """Create a zeroed custom property on the pose bone for every shape key.

    NOTE(review): relies on module-level globals `obj` (mesh object) and
    `pbone` (pose bone) being defined before this runs -- neither is defined
    in this file; confirm the intended setup in the Blender console.
    """
    sk = obj.data.shape_keys
    skb = sk.key_blocks
    # TODO make sure, its workable with 2.81 by making property exportable
    # Index 0 is the Basis key, so start at 1.
    for i in range(1,len(skb)):
        print(skb[i])
        pbone[skb[i].name]=0.0
def addDriversToShapeKeys():
    """Drive every shape key's value from a matching pose-bone custom property.

    For each non-Basis shape key, adds a SCRIPTED driver whose single
    variable reads the same-named custom property on `pbone` of `rig`.

    NOTE(review): relies on module-level globals `obj`, `rig` and `pbone`,
    none of which are defined in this file -- confirm the intended setup.
    """
    sk = obj.data.shape_keys
    skb = sk.key_blocks
    for i in range(1,len(skb)):
        #print(skb[i])
        #pbone[skb[i].name]=0.0
        # Data path of this shape key's value on the shape-key datablock.
        data_path = "key_blocks[\"" + skb[i].name + "\"].value"
        print (data_path)
        dr = obj.data.shape_keys.driver_add(data_path)
        dr.driver.type='SCRIPTED'
        var = dr.driver.variables.new()
        var.type = 'SINGLE_PROP'
        var.targets[0].id_type = 'OBJECT'
        var.targets[0].id = rig
        # Read the same-named custom property on the pose bone.
        var.targets[0].data_path = "pose.bones[\""+pbone.name+"\"][\""+ skb[i].name +"\"]"
        print (var.targets[0].data_path)
        dr.driver.expression = "var"
#addPropsInPbone()
#addDriversToShapeKeys()
def isShapeKeyAnimated(sk, attr):
    """Return True when any F-curve on `sk`'s action references `attr`."""
    anim = sk.animation_data
    if anim is None or anim.action is None:
        return False
    return any(attr in curve.data_path for curve in anim.action.fcurves)
def isShapeKeyDriven(sk, attr):
    """Return True when a driver on `sk` targets `attr`'s data path."""
    drivers = sk.animation_data.drivers
    if drivers is None or len(drivers) == 0:
        return False
    return any(attr in drv.data_path for drv in drivers)
def removeShapeKeyDriver(sk, attr):
    """Drop the driver on shape key `attr`, if one exists."""
    if not isShapeKeyDriven(sk, attr):
        return
    # e.g. sk.driver_remove('key_blocks["eye.L"].value')
    sk.driver_remove("key_blocks[\"" + attr + "\"].value")
def removeShapeKeyDrivers():
    """Remove every shape-key driver. Placeholder -- not implemented yet."""
    pass
class SkZeroAll_OT_Operator (bpy.types.Operator):
    '''Reset All Shapekey values to zero'''
    bl_idname = "ffgen.sk_zero_all"
    bl_label = "ffgen_SkZeroAll"
    bl_options = {"REGISTER","UNDO"}

    @classmethod
    def poll(cls, context):
        """Allow only on a mesh that actually has shape keys, in the 3D view."""
        if context.area.type == 'VIEW_3D':
            # Bug fix: this used to compare shape_keys against the *string*
            # 'None', which is always unequal, so the check never failed.
            if (context.active_object.type == 'MESH') and \
                    (context.active_object.data.shape_keys is not None):
                return (1)
            else:
                return (0)

    def execute(self, context):
        """Set every shape key (except the Basis at index 0) to 0."""
        obj = context.object
        sk = obj.data.shape_keys
        if sk:
            skb = sk.key_blocks
            print("Shape Key Blocks %s" % len(skb))
            for i in range(1, len(skb)):
                skb[i].value = 0
            self.report({'INFO'}, "Done")
        else:
            self.report({'INFO'}, "Select some mesh object")
        return {"FINISHED"}
class SkAnimateAll_OT_Operator (bpy.types.Operator):
    '''Animate All Shapekey values to 1 for each frame(helpful for testing)'''
    bl_idname = "ffgen.sk_animate_all"
    bl_label = "ffgen_SkAnimateAll"
    bl_options = {"REGISTER","UNDO"}

    @classmethod
    def poll(cls, context):
        """Allow only on a mesh that actually has shape keys, in the 3D view."""
        if context.area.type == 'VIEW_3D':
            # Bug fix: this used to compare shape_keys against the *string*
            # 'None', which is always unequal, so the check never failed.
            if (context.active_object.type == 'MESH') and \
                    (context.active_object.data.shape_keys is not None):
                return (1)
            else:
                return (0)

    def execute(self, context):
        """Keyframe shape key i to peak at frame i (0 at frames i-1 and i+1)."""
        obj = context.object
        sk = obj.data.shape_keys
        if sk:
            skb = sk.key_blocks
            print("Shape Key Blocks %s" % len(skb))
            for i in range(1, len(skb)):
                # Triangle pulse: off, full, off on consecutive frames, so
                # scrubbing the timeline previews one shape key per frame.
                skb[i].value = 0
                skb[i].keyframe_insert("value", frame=i - 1)
                skb[i].value = 1
                skb[i].keyframe_insert("value", frame=i)
                skb[i].value = 0
                skb[i].keyframe_insert("value", frame=i + 1)
            self.report({'INFO'}, "Done")
        else:
            self.report({'INFO'}, "Select some mesh object")
        return {"FINISHED"}
class SkBindToBone_OT_Operator (bpy.types.Operator):
    '''1- Select Mesh with Shapekeys\n2- Select Control Object(Armature Pose Bone)\n This action will Create property on pbone (shapekey name)\n Then Bind shapekey via expression to relevant property'''
    bl_idname = "ffgen.sk_bind_to_bone"
    bl_label = "ffgen_SkBindToBone"
    bl_options = {"REGISTER","UNDO"}
    @classmethod
    def poll(cls,context):
        # Require exactly two selected objects in the 3D view: a mesh (the
        # shape-key source) plus an active armature (the driver target).
        if context.area.type=='VIEW_3D':
            if (len(context.selected_objects)==2) and (context.selected_objects[0].type=='MESH') and (context.active_object.type =='ARMATURE'):
                return (1)
            else:
                return(0)
    def execute(self, context):
        # For every shape key whose name matches the filter string, create a
        # same-named custom property on the active pose bone and drive the
        # shape key's value from that property.
        skSrcObj = context.selected_objects[0]
        armObj = context.active_object
        pbone = context.active_pose_bone
        sk = skSrcObj.data.shape_keys
        if sk :
            skb = sk.key_blocks
            print ("Shape Keys Count:",len(skb))
            # User-supplied substring filter from the scene property group.
            fs = bpy.context.scene.ff_model_prop_grp.sk_filterStr
            print ("FILTER STRING:",fs)
            for i in range(1,len(skb)):
                skn=skb[i].name
                # print ("CHECKING..", skn)
                if (skn.find(fs)!= -1):
                    # PROCESS STEPS
                    # 1- if already driven, clean this
                    removeShapeKeyDriver(sk,skn) # safety step
                    # 2- if property doesn't exist create this
                    pbone[skn]=0.0 #
                    # 3- setup driver
                    data_path = "key_blocks[\"" + skn + "\"].value"
                    print (data_path)
                    dr = skSrcObj.data.shape_keys.driver_add(data_path)
                    dr.driver.type='SCRIPTED'
                    var = dr.driver.variables.new()
                    var.type = 'SINGLE_PROP'
                    # var.name = var
                    var.targets[0].id_type = 'OBJECT'
                    var.targets[0].id = armObj
                    # The driver variable reads the pose-bone custom
                    # property created in step 2.
                    var.targets[0].data_path = "pose.bones[\""+pbone.name+"\"][\""+ skn +"\"]"
                    print (var.targets[0].data_path)
                    dr.driver.expression = "var"
                    # 4- TODO ideally if already animated, data should be saved by moving to property
            self.report({'INFO'}, "Done")
        else :
            self.report({'INFO'}, "No ShapeKeys Found")
        return{"FINISHED"}
# -*- coding: utf-8 -*-
"""
some function by metaphy,2007-04-03,copyleft
version 0.2
"""
import urllib, httplib, urlparse
import re
import random
"""judge url exists or not,by others"""
def httpExists(url):
host, path = urlparse.urlsplit(url)[1:3]
if ':' in host:
# port specified, try to use it
host, port = host.split(':', 1)
try:
port = int(port)
except ValueError:
print 'invalid port number %r' % (port,)
return False
else:
# no port specified, use default port
port = None
try:
connection = httplib.HTTPConnection(host, port=port)
connection.request("HEAD", path)
resp = connection.getresponse( )
if resp.status == 200: # normal 'found' status
found = True
elif resp.status == 302: # recurse on temporary redirect
found = httpExists(urlparse.urljoin(url,resp.getheader('location', '')))
else: # everything else -> not found
print "Status %d %s : %s" % (resp.status, resp.reason, url)
found = False
except Exception, e:
print e.__class__, e, url
found = False
return found
"""get html src,return lines[]"""
def gGetHtmlLines(url):
if url==None : return
if not httpExists(url): return
try:
page = urllib.urlopen(url)
html = page.readlines()
page.close()
return html
except:
print "gGetHtmlLines() error!"
return
"""get html src,return string"""
def gGetHtml(url):
if url==None : return
if not httpExists(url): return
try:
page = urllib.urlopen(url)
html = page.read()
page.close()
return html
except:
print "gGetHtml() error!"
return
"""根据url获取文件名"""
def gGetFileName(url):
if url==None: return None
if url=="" : return ""
arr=url.split("/")
return arr[len(arr)-1]
"""生成随机文件名"""
def gRandFilename(type):
fname = ''
for i in range(16):
fname = fname + chr(random.randint(65,90))
fname = fname + chr(random.randint(48,57))
return fname + '.' + type
"""根据url和其上的link,得到link的绝对地址"""
def gGetAbslLink(url,link):
if url==None or link == None : return
if url=='' or link=='' : return url
addr = ''
if link[0] == '/' :
addr = gGetHttpAddr(url) + link
elif len(link)>3 and link[0:4] == 'http':
addr = link
elif len(link)>2 and link[0:2] == '..':
addr = gGetHttpAddrFatherAssign(url,link)
else:
addr = gGetHttpAddrFather(url) + link
return addr
"""根据输入的lines,匹配正则表达式,返回list"""
def gGetRegList(linesList,regx):
if linesList==None : return
rtnList=[]
for line in linesList:
matchs = re.search(regx, line, re.IGNORECASE)
if matchs!=None:
allGroups = matchs.groups()
for foundStr in allGroups:
if foundStr not in rtnList:
rtnList.append(foundStr)
return rtnList
"""根据url下载文件,文件名参数指定"""
def gDownloadWithFilename(url,savePath,file):
#参数检查,现忽略
try:
urlopen=urllib.URLopener()
fp = urlopen.open(url)
data = fp.read()
fp.close()
file=open(savePath + file,'w+b')
file.write(data)
file.close()
except IOError:
print "download error!"+ url
"""根据url下载文件,文件名自动从url获取"""
def gDownload(url,savePath):
#参数检查,现忽略
fileName = gGetFileName(url)
#fileName =gRandFilename('jpg')
gDownloadWithFilename(url,savePath,fileName)
"""根据某网页的url,下载该网页的jpg"""
def gDownloadHtmlJpg(downloadUrl,savePath):
lines= gGetHtmlLines(downloadUrl)
regx = r"""src\s*="?(\S+)\.jpg"""
lists =gGetRegList(lines,regx)
if lists==None: return
for jpg in lists:
jpg = gGetAbslLink(downloadUrl,jpg) + '.jpg'
gDownload(jpg,savePath)
### print gGetFileName(jpg)
"""根据url取主站地址"""
def gGetHttpAddr(url):
if url== '' : return ''
arr=url.split("/")
return arr[0]+"//"+arr[2]
"""根据url取上级目录"""
def gGetHttpAddrFather(url):
if url=='' : return ''
arr=url.split("/")
addr = arr[0]+'//'+arr[2]+ '/'
if len(arr)-1>3 :
for i in range(3,len(arr)-1):
addr = addr + arr[i] + '/'
return addr
"""根据url和上级的link取link的绝对地址"""
def gGetHttpAddrFatherAssign(url,link):
if url=='' : return ''
if link=='': return ''
linkArray=link.split("/")
urlArray = url.split("/")
partLink =''
partUrl = ''
for i in range(len(linkArray)):
if linkArray[i]=='..':
numOfFather = i + 1 #上级数
else:
partLink = partLink + '/' + linkArray[i]
for i in range(len(urlArray)-1-numOfFather):
partUrl = partUrl + urlArray[i]
if i < len(urlArray)-1-numOfFather -1 :
partUrl = partUrl + '/'
return partUrl + partLink
"""根据url获取其上的相关htm、html链接,返回list"""
def gGetHtmlLink(url):
#参数检查,现忽略
rtnList=[]
lines=gGetHtmlLines(url)
regx = r"""href="?(\S+)\.htm"""
for link in gGetRegList(lines,regx):
link = gGetAbslLink(url,link) + '.htm'
if link not in rtnList:
rtnList.append(link)
print link
return rtnList
"""根据url,抓取其上的jpg和其链接htm上的jpg"""
def gDownloadAllJpg(url,savePath):
#参数检查,现忽略
gDownloadHtmlJpg(url,savePath)
#抓取link上的jpg
links=gGetHtmlLink(url)
for link in links:
gDownloadHtmlJpg(link,savePath)
"""test"""
def test():
u='http://wanimal1983.tumblr.com/'
save='d:/temp/temppic/'
print 'download pic from [' + u +']'
print 'save to [' +save+'] ...'
gDownloadHtmlJpg(u,save)
print "download finished"
test()
|
import numpy as np
from uutils.torch_uu.metrics.cca.cca_core import get_cca_similarity
def svcca_with_keeping_fixed_dims(x: np.ndarray, y: np.ndarray, dims_to_keep: int,
                                  epsilon: float = 1e-10, verbose: bool = False,
                                  full_matrices: bool = False, keepdims: bool = True, axis: int = 1):
    """
    Computes CCA statistics after doing the SV step from SVCCA.
    Incoming data is of shape [D1, N], [D2, N].
    To get svcca similirty do see note.
    Alg:
        - preprocessing:
            - 1) center the incoming raw data
            - 2) SV of centered incoming raw data
            - 3) then cca_core (which does not center but divides by max of incoming data, which due to 2 is centered.)
    Note:
        - To compute svcca distance do: svcca: float = np.mean(svcca_baseline["cca_coef1"])
        - Input data is assumed to be of size [D, N] to make it consistent with the original tutorial: https://github.com/google/svcca/blob/master/tutorials/001_Introduction.ipynb
        - Centering code does not make a difference because get_cc_similarity uses np.cov and cov(x,y) = E[(x-mu_x)(y-mu_y)]
        so it's already centering. But to my surprise giving the SV part non-centered data doesn't make a difference but
        I would have expected a difference.
        - Returns the full statistics dict from get_cca_similarity, not the
        scalar similarity (see first Note item for how to reduce it).
    """
    # Mean subtract baseline activations
    # cx = center(x, axis=axis, keepdims=keepdims)
    # cy = center(y, axis=axis, keepdims=keepdims)
    cx = x
    cy = y
    # Perform SVD
    Ux, sx, Vx = np.linalg.svd(cx, full_matrices=full_matrices)
    Uy, sy, Vy = np.linalg.svd(cy, full_matrices=full_matrices)
    # Keep only the top `dims_to_keep` singular directions:
    # diag(s[:k]) @ V[:k] is the rank-k representation of the data.
    svx = np.dot(sx[:dims_to_keep] * np.eye(dims_to_keep), Vx[:dims_to_keep])
    svy = np.dot(sy[:dims_to_keep] * np.eye(dims_to_keep), Vy[:dims_to_keep])
    # Recenter after SVD since CCA assumes incoming stuff is centered - this is something I added myself to match
    # ultimate anatome's code but it doesn't seem to make a difference.
    # svx = center(svx, axis=axis, keepdims=keepdims)
    # svy = center(svy, axis=axis, keepdims=keepdims)
    svcca_baseline = get_cca_similarity(svx, svy, epsilon=epsilon, verbose=verbose)
    # print("Baseline", np.mean(svcca_baseline["cca_coef1"]), "and MNIST", np.mean(svcca_results["cca_coef1"]))
    svcca: float = np.mean(svcca_baseline["cca_coef1"])
    if verbose:
        print("SVCCA:", svcca)
    return svcca_baseline
def center(x: np.ndarray, axis: int = 1, keepdims=True):
    """
    Subtract the per-axis mean from `x` (data assumed shaped [D, N]).
    """
    return x - x.mean(axis=axis, keepdims=keepdims)
|
# -*- coding: utf-8 -*-
"""Module for single entry transformers."""
import logging
from collections import defaultdict
from operator import attrgetter
from ....core.models.base import RemoteInstance
from ....core.exceptions import DoesNotExistException, SkipField
from ....core.models.terminal import (
SshKey, Identity,
)
from .base import Transformer, DeletBadEncrypted
from .utils import id_getter, map_zip_model_fields
def id_getter_wrapper():
    """Generate id getter.

    Named factory used as the defaultdict default in
    BulkPrimaryKeyTransformer.to_model_mapping; always returns the shared
    ``id_getter`` callable.
    """
    return id_getter
# pylint: disable=abstract-method
class BulkEntryBaseTransformer(Transformer):
    """Base Transformer for one model."""

    def __init__(self, model_class, **kwargs):
        """Create a transformer bound to a single model class."""
        super(BulkEntryBaseTransformer, self).__init__(**kwargs)
        assert model_class
        self.model_class = model_class
        settings = self.account_manager.get_settings()
        self.sync_keys = settings['synchronize_key']
        # Keys and identities are only synchronised when the user opted in;
        # otherwise this transformer skips them entirely.
        self.skip = (not self.sync_keys
                     and self.model_class in (SshKey, Identity))
class BulkPrimaryKeyTransformer(BulkEntryBaseTransformer):
    """Transformer for primary key payloads."""

    logger = logging.getLogger(__name__)
    # int payloads are already remote ids; anything else goes through
    # the shared id_getter.
    to_model_mapping = defaultdict(id_getter_wrapper, {int: int, })

    def to_model(self, payload):
        """Retrieve the model from storage that matches `payload`."""
        if not payload:
            return None
        if self.skip:
            raise SkipField
        remote_id = self.id_from_payload(payload)
        return self.storage.get(
            self.model_class,
            **{'remote_instance.id': remote_id}
        )

    def to_payload(self, model):
        """Serialise `model` to its remote id, or a set/id reference."""
        if self.skip:
            raise SkipField
        if not model:
            return None
        remote = model.remote_instance
        if remote:
            return remote.id
        # Not yet synchronised: reference the model by local set and id.
        return '{model.set_name}/{model.id}'.format(model=model)

    def id_from_payload(self, payload):
        """Extract the remote id from `payload` based on its type."""
        return self.to_model_mapping[type(payload)](payload)
# pylint: disable=too-few-public-methods
class GetPrimaryKeyTransformerMixin(object):
    """Mixin to get primary get Transformer."""

    def get_primary_key_transformer(self, model_class):
        """Build a primary-key transformer sharing this instance's context."""
        return BulkPrimaryKeyTransformer(
            storage=self.storage,
            model_class=model_class,
            account_manager=self.account_manager,
        )
class BulkEntryTransformer(GetPrimaryKeyTransformerMixin,
                           BulkPrimaryKeyTransformer):
    """Transformer for complete model."""
    def __init__(self, **kwargs):
        """Create new Transformer.

        Pre-builds attribute getters for the model's own fields and for the
        fields of its attached RemoteInstance.
        """
        super(BulkEntryTransformer, self).__init__(**kwargs)
        self.attrgetter = attrgetter(*self.model_class.fields)
        self.remote_instance_attrgetter = attrgetter(*RemoteInstance.fields)
    def to_payload(self, model):
        """Convert model to payload (a flat dict of serialised fields)."""
        if self.skip:
            raise SkipField
        payload = dict(map_zip_model_fields(model, self.attrgetter))
        if model.remote_instance:
            zipped_remote_instance = map_zip_model_fields(
                model.remote_instance, self.remote_instance_attrgetter
            )
            payload.update(zipped_remote_instance)
        # Re-serialise each field: foreign keys become references and
        # fields whose serialisation raises SkipField are dropped.
        for field, mapping in model.fields.items():
            self.serialize_field(payload, model, field, mapping)
        payload['local_id'] = model.id
        return payload
    def serialize_field(self, payload, model, field, mapping):
        """Transform field to payload or skip."""
        try:
            if field in model.fk_field_names():
                payload[field] = self.serialize_related_field(
                    model, field, mapping
                )
            else:
                payload[field] = getattr(model, field)
        except SkipField:
            payload.pop(field, None)
    def serialize_related_field(self, model, field, mapping):
        """Transform relation to payload (a primary-key reference)."""
        related_transformer = self.get_primary_key_transformer(mapping.model)
        fk_payload = related_transformer.to_payload(getattr(model, field))
        return fk_payload
    def to_model(self, payload):
        """Convert payload to model (existing or freshly initialised)."""
        if self.skip:
            raise SkipField
        model = self.get_or_initialize_model(payload)
        model = self.update_model_fields(model, payload)
        return model
    def update_model_fields(self, model, payload):
        """Update model's fields with payload.

        NOTE(review): assumes `payload` contains every non-FK field of the
        model -- a missing key raises KeyError here; confirm upstream
        payloads are always complete.
        """
        fk_fields = model.fk_field_names()
        models_fields = {
            i: payload[i]
            for i, mapping in model.fields.items()
            if i not in fk_fields
        }
        for i, mapping in model.fields.items():
            if i in fk_fields:
                try:
                    # Resolve the FK reference to a full related model.
                    models_fields[i] = self.render_relation_field(
                        mapping, payload[i]
                    )
                except SkipField:
                    models_fields.pop(i, None)
        model.update(models_fields)
        model.remote_instance = self.create_remote_instance(payload)
        return model
    def get_or_initialize_model(self, payload):
        """Get existed model or generate new one using payload."""
        try:
            model = self.get_model(payload)
        except DoesNotExistException:
            model = self.initialize_model()
            # Preserve the caller's local id when one was sent along.
            model.id = payload.get('local_id', model.id)
        return model
    def get_model(self, payload):
        """Get model for payload (lookup by remote instance id)."""
        return super(BulkEntryTransformer, self).to_model(payload)
    def render_relation_field(self, mapping, value):
        """Convert relation mapping and value to whole model."""
        transformer = self.get_primary_key_transformer(mapping.model)
        return transformer.to_model(value)
    def initialize_model(self):
        """Generate new (empty) model instance."""
        model = self.model_class()
        return model
    # pylint: disable=no-self-use
    def create_remote_instance(self, payload):
        """Generate remote instance for payload."""
        instance = RemoteInstance()
        instance.init_from_payload(payload)
        return instance
class CryptoBulkEntryTransformer(BulkEntryTransformer):
    """Entry Transformer that encrypt model and decrypt payload."""

    def __init__(self, crypto_controller, **kwargs):
        """Construct a crypto-aware transformer for bulk entries."""
        super(CryptoBulkEntryTransformer, self).__init__(**kwargs)
        self.crypto_controller = crypto_controller

    def to_model(self, payload):
        """Deserialise `payload`, decrypt the model, and persist it."""
        model = super(CryptoBulkEntryTransformer, self).to_model(payload)
        try:
            decrypted_model = self.crypto_controller.decrypt(model)
        except self.crypto_controller.bad_encrypted_exception:
            # Undecryptable entries are flagged for deletion upstream.
            raise DeletBadEncrypted(model)
        return self.storage.save(decrypted_model)

    def to_payload(self, model):
        """Encrypt `model`, then serialise the encrypted copy."""
        encrypted_model = self.crypto_controller.encrypt(model)
        return super(CryptoBulkEntryTransformer, self).to_payload(
            encrypted_model)
class SettingsTransformer(Transformer):
    """Transformer for settings (identity mapping in both directions)."""

    def to_model(self, payload):
        """Settings payloads are used as-is on the application side."""
        return payload

    def to_payload(self, model):
        """Settings models are sent to the REST API unchanged."""
        return model
|
__author__ = 'jitrixis'
from TVpy.Factory.toolsheds import Toolkit
class Arp:
    """In-memory representation of an ARP packet.

    Every field follows the same accessor pattern: get*/set* expose the
    value (setters return self so calls chain), __build* serialises the
    field to wire bytes via Toolkit, and __consume* parses the field off
    the front of `data` and returns the remaining bytes.
    """
    def __init__(self):
        # Defaults: Ethernet (hwtype 1) over IPv4 (ptype 0x800), 6-byte MAC
        # / 4-byte IP addresses, ARP request (op 1), zeroed addresses.
        self.__hwtype = 0x1
        self.__ptype = 0x800
        self.__hwlen = 6
        self.__plen = 4
        self.__op = 0x1
        self.__hwsrc = '00:00:00:00:00:00'
        self.__psrc = '0.0.0.0'
        self.__hwdst = '00:00:00:00:00:00'
        self.__pdst = '0.0.0.0'
    '''Hardware Type'''
    def getHwtype(self):
        return self.__hwtype
    def setHwtype(self, hwtype):
        self.__hwtype = hwtype
        return self
    def __buildHwtype(self):
        return Toolkit.buildInt2(self.__hwtype)
    def __consumeHwtype(self, data):
        # val is (parsed value, remaining bytes).
        val = Toolkit.consumeInt2(data)
        self.__hwtype = val[0]
        return val[1]
    '''IP Type'''
    def getPtype(self):
        return self.__ptype
    def setPtype(self, ptype):
        self.__ptype = ptype
        return self
    def __buildPtype(self):
        return Toolkit.buildInt2(self.__ptype)
    def __consumePtype(self, data):
        val = Toolkit.consumeInt2(data)
        self.__ptype = val[0]
        return val[1]
    '''Hardware length'''
    def getHwlen(self):
        return self.__hwlen
    def setHwlen(self, hwlen):
        self.__hwlen = hwlen
        return self
    def __buildHwlen(self):
        return Toolkit.buildInt1(self.__hwlen)
    def __consumeHwlen(self, data):
        val = Toolkit.consumeInt1(data)
        self.__hwlen = val[0]
        return val[1]
    '''IP length'''
    def getPlen(self):
        return self.__plen
    def setPlen(self, plen):
        self.__plen = plen
        return self
    def __buildPlen(self):
        return Toolkit.buildInt1(self.__plen)
    def __consumePlen(self, data):
        val = Toolkit.consumeInt1(data)
        self.__plen = val[0]
        return val[1]
    '''Operation'''
    def getOp(self):
        return self.__op
    def setOp(self, op):
        self.__op = op
        return self
    def __buildOp(self):
        return Toolkit.buildInt2(self.__op)
    def __consumeOp(self, data):
        val = Toolkit.consumeInt2(data)
        self.__op = val[0]
        return val[1]
    '''Hardware Source'''
    def getHwsrc(self):
        return self.__hwsrc
    def setHwsrc(self, hwsrc):
        self.__hwsrc = hwsrc
        return self
    def __buildHwsrc(self):
        return Toolkit.buildMAC(self.__hwsrc)
    def __consumeHwsrc(self, data):
        val = Toolkit.consumeMAC(data)
        self.__hwsrc = val[0]
        return val[1]
    '''IP Source'''
    def getPsrc(self):
        return self.__psrc
    def setPsrc(self, psrc):
        self.__psrc = psrc
        return self
    def __buildPsrc(self):
        return Toolkit.buildIPv4(self.__psrc)
    def __consumePsrc(self, data):
        val = Toolkit.consumeIPv4(data)
        self.__psrc = val[0]
        return val[1]
    '''Hardware Destination'''
    def getHwdst(self):
        return self.__hwdst
    def setHwdst(self, hwdst):
        self.__hwdst = hwdst
        return self
    def __buildHwdst(self):
        return Toolkit.buildMAC(self.__hwdst)
    def __consumeHwdst(self, data):
        val = Toolkit.consumeMAC(data)
        self.__hwdst = val[0]
        return val[1]
    '''IP Destination'''
    def getPdst(self):
        return self.__pdst
    def setPdst(self, pdst):
        self.__pdst = pdst
        return self
    def __buildPdst(self):
        return Toolkit.buildIPv4(self.__pdst)
    def __consumePdst(self, data):
        val = Toolkit.consumeIPv4(data)
        self.__pdst = val[0]
        return val[1]
    '''Building method'''
    def build(self):
        # Serialise all fields in RFC 826 wire order.
        ret = self.__buildHwtype() + self.__buildPtype()
        ret += self.__buildHwlen() + self.__buildPlen()
        ret += self.__buildOp()
        ret += self.__buildHwsrc() + self.__buildPsrc()
        ret += self.__buildHwdst() + self.__buildPdst()
        return ret
    def fromSource(self, data):
        # Parse fields in wire order; returns the unconsumed remainder.
        data = self.__consumeHwtype(data)
        data = self.__consumePtype(data)
        data = self.__consumeHwlen(data)
        data = self.__consumePlen(data)
        data = self.__consumeOp(data)
        data = self.__consumeHwsrc(data)
        data = self.__consumePsrc(data)
        data = self.__consumeHwdst(data)
        data = self.__consumePdst(data)
        return data
    def getLength(self):
        # Length of the serialised packet.
        return len(self.build())
# todo make sure what they mean by desc undefined? None or empty? Answer: None :) it can never be empty but None is sometimes returned.
# I am implementing everything as dicts to speed up property creation
# Warning: value, get, set props of dest are PyJs types. Rest is Py!
def is_data_descriptor(desc):
    """A data descriptor carries 'value' and/or 'writable' (ES5 8.10.2)."""
    if not desc:
        return desc
    return 'value' in desc or 'writable' in desc
def is_accessor_descriptor(desc):
    """An accessor descriptor carries 'get' and/or 'set' (ES5 8.10.1)."""
    if not desc:
        return desc
    return 'get' in desc or 'set' in desc
def is_generic_descriptor(desc):
    """Generic = neither data nor accessor (ES5 8.10.3).

    Such a descriptor can only change `enumerable`/`configurable`.
    """
    if not desc:
        return desc
    return not (is_data_descriptor(desc) or is_accessor_descriptor(desc))
def from_property_descriptor(desc, space):
    """Build a JS object mirroring a Python property-descriptor dict.

    Implements ES5.1 FromPropertyDescriptor (8.10.4): a data descriptor
    exposes `value`/`writable`, an accessor descriptor exposes `get`/`set`,
    and both expose `enumerable` and `configurable`.

    Returns {} for an empty/None descriptor.
    """
    if not desc:
        return {}
    obj = space.NewObject()

    def _reflect(name, value):
        # Every reflected field is a plain writable/enumerable/configurable
        # data property on the result object (per spec).
        obj.define_own_property(
            name, {
                'value': value,
                'writable': True,
                'enumerable': True,
                'configurable': True
            }, False)

    if is_data_descriptor(desc):
        _reflect('value', desc['value'])
        _reflect('writable', desc['writable'])
    else:
        # Bug fix: this branch previously also reflected desc['writable'],
        # which accessor descriptors do not have (KeyError).
        _reflect('get', desc['get'])
        _reflect('set', desc['set'])
    _reflect('enumerable', desc['enumerable'])
    # Bug fix: 'configurable' was previously never reflected, contrary to
    # ES5.1 8.10.4 step 6.
    _reflect('configurable', desc['configurable'])
    return obj
def to_property_descriptor(obj):
    """Convert a JS object into a Python property-descriptor dict.

    Implements ES5.1 ToPropertyDescriptor (8.10.5). Raises TypeError when
    `obj` is not an Object, when a getter/setter is neither callable nor
    undefined, or when data and accessor fields are mixed.
    """
    if obj._type() != 'Object':
        raise TypeError()
    desc = {}
    for e in ('enumerable', 'configurable', 'writable'):
        if obj.has_property(e):
            desc[e] = obj.get(e).to_boolean().value
    if obj.has_property('value'):
        desc['value'] = obj.get('value')
    for e in ('get', 'set'):
        if obj.has_property(e):
            cand = obj.get(e)
            if not (cand.is_callable() or cand.is_undefined()):
                raise TypeError()
            # Bug fix: the validated getter/setter was previously never
            # stored in the descriptor.
            desc[e] = cand
    if ('get' in desc or 'set' in desc) and ('value' in desc
                                             or 'writable' in desc):
        raise TypeError()
    # Bug fix: the built descriptor was previously never returned.
    return desc
|
# Tiny f-string demo: plain interpolation, then a formatted float.
style = 'formatted'
print(f"this is a {style} string")
import math
r = 3.6
area = math.pi * r * r  # circle area, shown to two decimal places
print(f"A circle with radius {r} gives the result {area:.2f}")
# Read n from stdin and print a left-aligned triangle: row k has k stars.
height = int(input())
for index in range(1, height + 1):
    print("*" * index)
|
# Weighted directed graph for a shortest-path (Dijkstra-style) demo.
graph = dict()
graph['S'] = {}
graph['S']['A'] = 2
graph['S']['B'] = 2
graph['A'] = {}
graph['A']['B'] = 2
graph['B'] = {}
graph['B']['C'] = 2
graph['B']['E'] = 25
graph['C'] = {}
# NOTE(review): a negative edge weight -- Dijkstra's algorithm below is not
# guaranteed correct with negative weights; confirm this is intentional.
graph['C']['A'] = -15
graph['C']['E'] = 2
# graph['D'] = {}
# graph['D']['E'] = 1
graph['E'] = {}
inf = float('inf')
# costs = {}
# # cost of reaching A from S
# costs['A'] = 1
# # cost of reaching C from S
# costs['C'] = 2
# nodes whose cost is not yet known start at infinity
parents = {}
# A's parent node is S
parents['A'] = 'S'
# B's parent node is also S
parents['B'] = 'S'
parents['E'] = None
processed = []
def set_costs_table(graph, start='S'):
    """Build the initial cost table as seen from `start`.

    Direct neighbours of `start` get their edge weight; every other node
    (except `start` itself) starts at infinity.

    Bug fixes: the original aliased graph[start] (so adding the infinity
    entries mutated the graph itself) and hard-coded 'S' instead of using
    the `start` parameter.
    """
    costs = dict(graph[start])
    for node in graph:
        if node not in graph[start] and node != start:
            costs[node] = float('inf')
    return costs
# Initial cost table as seen from the start node 'S'.
costs = set_costs_table(graph, 'S')
def find_lowest_cost_node(costs):
    """Return the cheapest node in `costs` not yet processed (None if none).

    Reads the module-level `processed` list.
    """
    best_node = None
    best_cost = float('inf')
    for candidate in costs:
        if candidate in processed:
            continue
        candidate_cost = costs[candidate]
        if candidate_cost < best_cost:
            best_cost = candidate_cost
            best_node = candidate
    return best_node
# Dijkstra main loop: repeatedly relax the edges of the cheapest
# unprocessed node until no unprocessed node remains.
node = find_lowest_cost_node(costs)
while node is not None:
    cost = costs[node]
    neighbors = graph[node]
    for n in neighbors.keys():
        new_cost = cost + neighbors[n]
        # Found a cheaper path to n through `node`: record cost and parent.
        if costs[n] > new_cost:
            costs[n] = new_cost
            parents[n] = node
    processed.append(node)
    node = find_lowest_cost_node(costs)
def find_parents(node='E'):
    """Print the chain of parents from `node` back to the start node 'S'.

    NOTE(review): raises KeyError when a node's parent was never recorded
    (e.g. parents[None] after hitting an unreachable node).
    """
    print(node)
    if node == 'S':
        return None
    parent_node = parents[node]
    # Recurse one step toward the root of the parents tree.
    return find_parents(parent_node)


find_parents()
# Re-derive and print the costs table, then the final computed costs.
print(set_costs_table(graph))
print(costs)
import numpy as np
import torch
from collections import defaultdict
import string
from sequence.data import traits
from enum import IntEnum
from typing import List, Dict, Optional, Union, Sequence
import functools
def lazyprop(fn):
    """Property that computes `fn(self)` once and caches it per instance.

    The cached value is stored on the instance as `_lazy_<name>`; later
    accesses return it without re-running `fn`.
    """
    cache_attr = "_lazy_" + fn.__name__

    @property
    @functools.wraps(fn)
    def _lazyprop(self):
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, fn(self))
        return getattr(self, cache_attr)

    return _lazyprop
class Tokens(IntEnum):
    """Reserved vocabulary indexes; the models depend on these exact values."""
    EOS = 0
    SOS = 1
    UNKNOWN = 2
class Language:
    """
    The vocabulary of a dataset. This will map the unique strings to indexes that map to the embeddings.
    You can also provide custom embeddings object.
    """
    def __init__(
        self,
        words: Optional[List[str]] = None,
        lower: bool = True,
        remove_punctuation: bool = True,
        custom_embeddings: Optional[np.ndarray] = None,
    ):
        """
        Parameters
        ----------
        words
            Unique words in the dataset
        lower
            Lower the string: str.lower()
        remove_punctuation
            !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ will be removed
        custom_embeddings
            Shape(num_embeddings, embedding_dim)
        """
        self.lower = lower
        self.remove_punctuation = remove_punctuation
        if remove_punctuation:
            self.translation_table = str.maketrans("", "", string.punctuation)
        else:
            # NOTE(review): translation_table is never defined on this
            # branch; clean() only reads it when remove_punctuation is
            # True, so this is safe today but fragile.
            self.remove_punctuation = False
        # Warning. Don't change index 0, 1, and 2
        # These are used in the models!
        self.w2i: Dict[str, int] = {
            "EOS": Tokens.EOS,
            "SOS": Tokens.SOS,
            "UNKNOWN": Tokens.UNKNOWN,
        }
        if words is not None:
            self.register(words)
        self.custom_embeddings = self.init_custom_emb(custom_embeddings)
    @staticmethod
    def init_custom_emb(
        custom_embeddings: Optional[torch.Tensor],
    ) -> Optional[np.ndarray]:
        # NOTE(review): actually returns a torch.Tensor (see below) even
        # though the annotation says Optional[np.ndarray] -- confirm which
        # type downstream code expects.
        if custom_embeddings is not None:
            # we need to concat 3 embeddings for EOS, SOS and UNKNOWN
            _, emb_dim = custom_embeddings.shape
            pre = np.zeros((3, emb_dim))
            # one hot encode. TODO: maybe something smarter?
            # can fail if embedding size is two. It happened to you? You can't be serious!
            pre[0, 0] = 1
            pre[1, 1] = 1
            pre[2, 2] = 1
            return torch.tensor(
                np.concatenate((pre, custom_embeddings), axis=0), dtype=torch.float32
            )
        return None
    def clean(self, word: str) -> str:
        """
        Transform str to lowercase and optionally remove punctuation
        """
        if self.lower:
            word = word.lower()
        if self.remove_punctuation:
            # Make a translation table when given 3 args.
            # All punctuation will be mapped to None
            word = word.translate(self.translation_table)
        return word
    def register(self, words: List[str]):
        # Register each word; duplicates are handled in
        # register_single_word (see the note there).
        [self.register_single_word(w) for w in words]
    def register_single_word(self, word: str):
        """
        Register words as indexes
        Parameters
        ----------
        word
            unique word
        """
        c = self.clean(word)
        if len(c) > 0:
            # NOTE(review): re-registering an existing (or clean-equal)
            # word reassigns its index and leaves a gap in the index
            # space -- confirm callers always pass unique words.
            self.w2i[c] = len(self.w2i)
    @lazyprop
    def i2w(self) -> Dict[int, Optional[str]]:
        # Inverse mapping index -> word; unknown indexes yield None.
        d: Dict[int, Optional[str]] = defaultdict(lambda: None)
        d.update({v: k for k, v in self.w2i.items()})
        return d
    @property
    def vocabulary_size(self) -> int:
        return len(self.w2i)
    @property
    def words(self) -> List[str]:
        return list(self.w2i.keys())
    def translate_batch(self, padded: torch.Tensor) -> np.ndarray:
        """
        Parameters
        ----------
        padded : torch.Tensor
            Tensor with word indexes. Shape: (seq_len, batch)
        Returns
        -------
        out : np.Array[int]
            Array with matching words. Shape: (seq_len, batch)
        """
        # Only eval once
        d = self.i2w
        # Padding index -1 renders as an empty string (mutates the cached
        # i2w dict, which is harmless since -1 is never a real index).
        d[-1] = ""
        if hasattr(padded, "cpu"):
            padded = padded.cpu().data.numpy()
        return np.vectorize(d.get)(padded)
    def __getitem__(self, item: Union[int, str]) -> Union[int, str]:
        # int -> word, str -> index.
        if isinstance(item, int):
            return self.i2w[item]
        else:
            return self.w2i[item]
    def __contains__(self, item: Union[int, str]) -> bool:
        # Strings are cleaned before the membership check.
        if isinstance(item, str):
            return self.clean(item) in self.w2i
        else:
            return item in self.i2w
class Dataset(
    traits.Query, traits.TransitionMatrix, traits.Transform, traits.DatasetABC
):
    """
    Dataset used in training. This has some lazy operations due to dask usage.
    """

    def __init__(
        self,
        sentences: List[List[str]],
        language: Optional[Language],
        skip: Sequence[str] = (),
        buffer_size: int = int(1e4),
        max_len: int = None,
        min_len: int = 1,
        device: str = "cpu",
        chunk_size: Union[int, str] = "auto",
        allow_con_dup: bool = True,
    ):
        """
        Parameters
        ----------
        sentences : list[list[str]]
            [["hello", "world!"], ["get", "down!"]]
        language : sequence.data.utils.Language
            Required. Should be the language fitted for training.
        skip : list[str]
            Words to skip.
        buffer_size : int
            Size of chunks prepared by lazy generator.
            Only used during preparation of dataset.
        max_len : int
            Max sequence length.
        min_len : int
            Min sequence length.
        device : str
            'cuda' | 'cpu'
        chunk_size : str/ int
            Passed to dask array.
        allow_con_dup : bool
            Filter sequences from consecutive duplicates
        """
        language = Language() if language is None else language
        # Initialise every trait mixin explicitly; Transform receives all
        # of the preprocessing configuration.
        traits.DatasetABC.__init__(self, self, language=language, device=device)
        traits.Query.__init__(self, self)
        traits.TransitionMatrix.__init__(self, self)
        traits.Transform.__init__(
            self,
            parent=self,
            buffer_size=buffer_size,
            min_len=min_len,
            max_len=max_len,
            chunk_size=chunk_size,
            sentences=sentences,
            skip=skip,
            allow_con_dup=allow_con_dup,
        )

    def split(self, fracs, shuffle=True):
        """
        Split dataset in [train, test, ..., val] Datasets.

        Parameters
        ----------
        fracs : Sequence
            Fractions per split; must sum to 1.
        shuffle : bool
            Shuffle the rows before slicing them into splits.

        Returns
        -------
        datasets : tuple[Dataset]
            A new Dataset object for every fraction in fracs
        """
        idx = np.arange(len(self.data))
        dsets = tuple(Dataset(None, language=self.language) for _ in fracs)
        fracs = np.array([0] + fracs)
        assert fracs.sum() == 1
        if shuffle:
            np.random.shuffle(idx)
        slice_idx = np.cumsum(fracs * len(self.data)).astype(int)
        slice_idx = [(i, j) for i, j in zip(slice_idx[:-1], slice_idx[1:])]
        for (i, j), ds in zip(slice_idx, dsets):
            # Copy all state from the parent, then narrow the data window.
            ds.__dict__.update(self.__dict__)
            # Bug fix: apply the (possibly shuffled) permutation. Previously
            # `idx` was shuffled but never used, so shuffle=True was a no-op
            # and every split was taken in the original row order.
            ds.data = self.data[idx[i:j]]
            ds.set_idx()
        return dsets
class ArrayWrap(np.ndarray):
    # We only wrap a numpy array such that it has a compute method,
    # mimicking dask's lazy-array interface.
    # See: https://docs.scipy.org/doc/numpy-1.13.0/user/basics.subclassing.html
    def __new__(cls, input_array, attr=None):
        obj = np.asarray(input_array).view(cls)
        # Bug fix: only overwrite the ``compute`` installed by
        # __array_finalize__ when a replacement was explicitly supplied.
        # Previously the default ``attr=None`` clobbered it, leaving
        # ``compute`` as None (not callable) on every directly-built wrap.
        if attr is not None:
            obj.compute = attr
        return obj

    def __array_finalize__(self, obj):
        # Called on view casting and slicing; give every derived array a
        # no-op ``compute`` that returns itself, like an already-computed
        # dask array.
        if obj is None:
            return
        self.compute = lambda: self
class DatasetEager(Dataset):
    """
    The eager variation of Dataset. This class doesn't use Dask.
    """

    def __init__(
        self,
        sentences: List[List[str]],
        language: Optional[Language] = None,
        skip: Sequence[str] = (),
        buffer_size: int = int(1e4),
        max_len: Optional[int] = None,
        min_len: int = 1,
        device: str = "cpu",
        chunk_size: str = "auto",
    ):
        # Same parameters as Dataset (minus allow_con_dup); everything is
        # forwarded to the parent constructor unchanged.
        super().__init__(
            sentences=sentences,
            language=language,
            skip=skip,
            buffer_size=buffer_size,
            max_len=max_len,
            min_len=min_len,
            device=device,
            chunk_size=chunk_size,
        )

    def transform_data(self):
        """
        Eagerly build the padded integer matrix ``self.data`` from the
        corpus and drop rows that became empty after filtering.

        NOTE(review): relies on ``self.paths``, ``self._gen`` and
        ``self.set_idx`` provided by the trait base classes, which are not
        visible in this file — confirm their contracts in
        ``sequence.data.traits``.
        """
        if self.max_len is None:
            # Default to the longest sequence in the corpus.
            self.max_len = max(map(len, self.paths))
        size = len(self.paths)
        array = []
        # Materialise the data in buffer_size chunks: [0, b), [b, 2b), ...
        # The second range deliberately overshoots so the final partial
        # chunk is included.
        for i, j in zip(
            range(0, size, self.buffer_size),
            range(self.buffer_size, size + self.buffer_size, self.buffer_size),
        ):
            array.append(self._gen(i, j, size))
        self.data = np.concatenate(array)
        # Because of transformation conditions there can be empty sequences
        # These need to be removed.
        # The actual computed values are a bit shorter.
        # Because the data rows % buffer_size has a remainder.
        # A row that is entirely -1 padding sums to -(max_len + 1).
        mask_short = self.data.sum(-1) == -(self.max_len + 1)
        mask = np.ones(shape=(self.data.shape[0],), dtype=bool)
        mask[: mask_short.shape[0]] = mask_short
        # Keep only non-empty rows; ArrayWrap adds a dask-like .compute().
        self.data = ArrayWrap(self.data[~mask])
        self.set_idx()
class DatasetInference(traits.Query, traits.Transform, traits.DatasetABC):
    """
    Dataset used during inference.
    """

    def __init__(
        self,
        sentences: List[List[str]],
        language: Language,
        buffer_size: int = int(1e4),
        max_len: Optional[int] = None,
        min_len: int = 1,
        device: str = "cpu",
        chunk_size: str = "auto",
    ):
        """
        Parameters
        ----------
        sentences : list[list[str]]
            Sequences to run inference on.
        language : Language
            The language fitted during training; new words are NOT added.
        buffer_size : int
            Size of chunks prepared by the lazy generator.
        max_len : int, optional
            Max sequence length.
        min_len : int
            Min sequence length.
        device : str
            'cuda' | 'cpu'
        chunk_size : str / int
            Passed to dask array.
        """
        traits.DatasetABC.__init__(self, self, language=language, device=device)
        traits.Query.__init__(self, self)
        # No word skipping and no masking at inference time.
        # NOTE(review): allow_con_dup=False here presumably surfaces as
        # ``self.allow_duplicates`` used below — verify in sequence.data.traits.
        traits.Transform.__init__(
            self,
            parent=self,
            buffer_size=buffer_size,
            min_len=min_len,
            max_len=max_len,
            chunk_size=chunk_size,
            sentences=sentences,
            skip=(),
            allow_con_dup=False,
            mask=False,
        )

    def transform_sentence(self, s: List[str]) -> np.ndarray:
        """
        Transform sentence of string to integers.
        This method is different from the one in training because we
        don't want to add new words to the language. Unknown words will
        be added to UNKNOWN.

        Parameters
        ----------
        s : list[str]
            A sequence of any length.

        Returns
        -------
        s : np.array[int]
            A -1 padded sequence of shape (self.max_len, )

        NOTE(review): only the too-short/too-long fallback writes an SOS
        token; the normal path starts directly with word indexes —
        confirm downstream consumers expect that.
        """
        assert self.max_len is not None
        # Clean every word and drop the ones that clean to the empty string.
        s = list(filter(lambda x: len(x) > 0, [self.language.clean(w) for w in s]))
        # All the sentences are -1 padded
        idx = np.ones(self.max_len + 1) * -1
        last_w = None
        if len(s) > self.max_len or len(s) < self.min_len:
            # prevents returning an IndexError during inference
            idx[0] = Tokens.SOS
            idx[1] = Tokens.EOS
            # will be removed jit
            return idx
        i = -1
        for w in s:
            if not self.allow_duplicates:
                # Drop consecutive duplicate words.
                if w == last_w:
                    last_w = w
                    continue
                last_w = w
            # Only increment if we don't continue
            i += 1
            if w not in self.language.w2i:
                # Out-of-vocabulary words map to the UNKNOWN token.
                w = Tokens.UNKNOWN.name
            idx[i] = self.language.w2i[w]
        # Terminate with EOS (Tokens.EOS == 0) right after the last word.
        idx[i + 1] = 0
        return np.array(idx)
|
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from loader import dp, _
from src.stickers.dn_stickers import ryuk_hi
from src.utils.db_api import db_helpers
@dp.message_handler(CommandStart(), state='*')
async def bot_start(message: types.Message):
    # Greet the user with the intro sticker and the command menu, then make
    # sure they exist in the database (starting with 0 apples).
    user = message.from_user
    await message.answer_sticker(sticker=ryuk_hi)
    greeting = _("Hello, {}!\n\n"
                 "You have the privilege of using the Death Note, so read the rules before "
                 "you start:\n\n"
                 "🍎 /rules 🖋 (click here) \n\n"
                 "If you have read the rules, you can start using the death note: \n\n"
                 "📓 /write_down 📓 (click here) \n\n"
                 "Your Death Note:\n\n"
                 " 📔 /death_list (click here)\n\n"
                 "Settings:\n\n"
                 "⚙ /settings (click here)\n\n"
                 "For each kill you can get 10 apples 🍎 , which you can spend on something "
                 "in the shop\n\n"
                 "🏪 /shop (click here)").format(user.full_name)
    await message.answer(greeting)
    await db_helpers.add_user(id_user=user.id,
                              name=user.full_name, apples=0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.