from django.test import TestCase
from accounts.models import Account
class AccountsModelsTestCase(TestCase):
fixtures = ["tests/test_data/accounts.json"]
def setUp(self):
"""Set up accounts"""
self.user = Account.objects.get(is_superuser=False)
self.superuser = Account.objects.get(is_superuser=True)
def test_str(self):
"""Check __str__ correct"""
self.assertEqual(self.user.__str__(), "Test User")
self.assertEqual(self.superuser.__str__(), "Test Superuser")
def test_get_full_name(self):
"""Check get_full_name correct"""
self.assertEqual(self.user.get_full_name(), "Test User")
self.assertEqual(self.superuser.get_full_name(), "Test Superuser")
def test_is_superuser(self):
"""Check is_super_user correct"""
self.assertEqual(self.user.is_superuser, False)
self.assertEqual(self.superuser.is_superuser, True)
|
from odata_pretty_printer.odata_tools import pretty_print
def make_result(text):
"""Template for the "result = (...)" value generator
Arguments:
text {str} -- The output from the pretty printer
Returns:
str -- Formatted result ready to be pasted into the test
Example:
print(make_result(output))
prints:
result = (
"ShowOnSite eq true\n"
" and PublicationDate le 2020-04-29T08:49:33Z\n"
" and PublicationDate ge 2019-04-29T08:49:33Z\n"
" and ChannelId eq 23\n"
" and (ExpiryDate eq null or ExpiryDate ge 2020-04-29T08:49:33Z)\n"
" and IsAdvisor eq true\n"
" and (FundCodeList/any(fundcode: fundcode eq 'ITSMT')\n"
" or SectorCodeList/any(sectorcode: sectorcode eq 'T:IG')\n"
" or GroupCodeList/any(groupcode: groupcode eq 'BAIL'))"
)
"""
lines = text.split('\n')
lines = '\\n"\n "'.join(lines)
lines = f' result = (\n "{lines}"\n )\n'
return lines
# Throw OData expressions at pretty_print and confirm that the reverse-parsed
# result matches the input.
def test_simple():
filter = "ShowOnSite eq true and (ExpiryDate eq null or ExpiryDate ge 2020-04-29T08:49:33Z)"
output = pretty_print(filter)
make_result(output)
assert output == 'ShowOnSite eq true\n and (ExpiryDate eq null or ExpiryDate ge 2020-04-29T08:49:33Z)'
def test_collection_filter_expression():
filter = "SectorCodeList/any(sectorcode: sectorcode eq 'T:IG')"
output = pretty_print(filter)
assert output == filter
def test_example1():
filter = (
"ShowOnSite eq true "
"and PublicationDate le 2020-04-29T08:49:33Z "
"and PublicationDate ge 2019-04-29T08:49:33Z "
"and ChannelId eq 23 "
"and (ExpiryDate eq null "
"or ExpiryDate ge 2020-04-29T08:49:33Z"
") "
"and IsAdvisor eq true "
"and (FundCodeList/any(fundcode: fundcode eq 'ITSMT') "
"or SectorCodeList/any(sectorcode: sectorcode eq 'T:IG') "
"or GroupCodeList/any(groupcode: groupcode eq 'BAIL')"
")"
)
result = (
"ShowOnSite eq true\n"
" and PublicationDate le 2020-04-29T08:49:33Z\n"
" and PublicationDate ge 2019-04-29T08:49:33Z\n"
" and ChannelId eq 23\n"
" and (ExpiryDate eq null or ExpiryDate ge 2020-04-29T08:49:33Z)\n"
" and IsAdvisor eq true\n"
" and (FundCodeList/any(fundcode: fundcode eq 'ITSMT')\n"
" or SectorCodeList/any(sectorcode: sectorcode eq 'T:IG')\n"
" or GroupCodeList/any(groupcode: groupcode eq 'BAIL'))"
)
output = pretty_print(filter)
assert output == result
def test_example2():
filter = """
(IsTrustnetUnit eq true and IsTrustnetFund eq true)
and Category ne 'AIP'
and Category ne 'NIP'
and Category ne 'RIP'
and Category ne 'COP'
and Currency ne 'CPS'
and UniverseCodeList/any(u: u eq 'T')
"""
result = (
"(IsTrustnetUnit eq true and IsTrustnetFund eq true)\n"
" and Category ne 'AIP'\n"
" and Category ne 'NIP'\n"
" and Category ne 'RIP'\n"
" and Category ne 'COP'\n"
" and Currency ne 'CPS'\n"
" and UniverseCodeList/any(u: u eq 'T')"
)
output = pretty_print(filter)
assert output == result
def test_example3():
filter = """
(IsTrustnetUnit eq true and IsTrustnetFund eq true)
and Category ne 'AIP'
and Category ne 'NIP'
or (Category ne 'RIP' and Category ne 'COP' and Currency ne 'CPS')
and UniverseCodeList/any(u: u eq 'T')
"""
result = (
"(IsTrustnetUnit eq true and IsTrustnetFund eq true)\n"
" and Category ne 'AIP'\n"
" and Category ne 'NIP'\n"
" or (Category ne 'RIP' and Category ne 'COP' and Currency ne 'CPS')\n"
" and UniverseCodeList/any(u: u eq 'T')"
)
output = pretty_print(filter)
assert output == result
|
from abc import abstractmethod
import uuid
import sys
import time
import copy
import logging
from global_config import config_parameters
from util.resources import Resource
from util.s3_utils import S3Helper
class TaskManager(object):
def __init__(self):
self.tasks = {}
self.completed_successfully_tasks = {}
self.completed_failed_tasks = {}
def add_task(self, task, dependency_of=None, dependencies=None):
if task.task_id in self.tasks.keys():
raise TaskManager.DuplicateTaskException()
self.tasks[task.task_id] = task
if dependency_of is not None:
if isinstance(dependency_of, list):
for downstream_task in dependency_of:
self.add_dependency_to_task(task=downstream_task, dependency=task)
else:
self.add_dependency_to_task(task=dependency_of, dependency=task)
if dependencies is not None:
if isinstance(dependencies, list):
for dependency in dependencies:
self.add_dependency_to_task(task=task, dependency=dependency)
else:
self.add_dependency_to_task(task=task, dependency=dependencies)
def add_dependency_to_task(self, task, dependency):
self.get_task(task).dependencies.append(DependencyList.get_safe_value(dependency))
def get_task(self, task_or_task_id):
return self.tasks[DependencyList.get_safe_value(task_or_task_id)]
class DuplicateTaskException(Exception):
def __init__(self):
super(TaskManager.DuplicateTaskException, self).__init__()
def run(self):
while len(self.tasks.keys()) > 0:
tasks_clone = self.tasks.copy()
for task_id in tasks_clone.keys():
self.remove_fulfilled_dependencies(task_id)
if len(self.tasks[task_id].dependencies) == 0:
task = self.tasks.pop(task_id)
# noinspection PyBroadException
try:
task.execute()
self.mark_task_as_succeeded(task)
except Exception as e:
logging.warning(e)
self.mark_task_as_failed(task)
if config_parameters['failOnError']:
logging.fatal('Task {t} fails and failOnError is True.'.format(t=task))
sys.exit(2)
else:
logging.debug('Task {t} has {n} unmet dependencies.'.format(
t=self.tasks[task_id],
n=len(self.tasks[task_id].dependencies)
))
for dependency in self.tasks[task_id].dependencies:
logging.debug('\t{d}'.format(d=dependency))
time.sleep(1)
def remove_fulfilled_dependencies(self, task_id):
for dependency in self.tasks[task_id].dependencies.copy():
if dependency in self.completed_successfully_tasks.keys():
logging.debug('Dependency {d} for task {t} succeeded earlier, clearing dependency'.format(
d=dependency,
t=task_id
))
self.tasks[task_id].dependencies.remove(dependency)
elif dependency in self.completed_failed_tasks.keys():
logging.debug('Dependency {d} for task {t} failed earlier, failing {t} task as well.'.format(
d=dependency,
t=task_id
))
self.tasks[task_id].has_failed = True
self.tasks[task_id].dependencies.remove(dependency)
def mark_task_as_succeeded(self, task):
logging.info('Task succeeded {t}'.format(t=task))
self.completed_successfully_tasks[task.task_id] = task
logging.debug('All succeeded tasks: {tl}'.format(tl=self.completed_successfully_tasks))
def mark_task_as_failed(self, task):
logging.info('Task failed {t}'.format(t=task))
self.completed_failed_tasks[task.task_id] = task
logging.debug('All failed tasks: {tl}'.format(tl=self.completed_failed_tasks))
class DependencyList(list):
def append(self, value):
return super(DependencyList, self).append(DependencyList.get_safe_value(value))
def count(self, value):
return super(DependencyList, self).count(DependencyList.get_safe_value(value))
def index(self, value, start=0, stop=None):
return super(DependencyList, self).index(DependencyList.get_safe_value(value), start, stop)
def remove(self, value):
return super(DependencyList, self).remove(DependencyList.get_safe_value(value))
def copy(self):
return copy.deepcopy(self)
def __setitem__(self, key, value):
return super(DependencyList, self).__setitem__(key, DependencyList.get_safe_value(value))
@staticmethod
def get_safe_value(value):
if isinstance(value, Task):
value = value.task_id
if isinstance(value, uuid.UUID):
return value
raise ValueError('Value {v} cannot be converted to valid dependency (task_id)'.format(v=value))
class Task(object):
def __init__(self, source_resource=None, target_resource=None, s3_details=None):
self.source_resource = source_resource
self.target_resource = target_resource
self.s3_details = s3_details
self.dependencies = DependencyList()
self.task_id = uuid.uuid4()
self.has_failed = False
@abstractmethod
def execute(self):
"""
        Should perform the task and raise exceptions if anything goes wrong.
:return:
"""
pass
def __str__(self):
return self.__class__.__name__ + '(' + str(self.task_id) + ')'
class FailIfResourceDoesNotExistsTask(Task):
def __init__(self, resource=None):
super(FailIfResourceDoesNotExistsTask, self).__init__(source_resource=resource,
target_resource=None,
s3_details=None)
def execute(self):
if not self.source_resource.is_present():
raise Resource.NotFound('{r} was not found'.format(r=self))
class FailIfResourceClusterDoesNotExistsTask(Task):
def __init__(self, resource=None):
super(FailIfResourceClusterDoesNotExistsTask, self).__init__(source_resource=resource,
target_resource=None,
s3_details=None)
def execute(self):
res = self.source_resource.get_cluster().get_query_full_result_as_list_of_dict('select 1 as result')
if not res[0]['result'] == 1:
raise Resource.NotFound('Cluster of resource {r} could not be queried.'.format(r=self.source_resource))
class NoOperationTask(Task):
def __init__(self):
super(NoOperationTask, self).__init__()
def execute(self):
return
class CreateIfTargetDoesNotExistTask(Task):
def __init__(self, source_resource=None, target_resource=None):
super(CreateIfTargetDoesNotExistTask, self).__init__(source_resource=source_resource,
target_resource=target_resource,
s3_details=None)
def execute(self):
if config_parameters['destinationTableForceDropCreate']:
logging.info('Dropping target table {tbl}'.format(tbl=str(self.target_resource)))
self.target_resource.drop()
if not self.target_resource.is_present():
self.target_resource.clone_structure_from(self.source_resource)
logging.info('Creating target {tbl}'.format(tbl=str(self.target_resource)))
self.target_resource.create()
class UnloadDataToS3Task(Task):
def __init__(self, cluster_resource, s3_details):
super(UnloadDataToS3Task, self).__init__(source_resource=cluster_resource,
target_resource=None,
s3_details=s3_details)
def execute(self):
logging.info("Exporting from Source ({t})".format(t=self))
self.source_resource.unload_data(self.s3_details)
class CopyDataFromS3Task(Task):
def __init__(self, cluster_resource, s3_details):
super(CopyDataFromS3Task, self).__init__(source_resource=None,
target_resource=cluster_resource,
s3_details=s3_details)
def execute(self):
logging.info("Importing to Target ({t})".format(t=self))
self.target_resource.copy_data(self.s3_details)
class CleanupS3StagingAreaTask(Task):
def __init__(self, s3_details):
super(CleanupS3StagingAreaTask, self).__init__(source_resource=None,
target_resource=None,
s3_details=s3_details)
def execute(self):
s3_helper = S3Helper(config_parameters['region'])
if self.s3_details.deleteOnSuccess:
s3_helper.delete_s3_prefix(self.s3_details)
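# Illustrative usage sketch (not part of the original module), kept as comments so
# the module's behaviour is unchanged; MyUnloadTask is a hypothetical Task subclass:
#
#   class MyUnloadTask(Task):
#       def execute(self):
#           pass  # do the real work here, raising on failure
#
#   manager = TaskManager()
#   unload = MyUnloadTask()
#   cleanup = NoOperationTask()
#   manager.add_task(unload)
#   manager.add_task(cleanup, dependencies=unload)   # cleanup waits for unload
#   manager.run()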
|
import numpy as np
def brocher(z, vs):
model = np.zeros([len(z), 5])
for i in range(len(vs)):
vp = (0.9409 + 2.0947 * vs[i] - 0.8206 * vs[i]**2 +
0.2683 * vs[i]**3 - 0.0251 * vs[i]**4)
rho = (1.6612 * vp - 0.4721 * vp**2 + 0.0671 * vp**3
- 0.0043 * vp**4 + 0.000106 * vp**5)
model[i, 2] = rho
model[i, 4] = vp
model[:, 0] = np.arange(len(z)) + 1.0
model[:, 1] = z
model[:, 3] = vs
return model
def gardner(z, vs):
model = np.zeros([len(z), 5])
vp_vs_ratio = 1.7321
vp = vp_vs_ratio * vs
rho = 1.741 * vp ** 0.25
model[:, 0] = np.arange(len(z)) + 1.0
model[:, 1] = z
model[:, 2] = rho
model[:, 3] = vs
model[:, 4] = vp
return model
def user_defined(z, vs):
pass
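# Illustrative example (not part of the original module): both relations map a depth
# profile and a shear-velocity profile onto a model table whose columns are
# [layer index, depth, rho, vs, vp]; units of km and km/s are assumed here.
#
#   z = np.array([0.0, 1.0, 2.5])    # layer depths
#   vs = np.array([1.0, 2.0, 3.2])   # shear velocities
#   print(gardner(z, vs))            # fixed Vp/Vs of 1.7321, Gardner-type density
#   print(brocher(z, vs))            # Brocher-style polynomial Vp(Vs) and rho(Vp) fits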
|
# By Kirill Snezhko
import requests
from bs4 import BeautifulSoup
import datetime
import re
import smtplib
# Airport Code
# Barcelona GRO
# Wien XWC
# Cologne CGN
# Larnaca LCA
# Milan - Bergamo BGY
# Milan - Centrum XIK
# Munich - Memmingen FMM
# Munich - Zentrum ZMU
# Pisa PSA
# Firenze ZMS
# Zurich ZLP
desired_home = 'VKO'
desired_destination = 'CGN'
desired_destinations = ['GRO', 'XWC', 'CGN', 'LCA', 'BGY', 'XIK',
'FMM', 'ZMU', 'PSA', 'ZMS', 'ZLP']
desired_minimum_length = 2
desired_maximum_price = 3000
r = requests.post('https://www.pobeda.aero/en/information/book/search_cheap_tickets',
data = {'search_tickets':'1',
'city_code_from':desired_home,
'city_code_to': desired_destination,
'date_departure_from':'10/01/2017',
'date_departure_to':'10/03/2017',
'is-return': 'no',
'date_return_from':'10/01/2017',
'date_return_to':'10/03/2017',
'max_price':'10000'
})
soup = BeautifulSoup(r.text, 'html.parser')
for tag in soup.find_all('br'):
tag.replaceWith('')
string_price = soup.find("div", {"class": "airtickets-cost"}).text.strip(' \t\n\r')
string_dates = soup.find("div", {"class": "airtickets-date"}).text.strip(' \t\n\r')
link = soup.find("a", href=True)['href']
price = int(re.sub("[^0-9]", "", string_price))
string_departure_date, string_arrival_date = string_dates.split('/')
departure_date = datetime.datetime.strptime(string_departure_date, "%d %b %Y ").date()
arrival_date = datetime.datetime.strptime(string_arrival_date, " %d %b %Y").date()
email_text = "Flight found!\n" \
             "From {} to {}, outbound {}, return {} for {} RUB.\n" \
             "BUY: {}."
email_text = email_text.format(desired_home,
                               desired_destination,
                               departure_date.strftime("%Y-%m-%d"),
                               arrival_date.strftime("%Y-%m-%d"),
                               price,
                               link)
print(email_text)
if ((arrival_date - departure_date).days > desired_minimum_length and price < desired_maximum_price):
server_ssl = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server_ssl.login("YOUR_MAIL", "YOUR_PASS")
    server_ssl.sendmail("FROM", "TO", 'Subject: %s\n\n%s' % ("NEW FLIGHT!!!", email_text))
server_ssl.close()
|
#!/usr/bin/env python
#from distutils.core import setup
from setuptools import setup
setup(name='pysensu',
version='0.5',
description='Utilities for working with Sensu',
author='K. Daniels',
author_email='kd@gc.io',
url='https://github.com/kdaniels/pysensu',
packages=['pysensu'],
install_requires=[
"requests >= 1.2.3",
"simplejson >= 3.3.0",
],
)
|
import json
import logging
import os
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np
import os.path as osp
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
JSON_CONTENT_TYPE = 'application/json'
logger = logging.getLogger(__name__)
def model_fn(model_dir):
logger.info('Loading the model.')
model_info = dict()
model = AutoModelForSequenceClassification.from_pretrained("allenai/scibert_scivocab_uncased", num_labels = 10, output_attentions = False, output_hidden_states = False)
if os.path.exists(os.path.join(model_dir,'last_saved.pth')):
state_dict = torch.load(os.path.join(model_dir,'last_saved.pth'))
model.load_state_dict(state_dict['model'])
print('model_info: {}'.format(model_info))
logger.info('Current device: {}'.format(device))
model.to(device).eval()
logger.info('Model has been loaded')
return {'model': model}
def input_fn(serialized_input_data, content_type=JSON_CONTENT_TYPE):
logger.info('Deserializing the input data.')
if content_type == JSON_CONTENT_TYPE:
input_data = json.loads(serialized_input_data)
return input_data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept=JSON_CONTENT_TYPE):
logger.info('Serializing the generated output.')
if accept == JSON_CONTENT_TYPE:
return prediction_output
raise Exception('Requested unsupported ContentType in Accept: ' + accept)
def predict_fn(input_data, model):
logger.info('Generating text based on input parameters.')
model = model['model']
logger.info('Current device: {}'.format(device))
labels = ["'Protect from harm'", "'Process resources'", "'Sense send or process information'", "'Maintain structural integrity'", "'Move'", "'Attach'", "'Maintain ecological community'", "'Chemically modify or Change energy state'", "'Change size or color'", "'Physically assemble/disassemble'"]
abstract = input_data['abstract']
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
encoded_dict = tokenizer.encode_plus(
abstract,
truncation=True,
add_special_tokens = True,
max_length = 256,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
)
    # encode_plus already returns PyTorch tensors (return_tensors='pt'); this is a
    # single-example batch, moved to the same device as the model
    input_id = encoded_dict['input_ids'].to(device)
    attention_mask = encoded_dict['attention_mask'].to(device)
    labels_tensor = torch.tensor([0]).to(device)
# labels_tensor = torch.cat(32*[torch.tensor([actual_label])])
# Prediction
results = dict()
with torch.no_grad():
outputs = model(input_id,
token_type_ids=None,
attention_mask=attention_mask,
labels=labels_tensor)
tmp_eval_loss, logits = outputs[:2]
preds = logits.detach().cpu().numpy()
pred_index = np.argmax(preds, axis=1)
softmax_loss = torch.nn.functional.softmax(logits).detach().cpu().numpy()
log_softmax_loss = torch.nn.functional.log_softmax(logits).detach().cpu().numpy()
# cross_ent = torch.nn.functional.cross_entropy(logits,torch.tensor([actual_label])).detach().cpu().numpy() # Need actual data to compute this
results = {'abstract':abstract,
'predicted_label':labels[pred_index[0]],
'confidence': str(preds[0][pred_index[0]]),
'hface_Loss': str(float(tmp_eval_loss)),
'softmax_loss': np.array2string(softmax_loss),
'log_softmax_loss':np.array2string(log_softmax_loss)
}
return json.dumps(results, indent = 4)
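# Illustrative local smoke test (not part of the deployed entry point): SageMaker's
# hosting container normally drives these handlers as model_fn -> input_fn ->
# predict_fn -> output_fn; 'model/' below is a hypothetical local model directory.
#
#   model = model_fn('model/')
#   payload = json.dumps({'abstract': 'We study photosynthetic energy transfer ...'})
#   prediction = predict_fn(input_fn(payload, JSON_CONTENT_TYPE), model)
#   print(output_fn(prediction, JSON_CONTENT_TYPE))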
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-14 04:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=2000)),
('slug', models.SlugField(max_length=250, unique=True)),
('description', models.TextField()),
('datetime', models.DateTimeField()),
('status', models.CharField(choices=[('draft', 'Draft'), ('live', 'Live'), ('finished', 'Finished')], default='draft', max_length=10)),
],
),
]
|
from devsim import *
print("coordinates")
coordinates=[]
for i in range(0,11):
coordinates.extend([float(i), 0.0, 0.0])
print(coordinates)
print()
print("elements")
elements=[]
for i in range(0,5):
# line type, physical region 0
x=[1, 0, i, i+1]
print(x)
elements.extend(x)
for i in range(5,10):
# line type, physical region 1
x=[1, 1, i, i+1]
print(x)
    elements.extend(x)
#points for boundary conditions
elements.extend([0, 2, 0])
print(elements[-3:])
elements.extend([0, 3, 10])
print(elements[-3:])
elements.extend([0, 4, 5])
print(elements[-3:])
print()
print("physical_names")
physical_names = [
"top",
"bot",
"top_contact",
"bot_contact",
"top_bot_interface"
]
print(physical_names)
print()
create_gmsh_mesh(mesh="toy", coordinates=coordinates, physical_names=physical_names, elements=elements)
add_gmsh_region(mesh="toy", gmsh_name="top", region="top", material="silicon")
add_gmsh_region(mesh="toy", gmsh_name="bot", region="bot", material="silicon")
add_gmsh_contact(mesh="toy", gmsh_name="top_contact", name="top_contact", region="top", material="metal")
add_gmsh_contact(mesh="toy", gmsh_name="bot_contact", name="bot_contact", region="bot", material="metal")
add_gmsh_interface(mesh="toy", gmsh_name="top_bot_interface", name="top_bot_interface", region0="top", region1="bot")
finalize_mesh(mesh="toy")
create_device(mesh="toy", device="toy")
write_devices(device="toy",file="pythonmesh1d.msh")
|
"""
CUR matrix decomposition based on:
CUR matrix decompositions for improved data analysis
Michael W. Mahoney, Petros Drineas
Proceedings of the National Academy of Sciences Jan 2009, 106 (3) 697-702;
DOI: 10.1073/pnas.0803205106
"""
from gaptrain.log import logger
import numpy as np
from scipy.linalg import svd
def rows(matrix, k=2, epsilon=1, return_indexes=False, n_iters=100):
"""
Get the most leveraging rows of a matrix using the CUR algorithm
---------------------------------------------------------------------------
:param matrix: (np.ndarray) shape = (n, m)
:param k: (int) Rank of the SVD decomposition
:param epsilon: (float) Error parameter, smaller values will generate
better approximations
:param return_indexes: (bool) Only return the indexes of the rows in the
matrix
    :param n_iters: (int) Number of sampling iterations used to estimate how
                    often each row is selected
:return:
"""
logger.info('Calculating partial CUR decomposition for rows on '
f'a matrix with dimensions {matrix.shape}')
if k < 1:
raise ValueError('Rank must be at least 1')
    # Requesting the rows, so run COLUMNSELECT on the transpose of the matrix
A = matrix.T
m, n = A.shape
if k > n:
raise ValueError(f'Cannot find {k} rows in a matrix with only {n} rows')
# Approximate number of rows to take from A
c = k * np.log(k) / epsilon ** 2
    # Singular value decomposition of the matrix A:
    # A = u @ diag(s) @ v, where scipy returns v already transposed (i.e. V^T)
u, s, v = svd(A)
# Compute the pi matrix of probabilities
pi = (1.0 / k) * np.sum(v[:int(k), :] ** 2, axis=0)
# COLUMNSELECT algorithm from Mahoney & Drineas----
# Dictionary of row indexes and the number of times they are selected by
# the CUR decomposition
rows_p = {j: 0 for j in range(n)}
for _ in range(n_iters):
for j in range(n):
# Accept this column with probability min{1,cπ_j}
if np.random.uniform(0, 1) < min(1, c * pi[j]):
rows_p[j] += 1
    # Row indexes sorted from most -> least significant, i.e. by how often they
    # were selected (largest rows_p[j] first)
row_indexes = [j for j in sorted(rows_p, key=rows_p.get)][::-1]
if return_indexes:
logger.info(f'Returning the indexes of the {k} most leveraging rows')
return row_indexes[:k]
R = np.array([A[:, j] for j in row_indexes[:k]])
logger.info(f'Returning the matrix R with dimension {R.shape}')
return R
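# Illustrative usage (not part of the original module): select the three most
# leveraging rows of a random 50 x 10 matrix.
#
#   matrix = np.random.rand(50, 10)
#   idxs = rows(matrix, k=3, return_indexes=True)   # indexes of the chosen rows
#   R = rows(matrix, k=3)                           # the rows themselves, shape (3, 10)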
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from workouts.models import Session
@login_required
def index(request):
# TODO: Filter on user and friends
# TODO: Pagination
sessions = (
Session.objects.all().select_related("workout").order_by("-timestamp")[:10]
)
context = {
"sessions": sessions,
}
return render(request, "dashboard/dashboard.html", context=context)
|
from . import calendar
from . import show
from . import videometadata
from .top import cli, main
__all__ = ['cli', 'main', 'calendar', 'show', 'videometadata']
|
from django.test import TestCase
from django.forms.models import model_to_dict
from django.contrib.auth.hashers import check_password
from nose.tools import eq_, ok_
from .factories import UserFactory
from ..serializers import CreateUserSerializer
class TestCreateUserSerializer(TestCase):
def setUp(self):
self.user_data = model_to_dict(UserFactory.build())
def test_serializer_with_empty_data(self):
serializer = CreateUserSerializer(data={})
eq_(serializer.is_valid(), False)
def test_serializer_with_valid_data(self):
serializer = CreateUserSerializer(data=self.user_data)
ok_(serializer.is_valid())
def test_serializer_hashes_password(self):
serializer = CreateUserSerializer(data=self.user_data)
ok_(serializer.is_valid())
user = serializer.save()
ok_(check_password(self.user_data.get('password'), user.password))
|
mysql_config = {
"user":"root",
"password":"g@KgUnHhcYUYu7.",
"host":"192.168.255.128",
"database":"data_speaker",
"port":"33305"
}
vrchat_config = {
"id":"kalraidai",
"password":"tJt!424j=#L27m?",
}
|
'''
##############################################################################
# Copyright 2019 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
Created on Apr 4, 2017
@author: HWASSMAN
'''
import cherrypy
import json
import re
import argparse
import logging.handlers
import sys
import socket
from queryHandler.Query import Query
from queryHandler.QueryHandler import QueryHandler2 as QueryHandler
from queryHandler.Topo import Topo
from queryHandler import SensorConfig
from collections import defaultdict
from timeit import default_timer as timer
ERR = {
400:'Bad Request',
404:'Not Found',
500:'Internal Server Error. Please check logs for more details.'}
MSG = {
'IntError':'Server internal error occurred. Reason: {}',
    'sysStart':'Initial cherryPy server engine start has been invoked. Python version: {}, cherryPy version: {}.',
'MissingParm':'Missing mandatory parameters, quitting',
'CollectorErr':'Failed to initialize connection to pmcollector, quitting',
'MetaError':'Metadata could not be retrieved. Check log file for more details, quitting',
'MetaSuccess': 'Successfully retrieved MetaData',
    'QueryError':'Query request could not be processed. Reason: {}',
    'SearchErr': 'Search for {} caused exception: {}',
'LookupErr':'Lookup for metric {} did not return any results',
'FilterByErr':'No component entry found for the specified \'filterby\' attribute',
'GroupByErr': 'In the current setup the group aggregation \'groupby\' is not possible.',
'MetricErr':'Metric {0} cannot be found. Please check if the corresponding sensor is configured',
'InconsistentParams':'Received parameters {} inconsistent with request parameters {}',
'SensorDisabled':'Sensor for metric {} is disabled',
'NoData':'Empty results received', #Please check the pmcollector is properly configured and running.
'BucketsizeChange':'Based on requested downsample value: {} the bucketsize will be set: {}',
'BucketsizeToPeriod':'Bucketsize will be set to sensors period: {}',
'ReceivedQuery':'Received query request for query:{}, start:{}, end:{}',
'RunQuery':'Execute zimon query: {}',
'AttrNotValid':'Invalid attribute:{}',
'AllowedAttrValues': 'For attribute {} applicable values:{}',
'ReceivAttrValues': 'Received {}:{}',
'TimerInfo':'Processing {} took {} seconds',
'Query2port':'For better bridge performance multithreaded port {} will be used'
}
class MetadataHandler():
def __init__(self, logger, server, port=9084):
self.__qh = None
self.__sensorsConf = None
self.__metaData = None
self.logger = logger
self.server = server
self.port = port
self.__initializeTables()
@property
def qh(self):
if not self.__qh:
self.__qh = QueryHandler(self.server, self.port, self.logger)
return self.__qh
@property
def SensorsConfig(self):
if not self.__sensorsConf or len(self.__sensorsConf) == 0:
self.__sensorsConf = SensorConfig.readSensorsConfigFromMMSDRFS(self.logger)
return self.__sensorsConf
@property
def metaData(self):
return self.__metaData
def __initializeTables(self):
'''Read the topology from ZIMon and (re-)construct
the tables for metrics, keys, key elements (tag keys)
and key values (tag values)'''
self.__qh = QueryHandler(self.server, self.port, self.logger)
self.__sensorsConf = SensorConfig.readSensorsConfigFromMMSDRFS(self.logger)
tstart = timer()
self.__metaData = Topo(self.qh.getTopology())
tend = timer()
if not (self.metaData and self.metaData.topo):
raise ValueError(MSG['NoData'])
foundItems = len(self.metaData.allParents) -1
sensors = self.metaData.sensorsSpec.keys()
self.logger.info(MSG['MetaSuccess'])
self.logger.info(MSG['ReceivAttrValues'].format('parents totally', foundItems))
self.logger.debug(MSG['ReceivAttrValues'].format('parents', ", ".join(self.metaData.allParents)))
self.logger.info(MSG['ReceivAttrValues'].format('sensors', ", ".join(sensors)))
self.logger.info(MSG['TimerInfo'].format('Metadata',str(tend-tstart)))
def update(self):
'''Read the topology from ZIMon and update
the tables for metrics, keys, key elements (tag keys)
and key values (tag values)'''
tstart = timer()
self.__metaData = Topo(self.qh.getTopology())
tend = timer()
if not (self.metaData and self.metaData.topo):
self.logger.error(MSG['NoData']) # Please check the pmcollector is properly configured and running.
            raise cherrypy.HTTPError(404, ERR[404])
self.logger.info(MSG['MetaSuccess'])
self.logger.debug(MSG['ReceivAttrValues'].format('parents', ", ".join(self.metaData.allParents)))
self.logger.debug(MSG['TimerInfo'].format('Metadata', str(tend-tstart)))
return({'msg':MSG['MetaSuccess']})
class GetHandler(object):
exposed = True
def __init__(self, logger, mdHandler):
self.logger = logger
self.__md = mdHandler
@property
def md(self):
return self.__md
@property
def qh(self):
return self.__md.qh
@property
def TOPO(self):
return self.__md.metaData
def __getSuggest(self, params):
resp = []
if params.get('q'):
searchStr = params['q'].strip()
# if '*' and tagv, then it denotes a grouping key value: do not process
if not(searchStr == '*' and params['type'] == 'tagv'):
# Since grafana sends the candidate string quickly, one character at a time, it
# is likely that the reg exp compilation will fail.
try:
regex = re.compile("^" + searchStr + ".*")
except re.error:
self.logger.debug(MSG['SearchErr'].format(searchStr, str(re.error)))
regex = None # failed to compile, return empty response
if regex:
try:
if params['type'] == 'metrics':
resp = sorted([m.group(0) for item in self.TOPO.getAllEnabledMetricsNames for m in [regex.search(item)] if m])
elif params['type'] == 'tagk':
resp = sorted([m.group(0) for item in self.TOPO.getAllAvailableTagNames for m in [regex.search(item)] if m])
elif params['type'] == 'tagv':
resp = sorted([m.group(0) for item in self.TOPO.getAllAvailableTagValues for m in [regex.search(item)] if m])
except Exception as e:
self.logger.exception(MSG['IntError'].format(str(e)))
raise cherrypy.HTTPError(500, ERR[500])
return resp
    def __getLookup(self, params):
        resp = {}
        if params.get('m'):
try:
params_list = re.split(r'\{(.*)\}', params['m'].strip())
searchMetric = params_list[0]
if searchMetric and str(searchMetric).strip() not in self.TOPO.getAllEnabledMetricsNames:
self.logger.debug(MSG['LookupErr'].format(searchMetric))
return {}
else:
filterBy = None
if len(params_list) > 1:
attr = params_list[1]
filterBy = dict(x.split('=') for x in attr.split(','))
identifiersMap = self.TOPO.getIdentifiersMapForQueryAttr('metric', searchMetric, filterBy)
res = LookupResultObj(searchMetric)
res.parseResultTags(identifiersMap)
res.parseRequestTags(filterBy)
resp = res.__dict__
except Exception as e:
self.logger.exception(MSG['IntError'].format(str(e)))
                raise cherrypy.HTTPError(500, ERR[500])
return resp
@cherrypy.tools.json_out()
def GET(self, **params):
'''Handle partial URLs such as /api/suggest?q=cpu_&type=metrics
where type is one of metrics, tagk or tagv
or
Handle /api/search/lookup/m=cpu_idle{node=*}
where m is the metric and optional term { tagk = tagv } qualifies the lookup.
For more details please check openTSDB API (version 2.2 and higher) documentation for
/api/lookup
/api/search/lookup
'''
resp =[]
# /api/suggest
if 'suggest' in cherrypy.request.script_name:
resp = self.__getSuggest(params)
# /api/search/lookup
elif 'lookup' in cherrypy.request.script_name:
resp = self.__getLookup(params)
# /api/update
elif 'update' in cherrypy.request.script_name:
resp = self.md.update()
elif 'aggregators' in cherrypy.request.script_name:
resp = [ "noop", "sum", "avg", "max", "min", "rate"]
elif 'config/filters' in cherrypy.request.script_name:
supportedFilters = {}
filterDesc = {}
filterDesc['description'] = '''Accepts an exact value or a regular expressions and matches against
values for the given tag. The value can be omitted if the filter is used to specify groupBy on the tag only.'''
filterDesc['examples'] = '''node=pm_filter(machine1), node=pm_filter(machine[1-6]), node=pm_filter(m1|m2),
node=pm_filter(mac.*), node=pm_filter((?!^z).*)'''
supportedFilters['pm_filter'] = filterDesc
resp = supportedFilters
del cherrypy.response.headers['Allow']
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
return resp
class PostHandler(object):
exposed = True
def __init__(self, logger, mdHandler):
self.logger = logger
self.__md = mdHandler
@property
def qh(self):
return self.__md.qh
@property
def sensorsConf(self):
return self.__md.SensorsConfig
@property
def TOPO(self):
return self.__md.metaData
def _getTimeMultiplier(self, timeunit):
'''Translate OpenTSDB time units, ignoring ms (milliseconds)'''
return {
's' : 1,
'm' : 60,
'h' : 3600,
'd' : 86400,
'w' : 604800,
'n' : 2628000,
'y' : 31536000,
}.get(timeunit, -1)
def _retrieveData(self, query, dsOp=None, dsInterval=None):
'''Executes zimon query and returns results'''
self.logger.info(MSG['RunQuery'].format(query))
tstart = timer()
res = self.qh.runQuery(query)
tend = timer()
self.logger.info(MSG['TimerInfo'].format('runQuery: \"'+ str(query)+'\"', str(tend-tstart)))
if res == None:
return
self.logger.info("res.rows length: {}".format(len(res.rows)))
rows = res.rows
if dsOp and dsInterval and len(res.rows) > 1:
rows = res.downsampleResults(dsInterval, dsOp)
columnValues = defaultdict(dict)
for row in rows:
for value, columnInfo in zip(row.values, res.columnInfos):
columnValues[columnInfo][row.tstamp] = value
return columnValues
def _validateQueryFilters(self, metricName, query):
notValid = False
# check filterBy settings
if query.filters:
filterBy = dict(x.split('=') for x in query.filters)
identifiersMap = self.TOPO.getIdentifiersMapForQueryAttr('metric', metricName, filterBy)
if not identifiersMap:
self.logger.error(MSG['FilterByErr'])
return (notValid, MSG['AttrNotValid'].format('filter'))
# check groupBy settings
if query.groupby:
filter_keys = self.TOPO.getAllFilterKeysForMetric(metricName)
if not filter_keys:
self.logger.error(MSG['GroupByErr'])
return (notValid, MSG['AttrNotValid'].format('filter'))
groupKeys = query.groupby
if not all(key in filter_keys for key in groupKeys):
self.logger.error(MSG['AttrNotValid'].format('groupBy'))
self.logger.error(MSG['ReceivAttrValues'].format('groupBy', ", ".join(filter_keys)))
return (notValid, MSG['AttrNotValid'].format('filter'))
return (True, '')
def _createZimonQuery(self, q, start, end):
'''Creates zimon query string '''
query = Query()
query.normalize_rates = False
bucketSize = 1 # default
inMetric = q.get('metric')
        if inMetric not in self.TOPO.getAllEnabledMetricsNames:
self.logger.error(MSG['MetricErr'].format(inMetric))
raise cherrypy.HTTPError(404, MSG['MetricErr'].format(inMetric))
else:
self.logger.info(MSG['ReceivedQuery'].format(str(q), str(start), str(end)))
# add tagName or metric using the same method. There is no 'NOOP' option in openTSDB
query.addMetric(inMetric, q.get('aggregator'))
if q.get('filters'):
try:
for f in q.get('filters'):
tagk = f.get('tagk')
if tagk:
if f.get('groupBy'):
query.addGroupByMetric(tagk)
if f.get('filter'):
query.addFilter(tagk, f.get('filter'))
except ValueError as e:
self.logger.error(MSG['QueryError'].format(str(e)))
raise cherrypy.HTTPError(400, MSG['QueryError'].format(str(e)))
# set time bounds
if end:
query.setTime(str(int(int(start) / 1000)),
str(int(int(end) / 1000)))
else:
query.setTime(str(int(int(start) / 1000)), '')
# set bucket size
bucketSize = self._getSensorPeriod(inMetric)
if bucketSize < 1 :
self.logger.error(MSG['SensorDisabled'].format(inMetric))
raise cherrypy.HTTPError(400, MSG['SensorDisabled'].format(inMetric))
dsOp=dsBucketSize=dsInterval=None
if q.get('downsample'):
dsOp = self._get_downsmplOp(q.get('downsample'))
dsBucketSize = self._calc_bucketSize(q.get('downsample'))
if not dsOp and dsBucketSize > bucketSize:
bucketSize = dsBucketSize
self.logger.info(MSG['BucketsizeChange'].format(q.get('downsample'), bucketSize))
elif dsBucketSize <= bucketSize:
dsOp=dsInterval=None
else :
dsInterval = int(dsBucketSize/bucketSize)
else:
self.logger.info(MSG['BucketsizeToPeriod'].format(bucketSize))
query.setBucketSize(bucketSize)
return query, dsOp, dsInterval
def _formatQueryResponse(self, inputQuery, results, showQuery=False, globalAnnotations=False):
resList = []
for columnInfo, dps in results.items():
if columnInfo.name.find(inputQuery.get('metric')) == -1:
self.logger.error(MSG['InconsistentParams'].format(columnInfo.name, inputQuery.get('metric')))
                raise cherrypy.HTTPError(500, ERR[500])
filtersMap = self.TOPO.getAllFilterMapsForMetric(columnInfo.keys[0].metric)
res = QueryResultObj(inputQuery, dps, showQuery, globalAnnotations)
res.parseTags(self.logger, filtersMap, columnInfo)
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
resList.append(res.__dict__)
return resList
def _calc_bucketSize(self, downsample):
bucketSize = 1 # default
bstr = downsample
if '-' in bstr:
            x = re.split(r'(\d+)', bstr[:bstr.find('-')])
if len(x) == 3: # if not 3, then split failed
if x[1]: # there is a time value
if x[1].isdigit():
timeMultiplier = -1
if x[2]: # there is a unit
timeMultiplier = self._getTimeMultiplier(x[2])
if timeMultiplier == -1:
bucketSize = int(x[1])
else:
bucketSize = int(x[1]) * timeMultiplier
else: # no units
bucketSize = int(x[1])
return bucketSize
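    # Illustrative values (not in the original source) for the openTSDB-style
    # downsample strings handled above, in seconds:
    #   '30s-avg' -> 30, '2m-sum' -> 120, '1h-max' -> 3600, '15-avg' -> 15 (no unit)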
def _get_downsmplOp(self, downsample):
bstr = downsample
if '-' in bstr:
x = bstr.split('-')
if x[1] in ['avg','sum', 'max', 'min']:
return x[1]
return None
def _getSensorPeriod(self, metric):
bucketSize = 0
sensor = self.TOPO.getSensorForMetric(metric)
if not sensor:
self.logger.error(MSG['MetricErr'].format(metric))
raise cherrypy.HTTPError(404, MSG['MetricErr'].format(metric))
elif sensor in ('GPFSPoolCap', 'GPFSInodeCap'):
sensor = 'GPFSDiskCap'
for sensorAttr in self.sensorsConf:
if sensorAttr['name'] == str('\"%s\"' % sensor):
bucketSize = int(sensorAttr['period'])
return bucketSize
@cherrypy.config(**{'tools.json_in.force' : False})
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def POST(self):
''' Process POST. tools.json_in.force is set to False for
        compatibility between versions of grafana < 3 and version 3.'''
# read query request parameters
jreq = cherrypy.request.json
_resp = []
if jreq.get('queries') == None:
self.logger.error(MSG['QueryError'].format('empty'))
            raise cherrypy.HTTPError(400, ERR[400])
# A request query can include more than one sub query and any mixture of the two types
# For more details please check openTSDB API (version 2.2 and higher) documentation for
# /api/query
for i, q in enumerate(jreq.get('queries')):
q['index']=i
query, dsOp, dsInterval = self._createZimonQuery(q, jreq.get('start'), jreq.get('end'))
if self.logger.level == logging.DEBUG:
(valid, msg) = self._validateQueryFilters(q.get('metric'), query)
if not valid:
raise cherrypy.HTTPError(400, msg)
columnValues = self._retrieveData(query, dsOp, dsInterval)
if columnValues == None:
self.logger.debug(MSG['NoData'])
if len(jreq.get('queries')) == 1:
raise cherrypy.HTTPError(404, ERR[404])
else:
continue
res = self._formatQueryResponse(q, columnValues, jreq.get('showQuery'), jreq.get('globalAnnotations'))
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
_resp.extend(res)
return _resp
def OPTIONS(self):
# print('options_post')
del cherrypy.response.headers['Allow']
cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET, POST, NEW, OPTIONS'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Accept'
cherrypy.response.headers['Access-Control-Max-Age'] = 604800
class LookupResultObj():
def __init__(self, metric):
self.type = "LOOKUP"
self.metric = metric
self.tags = []
self.results = []
def parseRequestTags(self, filtersDict):
if filtersDict:
for key, value in filtersDict.items():
tmp = {'key':key, 'value' : value}
self.tags.append(tmp)
def parseResultTags(self, identifiersMap):
if identifiersMap:
for identifiers in identifiersMap:
d = defaultdict(dict)
for key in identifiers.keys():
d['tags'][key]= identifiers[key]
if d not in self.results:
self.results.append(d)
class QueryResultObj():
def __init__(self, inputQuery, dps, showQuery=False, globalAnnotations=False):
self.metric = inputQuery.get('metric')
self.dps = dps
if showQuery:
self.query = inputQuery
if globalAnnotations:
self.globalAnnotations = []
self.tags = defaultdict(list)
self.aggregatedTags = []
def parseTags(self, logger, filtersMap, columnInfo):
tagsDict = defaultdict(list)
for key in columnInfo.keys:
ident = [key.parent]
ident.extend(key.identifier)
logger.debug(MSG['ReceivAttrValues'].format('Single ts identifiers', ', '.join(ident)))
for filtersDict in filtersMap:
if all((value in filtersDict.values()) for value in ident):
logger.debug(MSG['ReceivAttrValues'].format('filtersKeys',', '.join(filtersDict.keys())))
if len(columnInfo.keys) == 1:
self.tags = filtersDict
else:
for _key, _value in filtersDict.items():
tagsDict[_key].append(_value)
for _key, _values in tagsDict.items():
if len(set(_values)) > 1:
self.aggregatedTags.append(_key)
else:
self.tags[_key] = _values[0]
def processFormJSON(entity):
''' Used to generate JSON when the content
is of type application/x-www-form-urlencoded. Added for grafana 3 support'''
body = entity.fp.read()
if len(body) > 0:
cherrypy.serving.request.json = json.loads(body.decode('utf-8'))
else:
cherrypy.serving.request.json = json.loads('{}')
def configureLogging(logfile, loglevel):
# prepare the logger
logger = logging.getLogger('zimonGrafanaIntf')
rfhandler = logging.handlers.RotatingFileHandler(logfile, 'a', 1000000, 5) # 5 x 1M files
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
rfhandler.setFormatter(formatter)
logger.addHandler(rfhandler)
try:
logger.setLevel(loglevel)
except (ValueError, TypeError):
logger.setLevel(logging.INFO)
logger.propagate = False # prevent propagation to default (console) logger
return logger
def validateCollectorConf(args, logger):
if not (args.server == 'localhost') and not (args.server == '127.0.0.1'):
try:
s = socket.socket()
s.connect((args.server, args.serverPort))
finally:
s.close()
else:
#get queryport
foundPorts = SensorConfig.getCollectorPorts(logger)
if foundPorts and str(args.serverPort) not in foundPorts:
raise Exception("Invalid serverPort specified. Try with: %s" % str(foundPorts))
elif foundPorts[1] and not (args.serverPort == int(foundPorts[1])):
args.serverPort = int(foundPorts[1])
logger.info(MSG['Query2port'].format(args.serverPort))
def updateCherrypyConf(args):
globalConfig = {
'global' : {
'server.socket_host': '0.0.0.0',
'server.socket_port' : args.port,
'server.socket_timeout' : 60, # increase timeout to 60s
'request.show_tracebacks' : False,
'log.screen' : False, # turn off logging to console
'log.access_file': "cherrypy_access.log",
'log.error_file': "cherrypy_error.log",
'tools.encode.on' : True,
'tools.encode.encoding' : 'utf-8'}}
cherrypy.config.update(globalConfig)
if args.port == 8443:
sslConfig = {
'global' : {
'server.ssl_module' : 'builtin',
'server.ssl_certificate' : args.keyPath + "/cert.pem",
'server.ssl_private_key' : args.keyPath + "/privkey.pem" }}
cherrypy.config.update(sslConfig)
def main(argv):
# parse input arguments
parser = argparse.ArgumentParser('python zimonGrafanaIntf.py')
parser.add_argument('-s', '--server', action="store", default='localhost',
                        help='Host name or ip address of the ZIMon collector (Default: localhost) \
NOTE: Per default ZIMon does not accept queries from remote machines. \
To run the bridge from outside of the ZIMon collector, you need to modify ZIMon queryinterface settings (\'ZIMonCollector.cfg\')')
parser.add_argument('-P', '--serverPort', action="store", type=int, default=9084, help='ZIMon collector port number (Default: 9084)')
    parser.add_argument('-l', '--logFile', action="store", default="./zserver.log", help='location of the log file (Default: ./zserver.log)')
parser.add_argument('-c', '--logLevel', action="store", type=int, default=logging.INFO, help='log level 10 (DEBUG), 20 (INFO), 30 (WARN), 40 (ERROR) (Default: 20)')
parser.add_argument('-p', '--port', action="store", type=int, default=4242, help='port number to listen on (Default: 4242)')
parser.add_argument('-k', '--keyPath', action="store", help='Directory path of privkey.pem and cert.pem file location(Required only for HTTPS port 8443)')
args = parser.parse_args(argv)
if args.port == 8443 and not args.keyPath:
print(MSG['MissingParm'])
return
# prepare the logger
logger = configureLogging(args.logFile, args.logLevel)
logger.info('zimonGrafanaItf invoked with parameters:%s', str(args))
#prepare cherrypy server configuration
updateCherrypyConf(args)
# prepare metadata
try:
validateCollectorConf(args, logger)
mdHandler = MetadataHandler(logger, args.server, args.serverPort)
print(MSG['MetaSuccess'])
print(MSG['ReceivAttrValues'].format('sensors',"\n\n"+ "\t".join(mdHandler.metaData.sensorsSpec.keys())))
except (AttributeError, ValueError, TypeError) as e:
logger.exception('%s', MSG['IntError'].format(str(e)))
print(MSG['MetaError'])
return
except (Exception, IOError) as e:
logger.exception('%s', MSG['IntError'].format(str(e)))
print(MSG['CollectorErr'])
return
ph = PostHandler(logger, mdHandler)
cherrypy.tree.mount(ph, '/api/query',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'request.body.processors' : { 'application/x-www-form-urlencoded': processFormJSON }
}
}
)
gh = GetHandler(logger, mdHandler)
# query for metric name (openTSDB: zimon extension returns keys as well)
cherrypy.tree.mount(gh, '/api/suggest',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher()}
}
)
# query for tag name and value, given a metric (openTSDB)
cherrypy.tree.mount(gh, '/api/search/lookup',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher()}
}
)
# query to force update of metadata (zimon feature)
cherrypy.tree.mount(gh, '/api/update',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher()}
}
)
# query for list of aggregators (openTSDB)
cherrypy.tree.mount(gh, '/api/aggregators',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher()}
}
)
# query for list of filters (openTSDB)
cherrypy.tree.mount(gh, '/api/config/filters',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher()}
}
)
print(MSG['sysStart'].format(sys.version, cherrypy.__version__))
logger.info("%s", MSG['sysStart'].format(sys.version, cherrypy.__version__))
try:
cherrypy.engine.start()
print("server started")
cherrypy.engine.block()
except TypeError:
#msg = "server stopping, please check logs for more details"
print("Server request could not be proceed. Reason:")
raise cherrypy.HTTPError(500, ERR[500])
except IOError:
#msg = "server stopping, please check logs for more details"
print("STOPPING: Server request could not be proceed. Reason:")
cherrypy.engine.stop()
cherrypy.engine.exit()
ph = None
gh = None
print("server stopped")
if __name__ == '__main__':
main(sys.argv[1:])
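# Illustrative request shape (not part of the source): PostHandler.POST expects an
# openTSDB-style /api/query body; the metric and tag names below are hypothetical.
#
#   {
#     "start": 1587110400000,
#     "end":   1587114000000,
#     "queries": [
#       {"metric": "cpu_user",
#        "aggregator": "avg",
#        "downsample": "1m-avg",
#        "filters": [{"tagk": "node", "filter": "node1", "groupBy": false}]}
#     ]
#   }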
|
from cfgparser.packet import Packet
from runner.runner import Runner
from cfgparser.iptables import ParseIPTables
from cfgparser.ipaddrs import ParseIPAddrs
from cfgparser.iproutes import ParseIPRoutes
from cfgparser.ipsets import ParseIPSets
from colorama import init
from prompt_toolkit.completion import WordCompleter, merge_completers
from prompt_toolkit.shortcuts import CompleteStyle, prompt
from path_completer import PathCompleter
from prompt_toolkit import PromptSession
cfg = {
"iptables": None,
"ipaddrs": None,
"iproutes": None,
"ipsets": None,
"non_local_ip": None,
"packet": Packet(),
}
def bottom_statusbar():
text = "iptables: {}, ipaddrs: {}, iproutes: {}, ipsets: {} | {}:{} => {}:{}".format(
"loaded" if cfg["iptables"] else "missing",
"loaded" if cfg["ipaddrs"] else "missing",
"loaded" if cfg["iproutes"] else "missing",
"loaded" if cfg["ipsets"] else "missing",
cfg["packet"].source, cfg["packet"].sport,
cfg["packet"].dest, cfg["packet"].dport,
)
return text
cmd_completer = WordCompleter([
'load-dir', 'load-iptables', 'load-ipaddrs', 'load-iproutes', 'load-ipsets',
'set-source', 'set-dest', 'set-sport', 'set-dport',
'run-incomming-packet', 'run-localgen-packet', 'exit',
], ignore_case=True)
session = PromptSession()
if __name__ == '__main__':
init(autoreset=True)
while True:
input = session.prompt('> ', bottom_toolbar=bottom_statusbar, completer=merge_completers([PathCompleter(min_input_len=1), cmd_completer]))
if not input.split():
continue
if input.split()[0] == "exit":
break
if input.split()[0] == "load-localhost":
print("not implemented")
continue
if input.split()[0] == "load-dir":
try:
cfg["iptables"] = ParseIPTables(input.split()[1] + r"/iptables.txt")
cfg["ipaddrs"], cfg["non_local_ip"] = ParseIPAddrs(input.split()[1] + r"/ipaddrs.txt")
cfg["iproutes"] = ParseIPRoutes(input.split()[1] + r"/iproutes.txt")
cfg["ipsets"] = ParseIPSets(input.split()[1] + r"/ipsets.txt")
except Exception as e:
print("load config failed. " + str(e))
continue
if input.split()[0] == "load-iptables":
try:
cfg["iptables"] = ParseIPTables(input.split()[1])
except Exception as e:
print("load iptables failed. " + str(e))
continue
if input.split()[0] == "load-ipaddrs":
try:
cfg["ipaddrs"], cfg["non_local_ip"] = ParseIPAddrs(input.split()[1])
except Exception as e:
print("load ipaddrs failed. " + str(e))
continue
if input.split()[0] == "load-iproutes":
try:
cfg["iproutes"] = ParseIPRoutes(input.split()[1])
except Exception as e:
print("load iproutes failed. " + str(e))
continue
if input.split()[0] == "load-ipsets":
try:
cfg["ipsets"] = ParseIPSets(input.split()[1])
except Exception as e:
print("load ipsets failed. " + str(e))
continue
if input.split()[0] == "set-source":
try:
cfg["packet"].set_source(input.split()[1])
except Exception as e:
print("set source ip failed. " + str(e))
continue
if input.split()[0] == "set-dest":
try:
cfg["packet"].set_dest(input.split()[1])
except Exception as e:
print("set dest ip failed. " + str(e))
continue
if input.split()[0] == "set-sport":
try:
cfg["packet"].sport = int(input.split()[1])
except Exception as e:
print("set source port failed. " + str(e))
continue
if input.split()[0] == "set-dport":
try:
cfg["packet"].dport = int(input.split()[1])
except Exception as e:
print("set dest port failed. " + str(e))
continue
if input.split()[0] == "run-incomming-packet":
if (not cfg["iptables"]) or (not cfg["ipaddrs"]) or (not cfg["iproutes"]) or (not cfg["ipsets"]):
print("please run 'load' cmds to load configurations.")
continue
Runner(cfg["ipaddrs"], cfg["non_local_ip"], cfg["iproutes"], cfg["ipsets"], cfg["iptables"]).RunIncommingPacket(cfg["packet"])
continue
if input.split()[0] == "run-localgen-packet":
if (not cfg["iptables"]) or (not cfg["ipaddrs"]) or (not cfg["iproutes"]) or (not cfg["ipsets"]):
print("please run 'load' cmds to load configurations.")
continue
Runner(cfg["ipaddrs"], cfg["non_local_ip"], cfg["iproutes"], cfg["ipsets"], cfg["iptables"]).RunLocalGenPacket(cfg["packet"])
continue
# p.set_source("192.168.199.10").set_dest("192.168.199.14").dport = 2379
# p.iface = "cali30b5015dbf7"
# p.set_source("192.16.1.51").set_dest("192.16.1.29").dport = 2379
# p.set_source("192.16.1.51").set_dest("10.254.0.1").dport = 443
# Runner(addrs, non_local_ip, routes, sets, tables).RunIncommingPacket(p)
# answer = prompt('Give me some input: ', bottom_toolbar=bottom_statusbar)
# print('You said: %s' % answer)
# p.set_dest("192.16.1.51").dport = 2379
# Runner(cfg["ipaddrs"], cfg["non_local_ip"], cfg["iproutes"], cfg["ipsets"], cfg["iptables"]).RunLocalGenPacket(p)
|
"""
Publish/Subscribe tool
@author Paul Woods <paul@skytruth.org>
"""
import webapp2
from google.appengine.ext import db
from google.appengine.api import taskqueue
import json
import urllib2
import os
from taskqueue import TaskQueue
class Subscription (db.Model):
event = db.StringProperty()
url = db.StringProperty()
created = db.DateTimeProperty(auto_now_add=True)
data = db.StringProperty() #this contains arbitrary content encoded as json
class PubSub ():
@staticmethod
def publish (event, pub_data = None):
"""Publish an event. This will trigger all subscriptions with a matching event
Args:
event name of the event
pub_data optional dict of params to be passed to the subscriber
Returns:
the number of subscriptions that were triggered
"""
#Get all subscriptions with a matching event
count = 0
q = db.GqlQuery("SELECT __key__ FROM Subscription WHERE event = :1", event)
for key in q.run():
# add a push-queue entry for each notification
taskqueue.add(url='/pubsub/notify', params={'key':str(key), 'event': event, 'pub_data': json.dumps(pub_data)})
count = count + 1
#return the number of subscriptions triggered
return count
@staticmethod
def subscribe (event, url, sub_data = None):
"""Subscribe to an event.
Args:
event name of the event to subscribe to
url url to receive a POST when the specified event is published
sub_data optional dict of params to be passed to the subscriber. This can be used to contain
a 'secret' key that will identify the post as coming from this source
Returns:
a subscription id
"""
sub = Subscription(event=event, url=url, data=json.dumps(sub_data))
return str(sub.put())
@staticmethod
def unsubscribe (key):
""" Remove an existing subscription.
Args:
            key A subscription key previously returned by a call to subscribe
Returns:
True if the subscription was removed, False if it was not found
"""
sub = Subscription.get(db.Key(encoded=key))
if sub:
sub.delete()
return sub is not None
@staticmethod
def notify (key, event, pub_data):
"""Send notification to the specified subscription.
"""
sub = Subscription.get(db.Key(encoded=key))
if not sub:
return None
data = {
'key' : key,
'event': event,
'pub_data': pub_data,
'sub_data': json.loads(sub.data)
}
if sub.url.startswith ('/'):
#Handle local urls through the task queue
taskqueue.add(
url=sub.url,
headers = {'Content-Type':'application/json'},
payload=json.dumps(data))
else:
#for external urls use urllib2
req = urllib2.Request(sub.url)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(data))
#handler for notify - called once for each subscription that is triggered by a published event
class NotifyHandler(webapp2.RequestHandler):
"""Handler for pubsub notifications.
This gets called from the taskqueue by tasks added by PubSub.publish()
"""
def post(self):
self.response.headers.add_header('Content-Type', 'application/json')
r = {'status': 'OK'}
try:
PubSub.notify(
key = self.request.get('key'),
event = self.request.get('event'),
pub_data = json.loads(self.request.get('pub_data')))
except Exception, e:
r['status'] = 'ERR'
r['message'] = str(e)
self.response.write(json.dumps( r ))
#creates a new task in the task queue
class TaskHandler(webapp2.RequestHandler):
"""This handler acts as a subscribe target and creates a new task in the task queue
subdata.channel specifies the task queue channel
subdata.taskname specifies the name to be assigned to the task
OR
subdata.pubname specifies the field name in pub data to use for the task name
"""
def post(self):
self.response.headers.add_header('Content-Type', 'application/json')
r = {'status': 'OK'}
try:
data = json.loads(self.request.body)
channel = data['sub_data']['channel']
name = data['sub_data'].get('taskname')
if not name:
name = data['pub_data'].get(data['sub_data'].get('pubname'))
r['id'] = TaskQueue.add (channel, name, data)
except Exception, e:
r['status'] = 'ERR'
r['message'] = str(e)
self.response.write(json.dumps( r ))
class TestHandler(webapp2.RequestHandler):
"""This handler expects a json POST, and it returns same json it receives. Used for testing."""
def post(self):
self.response.headers.add_header('Content-Type', self.request.headers['Content-Type'])
self.response.write (self.request.body)
app = webapp2.WSGIApplication([
('/pubsub/notify', NotifyHandler),
('/pubsub/test', TestHandler),
('/pubsub/task', TaskHandler)
],debug=True)
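# Illustrative usage from application code (not part of this module); the
# '/myapp/on_upload' handler and the 'file-uploaded' event name are hypothetical.
#
#   key = PubSub.subscribe('file-uploaded', '/myapp/on_upload',
#                          sub_data={'secret': 's3cr3t'})
#   PubSub.publish('file-uploaded', pub_data={'filename': 'report.csv'})
#   PubSub.unsubscribe(key)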
|
from fixture.db import DbFixture
from fixture.orm import ORMFixture
from fixture.group import Group
# db1 = DbFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#
# try:
# contacts = db1.get_contact_list()
# for contact in contacts:
# print(contact)
# print(len(contacts))
# finally:
# db1.destroy()
db2 = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
try:
l = db2.get_contacts_in_group(Group(id="332"))
for item in l:
print(item)
print(len(l))
finally:
pass
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import csv
import sys
csv.register_dialect('escaped',
escapechar='\\',
doublequote=False,
quoting=csv.QUOTE_NONE,
)
csv.register_dialect('singlequote',
quotechar="'",
quoting=csv.QUOTE_ALL,
)
quoting_modes = {
getattr(csv, n): n
for n in dir(csv)
if n.startswith('QUOTE_')
}
TEMPLATE = '''\
Dialect: "{name}"
delimiter = {dl!r:<6} skipinitialspace = {si!r}
doublequote = {dq!r:<6} quoting = {qu}
quotechar = {qc!r:<6} lineterminator = {lt!r}
escapechar = {ec!r:<6}
'''
for name in sorted(csv.list_dialects()):
dialect = csv.get_dialect(name)
print(TEMPLATE.format(
name=name,
dl=dialect.delimiter,
si=dialect.skipinitialspace,
dq=dialect.doublequote,
qu=quoting_modes[dialect.quoting],
qc=dialect.quotechar,
lt=dialect.lineterminator,
ec=dialect.escapechar,
))
writer = csv.writer(sys.stdout, dialect=dialect)
writer.writerow(
('col1', 1, '10/01/2010',
'Special chars: " \' {} to parse'.format(
dialect.delimiter))
)
print()
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/Num.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestChar.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestDurationArray.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestHeaderArray.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestHeader.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestHeaderTwo.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestTimeArray.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestUInt8.msg;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg/TestUInt8FixedSizeArray16.msg"
services_str = "/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/AddTwoInts.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/SendBytes.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestArrayRequest.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestEmpty.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestMultipleRequestFields.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestMultipleResponseFields.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestNestedService.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestRequestAndResponse.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestRequestOnly.srv;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/srv/TestResponseOnly.srv"
pkg_name = "rosbridge_library"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "rosbridge_library;/home/xtark/ros_ws/src/third_packages/rosbridge_suite/rosbridge_library/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
import pytest
from unify_idents.engine_parsers.base_parser import BaseParser
def test_uninitialized_parser_compatiblity_is_false():
input_file = (
pytest._test_path / "data" / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml"
)
compat = BaseParser.check_parser_compatibility(input_file)
assert compat is False
|
#!/bin/python3
# -*- coding: utf-8 -*-
import yaml
import json
import sys
import os
import pathlib
from jinja2 import Environment, FileSystemLoader
from pprint import pprint
def ensure_folder(folderTest):
"""[Check if folder exist and create it]
Args:
folderTest (string): [folder to test existance]
"""
file = pathlib.Path(folderTest)
if not file.exists ():
print ("File "+ folderTest +" not exist, creating it")
pathlib.Path(folderTest).mkdir(parents=True, exist_ok=True)
def main_menu(menuItem):
"""Main menu
Args:
menuItem ([list]): [Switch available in your json file]
Returns:
Just display list of switch
"""
os.system('clear')
while True:
try :
            print('List of switch configurations available in your json file:')
for menuValue in menuItem:
print(str(menuItem.index(menuValue)) + " - " + menuValue)
print(" Ctrl + D to quit")
choice = int(input(" >> "))
return menuItem[int(choice)]
except ValueError:
print("Value is not a number")
except IndexError:
print("Value not available")
# Static variables
jinja_dir = 'jinja_templates'
vlan_json_file = 'json/vlan.json'
switch_description_json_file = 'json/switch_port_config.json'
switch_model_definition_file = 'json/switch_model.json'
#script path
script_dir = os.path.dirname(os.path.realpath(__file__))
#jinja template dir
jinja_template_dir = os.path.join(script_dir,jinja_dir)
# vlan json
# todo: maybe make this configurable?
vlanfile = os.path.join(script_dir,vlan_json_file)
#vlanfile = os.path.join(script_dir,'json/huawei.vlan.json')
# switch configuration json file
# todo: maybe make this configurable?
switchjsonfile = os.path.join(script_dir,switch_description_json_file)
#switchjsonfile = os.path.join(script_dir,'json/huawei.interfaces.json')
# switch model json file
# todo: maybe make this configurable?
switchmodeljsonfile = os.path.join(script_dir,switch_model_definition_file)
#create Jinja2 environment object and refer to templates directory
env = Environment(loader=FileSystemLoader(jinja_template_dir))
#portDict: dictionary mapping port numbers to port names, depending on the switch model
portDict = {}
switchNameList = []
##loading of the json files
#vlan configuration file
vlanconfig = json.loads(open(vlanfile).read())
#switch model database file
switchmodeldb = json.loads(open(switchmodeljsonfile).read())
#switch configuration file
configjson = json.loads(open(switchjsonfile).read())
#end of json file loading
#build the drop-down menu listing the switches
for switch in configjson:
switchNameList.append(switch)
switchname = main_menu(sorted(switchNameList))
#check the output folder then create the output configuration file
ensure_folder(script_dir + "/output/")
configOutPutFile = script_dir + "/output/" + switchname + ".ios"
##initialisation of the variables used to build the config file
#template used to generate the config file
template_file = configjson[switchname]['template']
template = env.get_template(template_file)
#switch hostname
hostname = configjson[switchname]['hostname']
#vlan domain used by the switch
vlan_domain = configjson[switchname]['vlan_domain']
#vlans of the domain
vlans = vlanconfig[vlan_domain]['vlans']
#mgnt = configjson[switchname]["interfacemgnt"]
#dictionary of vlans
vlandict = { vlan["name"]: vlan["id"] for vlan in vlans}
#build the dictionary describing the ports that make up the switch
modulePortconfig = switchmodeldb.get(configjson[switchname]['model']).get('access_port')
for modPort in modulePortconfig.keys():
portStart = modulePortconfig[modPort]['start']
portEnd = modulePortconfig[modPort]['end']
portName = modulePortconfig[modPort]['name']
for portID in range(portStart, portEnd +1):
portDict[str(portID)] = portName + str(portID)
#management port configuration
if switchmodeldb.get(configjson[switchname]['model']).get('management_port').get('embedded'):
mgntFlag = True
mgntPort = switchmodeldb.get(configjson[switchname]['model']).get('management_port').get('name')
else:
mgntFlag = False
mgntPort = 'None'
# build the emptyPortDict variable, which holds the ports not configured on the switch so they can be set to shutdown
emptyPortDict = { emptyPort['port']: "null" for emptyPort in configjson[switchname].get('interfacephy') }
emptyPortDict = portDict.keys() - emptyPortDict.keys()
#ugly hack to get the ports in the right order (a string sort puts 10 before 1): convert them to int before sending them to jinja2
# so they sort numerically, then cast them back to string inside the jinja2 template
emptyPortDict = {int(x) for x in emptyPortDict}
emptyPortDict = sorted(emptyPortDict)
#emptyPortDict = {}
#uncomment next line for debug
#pprint(ma_var)
#sys.exit()
#build a dict describing the variables sent to Jinja
switchConfigTemplateVariable = {"vlans": vlans,
"hostname": hostname,
"configjson": configjson[switchname],
"vlandict": vlandict,
"portdict": portDict,
"emptyportdict": emptyPortDict,
"mgntflag": mgntFlag,
"mgntport": mgntPort
}
switchConfigGenerate = template.render(switchConfigTemplateVariable)
print(switchConfigGenerate, file=open(configOutPutFile,"w"))
print("config file created in : "+configOutPutFile)
|
#!/usr/bin/env python
import os
import sys
import subprocess
import json
import platform
import argparse
import mmap
import traceback
import ConfigParser
from distutils import dir_util
from utils.space_checker_utils import wget_wrapper
def create_directory(directory):
"""Create parent directories as necessary.
:param directory: (~str) Path of directory to be made.
:return: True - if directory is created, and False - if not.
"""
try:
os.makedirs(directory)
return True
except OSError:
# Checks if the folder is empty
if not os.listdir(directory):
return True
return False
def run_command(command):
"""Execute the provided shell command.
:param command: (~str) Linux shell command.
:return: True - if command executed, and False if not.
"""
Colorizer.normal("[*] Running following command")
Colorizer.info("%s" % command)
# If command is `wget`, then before execution, `wget_wrapper` checks whether there is enough disk space available
if not wget_wrapper(command):
return False
return os.system(command)
def owtf_last_commit():
"""Prints the local git repo's last commit hash."""
if os.path.exists(os.path.join(root_dir, '.git')):
command = 'git log -n 1 --pretty=format:"%H"'
commit_hash = os.popen(command).read()
return commit_hash
else:
return "*Not a git repository.*"
def check_sudo():
"""Checks if the user has sudo access."""
sudo = os.system("sudo -v")
if not sudo:
return
else:
Colorizer.warning("[!] Your user does not have sudo privileges. Some OWTF components require"
"sudo permissions to install")
sys.exit()
def install_in_directory(directory, command):
"""Execute a certain command while staying inside one directory.
:param directory: (~str) Path of directory in which installation command has to be executed.
:param command: (~str) Linux shell command (most likely `wget` here)
:return: True - if installation successful or directory already exists, and False if not.
"""
if create_directory(directory):
Colorizer.info("[*] Switching to %s" % directory)
os.chdir(directory)
return run_command(command)
else:
Colorizer.warning("[!] Directory %s already exists, so skipping installation for this" % directory)
return True
def install_using_pip(requirements_file):
"""Install pip libraries as mentioned in a requirements file.
:param requirements_file: (~str) Path to requirements file - in which libraries are listed.
:return: True - if installation successful, and False if not.
"""
    # Note: installing straight from the requirements file means a single broken library can make the whole pip run fail
return run_command("pip2 install --upgrade -r %s" % requirements_file)
def install_restricted_from_cfg(config_file):
"""Install restricted tools and dependencies which are distro independent.
:param config_file: (~str) Path to configuration file having information about restricted content.
"""
cp = ConfigParser.ConfigParser({"RootDir": root_dir, "Pid": pid})
cp.read(config_file)
for section in cp.sections():
Colorizer.info("[*] Installing %s" % section)
install_in_directory(os.path.expanduser(cp.get(section, "directory")), cp.get(section, "command"))
def is_debian_derivative():
compatible_value = os.system("which apt-get >> /dev/null 2>&1")
if (compatible_value >> 8) == 1:
return False
else:
return True
def finish():
Colorizer.success("[*] Finished!")
Colorizer.info("[*] Run following command to start virtualenv: source ~/.%src; workon owtf"
% os.environ["SHELL"].split(os.sep)[-1])
Colorizer.info("[*] Start OWTF by running 'cd path/to/pentest/directory; ./path/to/owtf.py'")
def setup_virtualenv():
Colorizer.info("[*] Setting up virtual environment named owtf...")
# If /usr/local/bin/virtualenvwrapper.sh doesn't exist, create a symlink from /usr/bin/
if not os.path.isfile('/usr/local/bin/virtualenvwrapper.sh'):
run_command('sudo ln -s /usr/bin/virtualenvwrapper.sh /usr/local/bin/virtualenvwrapper.sh >/dev/null 2>&1;')
# sources files and commands
source = 'source /usr/local/bin/virtualenvwrapper.sh'
setup_env = 'cd $WORKON_HOME; virtualenv -q --always-copy --python=python2.7 owtf >/dev/null 2>&1;'\
' source owtf/bin/activate'
dump = '%s -c "import os, json;print json.dumps(dict(os.environ))"' % sys.executable
pipe = subprocess.Popen(['/bin/bash', '-c', '%s >/dev/null 2>&1; %s; %s' % (source, setup_env, dump)],
stdout=subprocess.PIPE)
env = json.loads(pipe.stdout.read())
# Update the os environment variable
os.environ.update(env)
try:
if os.path.join(os.environ["WORKON_HOME"], "owtf") == os.environ["VIRTUAL_ENV"]:
# Add source to shell config file only if not present
Colorizer.info("[*] Adding virtualenvwrapper source to shell config file")
shell_rc_path = os.path.join(os.environ["HOME"], ".%src" % os.environ["SHELL"].split(os.sep)[-1])
with open(shell_rc_path, "r") as f:
if mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ).find(source) == -1:
run_command("echo '%s' >> %s" % (source, shell_rc_path))
else:
Colorizer.info("[+] Source line already added to the $SHELL config ")
return True
except KeyError:
traceback.print_exc()
return False
def setup_pip():
# Installing pip
Colorizer.info("[*] Installing pip")
directory = "/tmp/owtf-install/pip/%s" % os.getpid()
command = 'command -v pip2 >/dev/null || { wget --user-agent="Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101'\
' Firefox/15.0" --tries=3 https://bootstrap.pypa.io/get-pip.py; sudo python get-pip.py;}'
install_in_directory(os.path.expanduser(directory), command)
Colorizer.info("[*] Installing required packages for pipsecure")
run_command("sudo pip2 install --upgrade pyopenssl ndg-httpsclient pyasn1")
# Installing virtualenv
Colorizer.info("[*] Installing virtualenv and virtualenvwrapper")
install_in_directory(os.path.expanduser(str(os.getpid())), "sudo pip2 install --upgrade virtualenv virtualenvwrapper")
def install(cmd_arguments):
"""Perform installation of OWTF Framework. Wraps around all helper methods made in this module.
:param cmd_arguments:
"""
args = parser.parse_args(cmd_arguments)
    # Ask the user to select a distro (in case it cannot be detected automatically) so distro-related packages can be installed
cp = ConfigParser.ConfigParser({"RootDir": root_dir, "Pid": pid})
cp.read(distros_cfg)
    # Try to detect the distro automatically
distro, version, arch = platform.linux_distribution()
distro_num = 0
if "kali" in distro.lower():
distro_num = 1
elif is_debian_derivative():
distro_num = 2
if distro_num != 0:
Colorizer.info("[*] %s has been automatically detected... " % distro)
Colorizer.normal("[*] Continuing in auto-mode")
elif (distro_num == 0) and args.no_user_input:
Colorizer.info("[*] Cannot auto-detect a supported distro...")
Colorizer.normal("[*] Continuing in auto-mode with the core installation...")
else:
# Loop until proper input is received
while True:
print("")
for i, item in enumerate(cp.sections()):
Colorizer.warning("(%d) %s" % (i + 1, item))
Colorizer.warning("(0) My distro is not listed :( %s" % distro)
num_input = raw_input("Select a number based on your distribution : ")
try:
if int(num_input) <= len(cp.sections()):
distro_num = int(num_input)
break
else:
print("")
Colorizer.warning("[!] Invalid number - not a supported distro")
continue
except ValueError:
print("")
Colorizer.warning("[!] Invalid Number specified")
continue
# Install distro specific dependencies and packages needed for OWTF to work
if distro_num != 0:
run_command(cp.get(cp.sections()[int(distro_num) - 1], "install"))
else:
Colorizer.normal("[*] Skipping distro related installation :(")
# Installing pip and setting up virtualenv.
# This requires distro specific dependencies to be installed properly.
setup_pip()
if setup_virtualenv():
install_using_pip(owtf_pip)
else:
Colorizer.danger("Unable to setup virtualenv...")
Colorizer.danger("Skipping installation of OWTF python dependencies ...")
# Now install distro independent stuff - optional
# This is due to db config setup included in this. Should run only after PostgreSQL is installed.
# See https://github.com/owtf/owtf/issues/797.
install_restricted_from_cfg(restricted_cfg)
Colorizer.normal("[*] Upgrading pip to the latest version ...")
    # Upgrade pip before installing the required libraries
run_command("pip2 install --upgrade pip")
Colorizer.normal("Upgrading setuptools to the latest version ...")
# Upgrade setuptools
run_command("pip2 install --upgrade setuptools")
Colorizer.normal("Upgrading cffi to the latest version ...")
# Mitigate cffi errors by upgrading it first
run_command("pip2 install --upgrade cffi")
class Colorizer:
"""Helper class for colorized strings.
Different statements will have different colors:
- `normal`, denoting ongoing procedure (WHITE)
    - `info`, any file path, commit hash or any other info (CYAN)
- `warning`, any potential hindrance in installation (YELLOW)
- `danger`, abrupt failure, desired file/dir not found etc. (RED)
"""
BOLD = '\033[1m'
RED = BOLD + '\033[91m'
GREEN = BOLD + '\033[92m'
YELLOW = BOLD + '\033[93m'
BLUE = BOLD + '\033[34m'
PURPLE = BOLD + '\033[95m'
CYAN = BOLD + '\033[36m'
WHITE = BOLD + '\033[37m'
END = '\033[0m\033[0m'
def __init__(self):
pass
@classmethod
def normal(cls, string):
print(cls.WHITE + string + cls.END)
@classmethod
def info(cls, string):
print(cls.CYAN + string + cls.END)
@classmethod
def warning(cls, string):
print(cls.YELLOW + string + cls.END)
@classmethod
def success(cls, string):
print(cls.GREEN + string + cls.END)
@classmethod
def danger(cls, string):
print(cls.RED + string + cls.END)
if __name__ == "__main__":
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
pid = os.getpid()
# Path to custom scripts for tasks such as setting up/ running PostgreSQL db, run arachni, nikto, wapiti etc.
scripts_path = os.path.join(root_dir, "scripts")
# OWTF python libraries
owtf_pip = os.path.join(root_dir, "install", "owtf.pip")
# Restricted tools and dictionaries which are distro independent
restricted_cfg = os.path.join(root_dir, "install", "distro-independent.cfg")
# Various distros and install scripts
distros_cfg = os.path.join(root_dir, "install", "linux-distributions.cfg")
parser = argparse.ArgumentParser()
parser.add_argument('--no-user-input', help='run script with default options for user input', action="store_true")
Colorizer.normal("[*] Great that you are installing OWTF :D")
Colorizer.warning("[!] There will be lot of output, please be patient")
Colorizer.info("[*] Last commit hash: %s" % owtf_last_commit())
check_sudo()
install(sys.argv[1:])
# Copying config files
dest_config_path = os.path.join(os.path.expanduser('~'), '.owtf', 'configuration')
create_directory(dest_config_path)
src_config_path = os.path.join(root_dir, 'configuration')
dir_util.copy_tree(src_config_path, dest_config_path)
finish()
|
from rrb3 import *
from random import randint
import time
rr = RRB3(8, 6)
i = 0
while True:
speedl = randint(0, 100) / 100.0
speedr = randint(0, 100) / 100.0
dl = randint(0, 1)
dr = randint(0, 1)
rr.set_motors(speedl, dl, speedr, dr)
time.sleep(3)
i += 1
print(i)
|
from flask import Flask
from heapsy import HeapDendrogram
app = Flask(__name__)
class LeakObj(object):
data = 1
leakable = []
@app.route('/leakable')
def leak():
global leakable
leakable.append(LeakObj())
return f'hi {len(leakable)}'
@app.route('/metrics')
def heap_usage():
hd = HeapDendrogram()
hd.generate()
return hd.as_prometheus_metric()
if __name__ == '__main__':
app.run()
|
import numpy as np
import numpy.matlib
from scipy.special import roots_hermite
def sampling(n_samp, dim, randtype = 'Gaussian', distparam = None):
'''
returns a matrix of (n_samp) vectors of dimension (dim)
Each vector is sampled from a distribution determined by (randtype) and (distparam)
@ Params
n_samp ..... number of sample vectors
dim ........ dimension of each vector
randtype ... (str) type of the distribution
possible types:
(i) Gaussian
(ii) Uniform
    distparam .. (dict) distribution parameters
                 (i) Gaussian: {'mean': mu, 'std': sigma}
                 (ii) Uniform: {'mean': x0 (vector), 'rad': r}
                      currently only supports a uniform dist on a sphere
                      centered at x0, with radius r
                 (iii) Coord: random coordinate basis vector,
                       drawn uniformly from {e_1, ..., e_n}
                 (iv) Box: random +/-1 square patch; expects {'windowsz', 'coord', 'ImSize'}
                 (v) Vert: random +/-1 perturbation per image row; expects {'ImSize'}
@ Return
samples .... (np.ndarray) matrix of sampled vectors
size = (n_samp) - by - (dim)
'''
if randtype == 'Gaussian' or randtype == 'Normal':
if distparam == None:
return np.random.normal(size = (n_samp, dim)) # Gaussian
else:
return np.random.normal(distparam['mean'], distparam['std'], size = (n_samp, dim)) # Gaussian
elif randtype == 'Uniform':
mat = np.random.normal(size = (n_samp, dim)) # Gaussian
norms = np.linalg.norm(mat,axis=1) # to Normalize
if distparam == None:
return mat/norms[:,None]
else:
return mat/norms[:,None]*distparam['rad'] + np.matlib.repmat(distparam['mean'],n_samp,1)
elif randtype == 'Coord':
if distparam == None:
idx = np.random.choice(range(dim), size=(n_samp,1))
mat = np.zeros((n_samp, dim))
for i in range(n_samp):
                mat[i, idx[i]] = 1.
return mat
elif randtype == 'Box': # square atk
wsz = distparam['windowsz'] # size of the square
coord = distparam['coord'] # lower left corner of the square
Isz = distparam['ImSize']
mat = np.zeros(dim)
sgn = np.random.rand()
if sgn>0.5:
sgn = 1
else:
sgn = -1
for imdim in range(2):
if coord[imdim]+wsz >= Isz[imdim]:
coord[imdim] = Isz[imdim]-wsz-1
for i in range(wsz):
for j in range(wsz):
mat[coord2idx(coord[0]+i, coord[1]+j)] = sgn
return mat
    elif randtype == 'Vert': # random vertical perturbation
Isz = distparam['ImSize']
mat = np.zeros(dim)
for i in range(Isz[0]):
sgn = np.random.rand()
if sgn>0.5:
sgn = 1
else:
sgn = -1
for j in range(Isz[1]):
mat[coord2idx(i,j)] = sgn
return mat
def coord2idx(x, y, ImSize=(28, 28)):
    if 0 <= y and y < ImSize[0]:
        if 0 <= x and x < ImSize[1]:
            return x*ImSize[0] + y
print(f"ERR (coord2idx): ({x}, {y}) cannot be converted into an idx")
return 0
def idx2coord(idx, ImSize=(28,28)):
    if 0 <= idx and idx < ImSize[0]*ImSize[1]:
return [idx//ImSize[0], idx%ImSize[0]]
print(f"ERR (idx2coord): {idx} cannot be converted into coordinates")
return (0,0)
def oracles(f, xs, pool=None):
m = xs.shape[0]
if pool==None:
return np.array([f(xs[i,:]) for i in range(m)])
else: # overhead can be large, so be careful
xlist = [xs[i,:] for i in range(m)]
return np.array(pool.map(f, xlist))
def oracles_with_p(f, xs, pool=None):
m = xs.shape[0]
if pool==None:
val = np.empty(m)
probs = np.empty((m,10))
for i in range(m):
res = f(xs[i,:])
val[i] = res['fval']
probs[i,:] = res['pdist'][0]
return val, probs
# else part not implemented
# else: # overhead can be large, so be careful
# xlist = [xs[i,:] for i in range(m)]
# return np.array(pool.map(f, xlist))
def CentDiff(f, x, h, u, fval, proj):
h_valid = False
h_init = h
# proj doesn't do anything if not constrained
while not h_valid: # if h too small, increase it
xp = x + h*u # proj(x + h*u)
xm = x - h*u # proj(x - h*u)
fp = f(xp)
fm = f(xm)
d = (fp-fm)/2./h
d2 = (fp - 2*fval + fm)/h**2
if min(np.abs(d),np.abs(d2)) > 1e-8:
h_valid = True
h = h_init # prevent h being too large
else:
h *= 1.5
return d, d2
def NumQuad(f, x, h, u, fval, ATK, GH_pts = 5):
gh = roots_hermite(GH_pts)
gh_value = np.expand_dims(gh[0], axis=1)
if GH_pts%2 == 0:
xs = np.matlib.repmat(x, GH_pts, 1) + h*np.sqrt(2.0)*gh_value*u
fs = oracles(f,xs)
else: # can reuse fval = f(x)
xs = np.matlib.repmat(x, GH_pts, 1) + h*np.sqrt(2.0)*gh_value*u
xm = xs[:(GH_pts-1)//2,:]
xp = xs[-(GH_pts-1)//2:,:]
fs = np.empty(GH_pts)
fs[:(GH_pts-1)//2] = oracles(f,xm)
fs[(GH_pts-1)//2] = fval
fs[-(GH_pts-1)//2:] = oracles(f,xp)
gh_weight = gh[1]
fsgh = fs * gh_weight
gh_value = np.transpose(gh_value)
grad_u = 1. / np.sqrt(np.pi) / h * np.sum(fsgh * (np.sqrt(2.)*gh_value))
hess_u = 1. / np.sqrt(np.pi) / h**2 * np.sum(fsgh * (2*gh_value**2-1))
D3f_u = 1. / np.sqrt(np.pi) / h**3 * np.sum(fsgh * (np.sqrt(8.)*gh_value**3-3.*np.sqrt(2.)*gh_value))
D4f_u = 1. / np.sqrt(np.pi) / h**4 * np.sum(fsgh * (4*gh_value**4-6*2*gh_value**2+3))
return grad_u, hess_u, D3f_u, D4f_u
def argminF(*args):
'''
    @param *args: tuple of dicts, each of which contains 'x' and 'fval' fields
    @return (the dict with the smallest 'fval', its index within args)
'''
minelt = args[0]
minfval = minelt['fval']
minidx = 0
for idx, elt in enumerate(args):
if elt['fval'] < minfval:
minfval = elt['fval']
minelt = elt
minidx = idx
return minelt, minidx
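# ---------------------------------------------------------------------------
# Usage sketch added for illustration (not part of the original module): it
# only exercises sampling() with the Gaussian and Uniform modes documented
# above, and is guarded so importing the module stays side-effect free.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    gauss = sampling(4, 3)  # 4 standard-Gaussian vectors in R^3
    unif = sampling(4, 3, randtype='Uniform',
                    distparam={'mean': np.zeros(3), 'rad': 2.0})  # sphere of radius 2
    print('Gaussian sample shape:', gauss.shape)
    print('Uniform-sphere row norms:', np.linalg.norm(unif, axis=1))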
|
from mongoengine import Document, fields
from bson import ObjectId
from graphene_mongo_extras.tests.conftest import setup_mongo
class Packaging(Document):
name = fields.StringField()
class Toy(Document):
meta = {'allow_inheritance': True}
id = fields.ObjectIdField(primary_key=True, default=ObjectId)
name = fields.StringField()
packaging = fields.ReferenceField(Packaging)
class Plushie(Toy):
animal = fields.StringField()
class Videogame(Toy):
genre = fields.StringField()
|
"""bootstraphistogram
A multi-dimensional histogram. The distribution of the histogram's bin values is computed with the Poisson bootstrap
re-sampling method. The main class is implemented in :py:class:`bootstraphistogram.BootstrapHistogram`.
Some basic plotting functions are provided in :py:mod:`bootstraphistogram.plot`.
"""
import boost_histogram.axis as axis
import bootstraphistogram.plot as plot
from bootstraphistogram import _version
from bootstraphistogram.bootstraphistogram import BootstrapHistogram
__version__ = _version.version(__name__)
__license__ = "MIT"
__author__ = "David Hadley"
url = "https://github.com/davehadley/bootstraphistogram"
__all__ = ["BootstrapHistogram", "axis", "plot"]
|
'''
We are making n stone piles! The first pile has n stones. If n is
even, then all piles have an even number of stones. If n is odd, all
piles have an odd number of stones. Each pile must have more stones
than the previous pile but as few as possible. Write a Python
program to find the number of stones in each pile.
Input: 2
Output:
[2, 4]
Input: 10
Output:
[10, 12, 14, 16, 18, 20, 22, 24, 26, 28]
Input: 3
Output:
[3, 5, 7]
Input: 17
Output:
[17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49]
'''
#License: https://bit.ly/3oLErEI
def test(n):
# multiply and then add
return [n + 2 * i for i in range(n)]
n = 2
print("Number of piles:",n)
print("Number of stones in each pile:")
print(test(n))
n = 10
print("\nNumber of piles:",n)
print("Number of stones in each pile:")
print(test(n))
n = 3
print("\nNumber of piles:",n)
print("Number of stones in each pile:")
print(test(n))
n = 17
print("\nNumber of piles:",n)
print("Number of stones in each pile:")
print(test(n))
|
# package indicator for spin Graphics
# $Id$
version_maj_number = 1.1
version_min_number = 0
version = "%s.%s" % (version_maj_number, version_min_number)
|
from test import cmd as test_cmd
def test_ls():
_, err = test_cmd('ls .')
assert err is None
if __name__ == '__main__':
test_ls()
|
# **************************************************************************** #
# #
# ::: :::::::: #
# pomodoro_procedure.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: javgonza <marvin@42.fr> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2022/04/10 15:00:48 by javgonza #+# #+# #
# Updated: 2022/04/10 16:53:51 by javgonza ### ########.fr #
# #
# **************************************************************************** #
from timer_printer import TimerPrinter
from timer_alerts import *
import sys, time
def pomodoro(_minutes, _color, _seconds):
minutes = _minutes
seconds = _seconds
for remaining in range(60 * minutes + seconds, 0, -1):
TimerPrinter.print(minutes, seconds, _color)
if seconds > 0:
seconds -= 1
else:
if minutes <= 0:
return 0
minutes -= 1
seconds = 59
time.sleep(1)
def pomodoroNormalTimer():
pomodoro(TimerPrinter.minutes, TimerPrinter.BlueColor, TimerPrinter.seconds)
sys.stdout.write("\n\n")
printAlert()
def pomodoroBreakTimer():
pomodoro(TimerPrinter.breakMinutes, TimerPrinter.GreenColor, TimerPrinter.breakSeconds)
sys.stdout.write("\n\n")
printAlert()
def pomodoroProcedure():
input("\nPress ENTER to start...")
for n in range(10, 0, -1): # Limit of 10 for protection
pomodoroNormalTimer()
alertMessage("\nPress ENTER to start your break...")
pomodoroBreakTimer()
alertMessage("\nPress ENTER to focus...")
|
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
import intake
from intake_orc.source import ORCSource
|
from datetime import datetime
from typing import List, Dict
from gateway.schema.customfeedinfo import CustomFeedInfo
class SiteHost:
def __init__(
self,
host: str,
last_seen: datetime = None,
feeds: Dict[str, CustomFeedInfo] = None,
):
self.host = host
self.last_seen = last_seen
self.feeds = feeds or {}
def __eq__(self, other):
return isinstance(other, self.__class__) and other.host == self.host
def __hash__(self):
return hash(f"{self.host}")
def __repr__(self):
return f"{self.__class__.__name__}({self.host})"
def load_feeds(self, feeds: List[CustomFeedInfo]) -> None:
self.feeds = {str(feed.url): feed for feed in feeds}
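# Usage sketch added for illustration (not part of the original module):
# SiteHost equality and hashing are based only on the host string, so two
# instances for the same host act as a single key in sets and dicts.
if __name__ == "__main__":
    a = SiteHost("example.com")
    b = SiteHost("example.com", last_seen=datetime(2020, 1, 1))
    print(a == b)       # True  - same host
    print(len({a, b}))  # 1     - hashed on host only
    print(repr(a))      # SiteHost(example.com)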
|
import sys
import ConfigParser
import logging
import logging.config
import tweepy
from tweepy.auth import OAuthHandler
from tweepy.streaming import StreamListener, Stream
class twitterManager:
def __init__(self, log ):
self.logger = log
config = ConfigParser.ConfigParser()
config.read("./twitter.config")
self.livDeviceName = config.get('TWITTER', 'liv_device_name')
self.alarmActive = config.getboolean('TWITTER', 'twitter_alarm_active')
self.reportActive = config.getboolean('TWITTER', 'twitter_report_active')
self.reportTime = config.getint('TWITTER', 'twitter_report_time')
self.accessToken = config.get('TWITTER', 'access_token')
self.accessTokenSecret = config.get('TWITTER', 'access_token_secret')
self.consumerKey = config.get('TWITTER', 'consumer_key')
self.consumerSecret = config.get('TWITTER', 'consumer_secret')
self.minuteCounter = 0
def sendTweet(self, type, message):
#try:
##OLD TWITTER lib code broken? t = Twitter(auth=OAuth(self.accessToken, self.accessTokenSecret, self.consumerKey, self.consumerSecret))
##s = self.livDeviceName +' '+ type + ' ' + message
##t.statuses.update(status=s)
auths = OAuthHandler(self.consumerKey, self.consumerSecret)
auths.set_access_token(self.accessToken, self.accessTokenSecret)
api = tweepy.API(auths)
s = type + ' ' + message
api.update_status(status=s)
self.logger.info("LiV sent a tweet " + s)
#except Exception as e:
# e = sys.exc_info()[0]
# self.logger.critical(str(e))
def getTwitterAlarmFlag(self):
return self.alarmActive
def sendTwitterReportNow(self):
if self.minuteCounter == self.reportTime:
return True
else:
return False
def getTwitterReportFlag(self):
return self.reportActive
def incrementMinuteCounter(self):
self.minuteCounter +=1
def resetMinuteCounter(self):
self.minuteCounter = 0
if __name__ == "__main__":
logging.config.fileConfig('livXMPPLogging.ini')
logger = logging.getLogger(__name__)
tm = twitterManager(logger)
print "twitterAlarmFlag" + " " + str(tm.alarmActive)
print "twitterReportFlag" + " " + str(tm.reportActive)
print "token " + " " + str(tm.accessToken)
print "token secret " + " " + str(tm.accessTokenSecret)
print "consumer secret " + " " + str(tm.consumerSecret)
print "consumer key " + " " + str(tm.consumerKey)
if tm.alarmActive == True:
tm.sendTweet("Alarm", "this is an alarm test")
print "sent alarm tweet"
if tm.reportActive == True:
tm.sendTweet("Report", "WHASD SLDKJ LSD LKjlk")
print "send report tweet"
|
#!/usr/bin/python
import Queue
class Queuer(object):
def __init__(self, entry_point, should_print):
print 'Crawling %s...' % entry_point
self.unvisited = Queue.Queue()
self.unvisited.put(entry_point.decode('utf-8'))
self.visited = []
self.invalid = []
self.results = []
self.should_print = should_print
'''
The next page to visit in the waiting list
'''
def next_unvisited(self):
if not self.unvisited.empty():
return self.unvisited.get()
'''
    Populate a visited page with its assets
    and add linked pages to the waiting list.
'''
def add(self, results, current_url):
assets = {}
for asset in results['assets']:
assets[asset] = results['assets'][asset]
current_page_data = {
'url': current_url,
'assets': assets
}
self.results.append(current_page_data)
self.visited.append(current_url)
for link in results['next']['url']:
if link not in self.unvisited.queue and link not in self.visited:
self.unvisited.put(link)
self.print_status(current_url)
'''
Save a list of corrupt pages
'''
def add_invalid(self, invalid_url, status_code):
self.invalid.append(invalid_url)
self.print_status(invalid_url, status_code)
'''
Status after visiting current page
'''
def print_status(self, current_url, status_code=None):
if not self.should_print:
return
invalid = 'Invalid: {0}, reason: {1}'.format(
str(len(self.invalid)), status_code) if status_code else ''
print u'{0:60} Visited: {1:8} Remaining: {2:7} {3}'.format(
current_url,
str(len(self.visited)),
str(self.unvisited.qsize()),
invalid
)
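# Usage sketch added for illustration (not part of the original module, Python 2):
# the URLs below are made up; they only show the shape of the results dict that
# Queuer.add() expects after one page has been crawled.
if __name__ == '__main__':
    q = Queuer('http://example.com/', should_print=True)
    page_results = {
        'assets': {'img': ['http://example.com/logo.png']},
        'next': {'url': ['http://example.com/about']},
    }
    q.add(page_results, q.next_unvisited())
    print 'Pages left to visit: %d' % q.unvisited.qsize()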
|
import commodity_model as commodity
com1=commodity.Commodity(1,20,156,1,'Cigarette')
com1.myprint()
print('\n')
New_ID=2
New_price=21
Incre=100
Decre=1
New_Shelf=2
New_Type='Alcohol'
com1.Change_ID(New_ID)
com1.myprint()
print('\n')
com1.Change_Price(New_price)
com1.myprint()
print('\n')
com1.Add_Inventory(Incre)
com1.myprint()
print('\n')
com1.Dec_Inventory(Decre)
com1.myprint()
print('\n')
com1.Change_Shelf(New_Shelf)
com1.myprint()
print('\n')
com1.Change_Type(New_Type)
com1.myprint()
print('\n')
com1.Reset()
com1.myprint()
print('\n')
|
import pandas as pd
import numpy as np
def export_wind_profile_shapes(heights,
u_wind, v_wind,
output_file=None,
do_scale=True,
ref_height=100.):
"""
From given wind profile return formatted pandas data frame for evaluation.
Parameters
----------
heights : list
Height steps of vertical wind profile.
u_wind : list
u-component of vertical wind profile wind speed.
v_wind : list
v-component of vertical wind profile wind speed.
output_file : string, optional
If given, write csv output to this file. The default is None.
do_scale : Bool, optional
If True, scale wind profile to 1 at reference height.
The default is True.
ref_height : Float, optional
Reference height where the wind speed is scaled to 1 - if do_scale.
        The default is 100.0.
Returns
-------
df : pandas DataFrame
Absolute wind profile shapes and scale factors.
"""
df = pd.DataFrame({
'height [m]': heights,
})
scale_factors = []
if len(u_wind.shape) == 1:
single_profile = True
else:
single_profile = False
for i, (u, v) in enumerate(zip(u_wind, v_wind)):
if single_profile:
u, v = u_wind, v_wind
w = (u**2 + v**2)**.5
if do_scale:
# Get normalised wind speed at reference height via linear
# interpolation
w_ref = np.interp(ref_height, heights, w)
# Scaling factor such that the normalised absolute wind speed
# at the reference height is 1
sf = 1/w_ref
else:
sf = 1.
dfi = pd.DataFrame({
'u{} [-]'.format(i+1): u*sf,
'v{} [-]'.format(i+1): v*sf,
'scale factor{} [-]'.format(i+1): sf,
})
df = pd.concat((df, dfi), axis=1)
scale_factors.append(sf)
if single_profile:
break
if output_file is not None:
assert output_file[-4:] == ".csv"
df.to_csv(output_file, index=False, sep=";")
return df, scale_factors
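# Usage sketch added for illustration (not part of the original module): the
# profile values below are made up; they only show the expected call signature
# for a single profile, scaled to 1 at the default 100 m reference height.
if __name__ == '__main__':
    heights = np.array([10., 50., 100., 150., 200.])
    u_demo = np.array([4.0, 6.0, 8.0, 9.0, 10.0])
    v_demo = np.array([1.0, 1.5, 2.0, 2.0, 2.0])
    shapes_df, factors = export_wind_profile_shapes(heights, u_demo, v_demo)
    print(shapes_df)
    print('scale factors:', factors)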
|
from django.contrib import admin
from .models import Designations, Batch
from .models import Faculty, Staff, UndergraduateStudents, MscStudents, PhdStudents, PhdAlumni, Publication
# Register your models here.
admin.site.register(Designations, verbose_name_plural="Designations")
admin.site.register(Faculty, verbose_name_plural="Faculty")
admin.site.register(Staff, verbose_name_plural="Staff")
admin.site.register(Batch, verbose_name_plural="Batch")
admin.site.register(UndergraduateStudents, verbose_name_plural="UndergraduateStudents")
admin.site.register(MscStudents, verbose_name_plural="MscStudents")
admin.site.register(PhdStudents, verbose_name_plural="PhdStudents")
admin.site.register(PhdAlumni, verbose_name_plural="PhdAlumni")
admin.site.register(Publication, verbose_name_plural="Publication")
|
"""
Definition of urls for soldajustica.
"""
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from django.conf.urls import include
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('app.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# -*- coding: utf-8 -*-
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.parse import quote_plus
from lncrawl.core.crawler import Crawler
logger = logging.getLogger(__name__)
search_url = 'https://booknet.com/en/search?q=%s'
get_chapter_url = 'https://booknet.com/reader/get-page'
class LitnetCrawler(Crawler):
base_url = [
'https://booknet.com/',
]
def initialize(self):
self.home_url = 'https://booknet.com/'
self.executor = ThreadPoolExecutor(1)
# end def
def search_novel(self, query):
query = quote_plus(query.lower())
soup = self.get_soup(search_url % query)
results = []
for div in soup.select('.book-item'):
a = div.select_one('.book-title a')
author = div.select_one('.author-wr a.author').text.strip()
views = div.select_one('span.count-views').text.strip()
favourites = div.select_one('span.count-favourites').text.strip()
results.append({
'title': a.text.strip(),
'url': self.absolute_url(a['href']),
'info': 'Author: %s | %s views | %s favorites' % (author, views, favourites)
})
# end for
return results
# end def
def read_novel_info(self):
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.csrf_token = soup.select_one('meta[name="csrf-token"]')['content']
self.csrf_param = soup.select_one('meta[name="csrf-param"]')['content']
logger.info('%s: %s', self.csrf_param, self.csrf_token)
self.novel_title = soup.select_one('h1.roboto').text.strip()
logger.info('Novel title: %s', self.novel_title)
img_src = soup.select_one('.book-view-cover img')
if not img_src:
img_src = soup.select_one('.book-cover img')
# end if
if img_src:
self.novel_cover = self.absolute_url(img_src['src'])
# end if
logger.info('Novel cover: %s', self.novel_cover)
author = soup.select_one('.book-view-info a.author')
if not author:
author = soup.select_one('.book-head-content a.book-autor')
# end if
if author:
self.novel_author = author.text.strip()
# end if
logger.info('Novel author: %s', self.novel_author)
chapters = soup.find('select', {'name': 'chapter'})
if chapters is None:
chapters = soup.select('.collapsible-body a.collection-item')
else:
chapters = chapters.find_all('option')
chapters = [a for a in chapters if a.attrs['value']]
# end if
volumes = set([])
for a in chapters:
chap_id = len(self.chapters) + 1
vol_id = len(self.chapters) // 100 + 1
volumes.add(vol_id)
abs_url = self.last_visited_url.replace('/en/book/', '/en/reader/')
chap_url = abs_url + ('?c=%s' % a.attrs['value'])
self.chapters.append({
'id': chap_id,
                'volume': vol_id,
'url': chap_url,
'chapter_id': a.attrs['value'],
})
# end for
self.volumes = [{'id': x} for x in volumes]
# end def
def download_chapter_body(self, chapter):
data = self._get_chapter_page(chapter)
chapter['title'] = data['chapterTitle']
content = data['data']
for page in range(2, data['totalPages'] + 1):
data = self._get_chapter_page(chapter, page)
content += data['data']
# end for
return content
# end def
def _get_chapter_page(self, chapter, page=1):
return self.post_json(get_chapter_url, data={
'chapterId': int(chapter['chapter_id']),
'page': page,
self.csrf_param: self.csrf_token
}, headers={
'X-CSRF-Token': self.csrf_token,
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
})
# end def
# end class
|
from typing import Union
class OrExpression:
def __init__(self):
self.and_expression = []
    def evaluate(self, bindings: dict) -> bool:
        """
        Evaluate the OR expression by combining the contained and-expressions with the boolean OR operator
        :param bindings: mapping of variable names to values, passed to each sub-expression
        :return: True if at least one and-expression evaluates to True, otherwise False
        """
        for expression in self.and_expression:
            if expression.evaluate(bindings):
                return True
        return False
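# Usage sketch added for illustration (not part of the original module):
# TruthExpr is a made-up stand-in for the real and-expression type; the only
# thing OrExpression needs from its children is an evaluate(bindings) method.
if __name__ == '__main__':
    class TruthExpr:
        def __init__(self, key):
            self.key = key

        def evaluate(self, bindings: dict) -> bool:
            return bool(bindings.get(self.key))

    expr = OrExpression()
    expr.and_expression = [TruthExpr('a'), TruthExpr('b')]
    print(expr.evaluate({'a': False, 'b': True}))   # True: at least one child holds
    print(expr.evaluate({'a': False, 'b': False}))  # False: no child holds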
|
import info
from Package.CMakePackageBase import *
class subinfo(info.infoclass):
def setTargets(self):
self.svnTargets["master"] = "https://invent.kde.org/network/neochat.git"
self.defaultTarget = "master"
self.displayName = "NeoChat"
self.description = "A client for matrix, the decentralized communication protocol."
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/qt5/qtdeclarative"] = None
self.runtimeDependencies["libs/qt5/qtquickcontrols2"] = None
self.runtimeDependencies["libs/qt5/qtmultimedia"] = None
self.runtimeDependencies["qt-libs/qtkeychain"] = None
self.runtimeDependencies["kde/frameworks/tier1/kirigami"] = None
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None
self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = None
self.runtimeDependencies["kde/frameworks/tier1/kitemmodels"] = None
self.runtimeDependencies["kde/frameworks/tier1/syntax-highlighting"] = None
self.runtimeDependencies["kde/frameworks/tier3/knotifications"] = None
self.runtimeDependencies["qt-libs/libquotient"] = None
self.runtimeDependencies["libs/cmark"] = None
self.runtimeDependencies["kde/libs/kquickimageeditor"] = None
self.runtimeDependencies["qt-libs/qcoro"] = None
if not CraftCore.compiler.isAndroid:
self.runtimeDependencies["kde/frameworks/tier1/sonnet"] = None
self.runtimeDependencies["kde/frameworks/tier1/breeze-icons"] = None
self.runtimeDependencies["kde/frameworks/tier3/kio"] = None
self.runtimeDependencies["kde/frameworks/tier3/qqc2-desktop-style"] = None
self.runtimeDependencies["kde/plasma/breeze"] = None
else:
self.runtimeDependencies["kde/plasma/qqc2-breeze-style"] = None
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
def createPackage(self):
self.defines["executable"] = r"bin\neochat.exe"
self.addExecutableFilter(r"(bin|libexec)/(?!(neochat|update-mime-database|snoretoast)).*")
self.ignoredPackages.append("binary/mysql")
if not CraftCore.compiler.isLinux:
self.ignoredPackages.append("libs/dbus")
return super().createPackage()
|
from aat.config import Side
from aat.core import Instrument, OrderBook, Order
from .helpers import _seed
_INSTRUMENT = Instrument("TE.ST")
class TestMarketOrder:
def test_order_book_market_order(self):
ob = OrderBook(_INSTRUMENT)
_seed(ob, _INSTRUMENT)
assert ob.topOfBook()[Side.BUY] == [5.0, 1.0]
assert ob.topOfBook()[Side.SELL] == [5.5, 1.0]
data = Order(
volume=100.0,
price=0.0,
side=Order.Sides.SELL,
order_type=Order.Types.MARKET,
instrument=_INSTRUMENT,
)
ob.add(data)
print(ob)
print(ob.topOfBook())
assert ob.topOfBook() == {Side.BUY: [0, 0], Side.SELL: [5.5, 1.0]}
print(ob.levels(3))
assert ob.levels(3) == {
Side.BUY: [[0, 0], [0, 0], [0, 0]],
Side.SELL: [[5.5, 1.0], [6.0, 1.0], [6.5, 1.0]],
}
class TestStopLoss:
def test_stop_limit(self):
ob = OrderBook(_INSTRUMENT)
_seed(ob, _INSTRUMENT)
assert ob.topOfBook()[Side.BUY] == [5.0, 1.0]
assert ob.topOfBook()[Side.SELL] == [5.5, 1.0]
print(ob)
assert ob.topOfBook() == {Side.BUY: [5.0, 1.0], Side.SELL: [5.5, 1.0]}
data = Order(
volume=0.0,
price=5.0,
side=Order.Sides.SELL,
order_type=Order.Types.STOP,
stop_target=Order(
volume=1.0,
price=4.0,
side=Order.Sides.SELL,
order_type=Order.Types.MARKET,
instrument=_INSTRUMENT,
),
instrument=_INSTRUMENT,
)
print(ob)
ob.add(data)
data = Order(
volume=0.0,
price=5.0,
side=Order.Sides.SELL,
order_type=Order.Types.STOP,
stop_target=Order(
volume=0.5,
price=5.0,
side=Order.Sides.SELL,
instrument=_INSTRUMENT,
order_type=Order.Types.LIMIT,
),
instrument=_INSTRUMENT,
)
print(ob)
ob.add(data)
print(ob.topOfBook())
assert ob.topOfBook() == {Side.BUY: [5.0, 1.0], Side.SELL: [5.5, 1.0]}
data = Order(
volume=0.5,
price=5.0,
side=Order.Sides.SELL,
order_type=Order.Types.LIMIT,
instrument=_INSTRUMENT,
)
print(ob)
ob.add(data)
print(ob.topOfBook())
assert ob.topOfBook() == {Side.BUY: [4.5, 0.5], Side.SELL: [5.0, 0.5]}
def test_stop_market(self):
ob = OrderBook(_INSTRUMENT)
_seed(ob, _INSTRUMENT)
assert ob.topOfBook()[Side.BUY] == [5.0, 1.0]
assert ob.topOfBook()[Side.SELL] == [5.5, 1.0]
print(ob)
assert ob.topOfBook() == {Side.BUY: [5.0, 1.0], Side.SELL: [5.5, 1.0]}
data = Order(
volume=0.0,
price=5.0,
side=Order.Sides.SELL,
order_type=Order.Types.STOP,
stop_target=Order(
volume=0.5,
price=4.5,
side=Order.Sides.SELL,
instrument=_INSTRUMENT,
order_type=Order.Types.LIMIT,
),
instrument=_INSTRUMENT,
)
print(ob)
ob.add(data)
print(ob.topOfBook())
assert ob.topOfBook() == {Side.BUY: [5.0, 1.0], Side.SELL: [5.5, 1.0]}
data = Order(
volume=0.5,
price=5.0,
side=Order.Sides.SELL,
order_type=Order.Types.LIMIT,
instrument=_INSTRUMENT,
)
print(ob)
ob.add(data)
print(ob.topOfBook())
assert ob.topOfBook() == {Side.BUY: [4.5, 1.0], Side.SELL: [5.5, 1.0]}
|
#coding=utf-8
from django.conf.urls import url
from django.views.generic import TemplateView
from app.views.phone.device import index, historical, information_details
from app.views.phone.user import my_information, login, logout, personal,workorder,warning
urlpatterns = [
url(r'index/$', index, name="phone_index"),
url(r'historical/$', historical, name="historical"),
url(r'informationDetails/$', information_details, name="information_details"),
url(r'myInformation/$', my_information, name="phone/my_information"),
url(r'personal/$', personal, name="personal"),
url(r'workorder/$', workorder, name="workorder"),
url(r'warning/$', warning, name="warning"),
url(r'user/login/$', login, name="login"),
url(r'user/logout/$', logout, name="logout"),
url(r'aboutUs/$', TemplateView.as_view(template_name="phone/aboutUs.html")),
]
|
#!/usr/bin/env python
from ciscoconfparse import CiscoConfParse
cisco_cfg = CiscoConfParse("cisco_ipsec.txt")
crypto_maps = cisco_cfg.find_objects_w_child(parentspec=r"^crypto map", childspec=r"pfs group2")
for i in crypto_maps:
print i.text
for child in i.children:
print child.text
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to ensure the datetime wrappers are working as expected."""
from datetime import datetime, timezone
from freezegun import freeze_time
def test_datetime_utcnow():
"""Assert that datetime.utcnow returns a non-naive datetime object."""
import legal_api.utils.datetime as _datetime
now = datetime(2020, 9, 17, 0, 0, 0, 0)
with freeze_time(now):
d = _datetime.datetime.utcnow()
assert d == now.replace(tzinfo=timezone.utc)
def test_datetime_isoformat():
"""Assert that the isoformat has the tzinfo set to +00:00."""
import legal_api.utils.datetime as _datetime
now = datetime(2020, 9, 17, 0, 0, 0, 0)
with freeze_time(now):
d = _datetime.datetime.utcnow()
iso = d.isoformat()
tz = iso[iso.find('+'):]
assert tz == '+00:00'
|
from abc import ABC, abstractmethod
class Action(ABC):
def __init__(self):
self.condition = None
self.attempt_limit: int = 0
self.time_limit: int = 0
self.start_callback = None
self.finished_callback = None
self.action_id: int = 0
@abstractmethod
def execute(self):
pass
@abstractmethod
def pause(self):
pass
@abstractmethod
def check_condition(self):
pass
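# Usage sketch added for illustration (not part of the original module): a
# minimal concrete Action showing how the abstract hooks might be filled in.
if __name__ == '__main__':
    class PrintAction(Action):
        def execute(self):
            print('executing action', self.action_id)

        def pause(self):
            print('pausing action', self.action_id)

        def check_condition(self):
            # with no condition configured the action is always allowed to run
            return self.condition is None

    action = PrintAction()
    action.action_id = 1
    if action.check_condition():
        action.execute()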
|
from GNN.globals import *
import math
from GNN.utils import *
from GNN.graph_samplers import *
from GNN.norm_aggr import *
import torch
import scipy.sparse as sp
import scipy
import numpy as np
import time
import hashlib
def _coo_scipy2torch(adj, coalesce=True, use_cuda=False):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
ans = torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
if use_cuda:
ans = ans.cuda()
if coalesce:
ans = ans.coalesce()
return ans
class Minibatch:
"""
This minibatch iterator iterates over nodes for supervised learning.
"""
def __init__(self,
adj_full_norm,
adj_train,
adj_val_norm,
role,
train_params,
cpu_eval=False):
"""
role: array of string (length |V|)
storing role of the node ('tr'/'va'/'te')
"""
self.use_cuda = (args_global.gpu >= 0)
if cpu_eval:
self.use_cuda = False
self.node_train = np.array(role['tr'])
self.node_val = np.array(role['va'])
self.node_test = np.array(role['te'])
self.node_trainval = np.concatenate((self.node_train,self.node_val),axis=None)
self.adj_full_norm_sp = adj_full_norm.tocsr()
self.adj_full_norm = _coo_scipy2torch(adj_full_norm.tocoo())
self.adj_val_norm = _coo_scipy2torch(adj_val_norm.tocoo())
self.adj_train = adj_train
        if self.use_cuda:  # for now everything is put on the GPU; ideally the full graph adj/feat should optionally stay on the CPU
self.adj_full_norm = self.adj_full_norm.cuda()
self.adj_val_norm = self.adj_val_norm.cuda()
# below: book-keeping for mini-batch
self.node_subgraph = None
self.batch_num = -1
self.method_sample = None
self.subgraphs_remaining_indptr = []
self.subgraphs_remaining_indices = []
self.subgraphs_remaining_data = []
self.subgraphs_remaining_nodes = []
self.subgraphs_remaining_edge_index = []
self.norm_loss_train = np.zeros(self.adj_train.shape[0])
# norm_loss_test is used in full batch evaluation (without sampling). so neighbor features are simply averaged.
self.norm_loss_test = np.zeros(self.adj_full_norm.shape[0])
_denom = len(self.node_train) + len(self.node_val) + len(
self.node_test)
self.norm_loss_test[self.node_train] = 1. / _denom
self.norm_loss_test[self.node_val] = 1. / _denom
self.norm_loss_test[self.node_test] = 1. / _denom
self.norm_loss_test = torch.from_numpy(
self.norm_loss_test.astype(np.float32))
if self.use_cuda:
self.norm_loss_test = self.norm_loss_test.cuda()
self.norm_aggr_train = np.zeros(self.adj_train.size)
self.sample_coverage = train_params['sample_coverage']
self.deg_train = np.array(self.adj_train.sum(1)).flatten()
def set_sampler(self, train_phases):
self.subgraphs_remaining_indptr = list()
self.subgraphs_remaining_indices = list()
self.subgraphs_remaining_data = list()
self.subgraphs_remaining_nodes = list()
self.subgraphs_remaining_edge_index = list()
self.method_sample = train_phases['sampler']
if self.method_sample == 'mrw':
if 'deg_clip' in train_phases:
_deg_clip = int(train_phases['deg_clip'])
else:
_deg_clip = 100000 # setting this to a large number so essentially there is no clipping in probability
self.size_subg_budget = train_phases['size_subgraph']
self.graph_sampler = mrw_sampling(self.adj_train,self.node_train,\
self.size_subg_budget,train_phases['size_frontier'],_deg_clip)
elif self.method_sample == 'rw':
self.size_subg_budget = train_phases['num_root'] * train_phases[
'depth']
self.graph_sampler = rw_sampling(self.adj_train,self.node_train,\
self.size_subg_budget,int(train_phases['num_root']),int(train_phases['depth']))
elif self.method_sample == 'edge':
self.size_subg_budget = train_phases['size_subg_edge'] * 2
self.graph_sampler = edge_sampling(self.adj_train, self.node_train,
train_phases['size_subg_edge'])
elif self.method_sample == 'node':
self.size_subg_budget = train_phases['size_subgraph']
self.graph_sampler = node_sampling(self.adj_train, self.node_train,
self.size_subg_budget)
elif self.method_sample == 'full_batch':
self.size_subg_budget = self.node_train.size
self.graph_sampler = full_batch_sampling(self.adj_train,
self.node_train,
self.size_subg_budget)
else:
raise NotImplementedError
self.norm_loss_train = np.zeros(self.adj_train.shape[0])
self.norm_aggr_train = np.zeros(self.adj_train.size).astype(np.float32)
# caching norm_aggr_train and norm_loss_train
text = args_global.data_prefix
for k, v in train_phases.items():
text += str(k) + str(v)
path = 'pytorch_models/sample' + hashlib.md5(text.encode('utf-8')).hexdigest() + '.npz'
if os.path.isfile(path):
            print('Found existing sampling normalization coefficients, loading from', path)
samplef = np.load(path)
self.norm_loss_train = samplef['norm_loss_train']
self.norm_aggr_train = samplef['norm_aggr_train']
else:
            print('Saving sampling normalization coefficients to', path)
            # For the edge sampler there is no need to estimate the norm factors; they can be calculated directly.
            # However, to keep the framework uniform, we follow the same procedure for all samplers:
            #   1. sample a sufficient number of subgraphs
            #   2. estimate the norm factors alpha and lambda
tot_sampled_nodes = 0
while True:
self.par_graph_sample('train')
tot_sampled_nodes = sum(
[len(n) for n in self.subgraphs_remaining_nodes])
if tot_sampled_nodes > self.sample_coverage * self.node_train.size:
break
print()
num_subg = len(self.subgraphs_remaining_nodes)
for i in range(num_subg):
self.norm_aggr_train[self.subgraphs_remaining_edge_index[i]] += 1
self.norm_loss_train[self.subgraphs_remaining_nodes[i]] += 1
assert self.norm_loss_train[self.node_val].sum(
) + self.norm_loss_train[self.node_test].sum() == 0
for v in range(self.adj_train.shape[0]):
i_s = self.adj_train.indptr[v]
i_e = self.adj_train.indptr[v + 1]
val = np.clip(
self.norm_loss_train[v] / self.norm_aggr_train[i_s:i_e], 0,
1e4)
val[np.isnan(val)] = 0.1
self.norm_aggr_train[i_s:i_e] = val
self.norm_loss_train[np.where(self.norm_loss_train == 0)[0]] = 0.1
self.norm_loss_train[self.node_val] = 0
self.norm_loss_train[self.node_test] = 0
self.norm_loss_train[
self.node_train] = num_subg / self.norm_loss_train[
self.node_train] / self.node_train.size
np.savez(path, norm_loss_train=self.norm_loss_train, norm_aggr_train=self.norm_aggr_train)
self.norm_loss_train = torch.from_numpy(
self.norm_loss_train.astype(np.float32))
if self.use_cuda:
self.norm_loss_train = self.norm_loss_train.cuda()
def par_graph_sample(self, phase):
t0 = time.time()
_indptr, _indices, _data, _v, _edge_index = self.graph_sampler.par_sample(
phase)
t1 = time.time()
print('sampling 200 subgraphs: time = {:.3f} sec'.format(t1 - t0),
end="\r")
self.subgraphs_remaining_indptr.extend(_indptr)
self.subgraphs_remaining_indices.extend(_indices)
self.subgraphs_remaining_data.extend(_data)
self.subgraphs_remaining_nodes.extend(_v)
self.subgraphs_remaining_edge_index.extend(_edge_index)
def one_batch(self, mode='train'):
if mode in ['val', 'test']:
self.node_subgraph = np.arange(self.adj_full_norm.shape[0])
if mode == 'val':
adj = self.adj_val_norm
elif mode == 'test':
adj = self.adj_full_norm
else:
assert mode == 'train'
if len(self.subgraphs_remaining_nodes) == 0:
self.par_graph_sample('train')
print()
self.node_subgraph = self.subgraphs_remaining_nodes.pop()
self.size_subgraph = len(self.node_subgraph)
adj = sp.csr_matrix((self.subgraphs_remaining_data.pop(),\
self.subgraphs_remaining_indices.pop(),\
self.subgraphs_remaining_indptr.pop()),\
shape=(self.size_subgraph,self.size_subgraph))
adj_edge_index = self.subgraphs_remaining_edge_index.pop()
#print("{} nodes, {} edges, {} degree".format(self.node_subgraph.size,adj.size,adj.size/self.node_subgraph.size))
norm_aggr(adj.data,
adj_edge_index,
self.norm_aggr_train,
num_proc=args_global.num_cpu_core)
adj = adj_norm(adj, deg=self.deg_train[self.node_subgraph])
adj = _coo_scipy2torch(adj.tocoo(),use_cuda=self.use_cuda)
self.batch_num += 1
norm_loss = self.norm_loss_test if mode in ['val', 'test'
] else self.norm_loss_train
norm_loss = norm_loss[self.node_subgraph]
return self.node_subgraph, adj, norm_loss
def num_training_batches(self):
return math.ceil(self.node_train.shape[0] /
float(self.size_subg_budget))
def shuffle(self):
self.node_train = np.random.permutation(self.node_train)
self.batch_num = -1
def end(self):
return (self.batch_num +
1) * self.size_subg_budget >= self.node_train.shape[0]
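# Usage sketch added for illustration (not part of the original module): it
# only exercises the _coo_scipy2torch helper on a tiny adjacency matrix and
# assumes the GNN package imports at the top of this file resolve.
if __name__ == '__main__':
    demo_adj = sp.coo_matrix(np.array([[0., 1.], [1., 0.]]))
    demo_t = _coo_scipy2torch(demo_adj, use_cuda=False)
    print(demo_t)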
|
import gym
import torch
from .QLearning import QLearning
from Gym.tools.utils import env_run
from Gym.tools.atari_wrappers import wrap_env
env = wrap_env(gym.make("PongDeterministic-v4"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
agent = QLearning(
device=device,
n_actions=env.action_space.n,
n_features=env.observation_space.shape,
learning_rate=0.0001,
gamma=0.99,
tau=0.01,
updateTargetFreq=10000,
epsilonStart=1,
epsilonEnd=0.02,
epsilonDecayFreq=100000,
mSize=1000000,
batchSize=32,
startTrainSize=100,
)
if __name__ == "__main__":
    RENDER = False  # Rendering the simulation slows down training; enable it once the agent has learned enough
    env.seed(1)  # Fix the random seed for reproducibility
    # env = env.unwrapped  # do not limit the episode length
    torch.manual_seed(500)  # Fix the random seed for reproducibility
env_run(
env=env,
agent=agent,
callerPath=__file__,
stopRewardFunc=lambda x: x > 19,
RENDER=RENDER,
test=False,
)
|
# -------------------------------------------------------------------------
# * This is a Java library for random number generation, translated here to Python. The use of this
# * library is recommended as a replacement for the Java class Random,
# * particularly in simulation applications where the statistical
# * 'goodness' of the random number generator is important.
# *
# * The generator used in this library is a so-called 'Lehmer random number
# * generator' which returns a pseudo-random number uniformly distributed
# * between 0.0 and 1.0. The period is (m - 1) where m = 2,147,483,647 and
# * the smallest and largest possible values are (1 / m) and 1 - (1 / m)
# * respectively. For more details see:
# *
# * "Random Number Generators: Good Ones Are Hard To Find"
# * Steve Park and Keith Miller
# * Communications of the ACM, October 1988
# *
# * Note that as of 7-11-90 the multiplier used in this library has changed
# * from the previous "minimal standard" 16807 to a new value of 48271. To
# * use this library in its old (16807) form change the constants MULTIPLIER
# * and CHECK as indicated in the comments.
# *
# * Name : Rng.java (Random Number Generation - Single Stream)
# * Authors : Steve Park & Dave Geyer
# * Translated by : Philip Steele
# * Language : Python 3.3
# * Latest Revision : 3/26/14
# *
# * Program rng : Section 2.2
# * -------------------------------------------------------------------------
# */
from time import time
CHECK = 399268537 #/* use 1043616065 for the "minimal standard" */
MODULUS = 2147483647 #/* DON'T CHANGE THIS VALUE */
MULTIPLIER = 48271 #/* use 16807 for the "minimal standard" */
DEFAULT = 123456789
seed = int(DEFAULT)
def random():
#/* ---------------------------------------------------------------------
#* Random is a Lehmer generator that returns a pseudo-random real number
#* uniformly distributed between 0.0 and 1.0. The period is (m - 1)
    #* where m = 2,147,483,647 and the smallest and largest possible values
#* are (1 / m) and 1 - (1 / m) respectively.
#* ---------------------------------------------------------------------
#*/
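    # Added comment: the update below is Schrage's decomposition of
    # seed = (MULTIPLIER * seed) % MODULUS, written so that intermediate
    # products stay within a 32-bit signed range in the original Java/C code.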
global seed
Q = int(MODULUS / MULTIPLIER)
R = int(MODULUS % MULTIPLIER)
t = int(MULTIPLIER * (seed % Q) - R * int(seed / Q))
if (t > 0):
seed = int(t)
else:
seed = int(t + MODULUS)
return float(seed / MODULUS)
def putSeed(x):
# /* -------------------------------------------------------------------
# * Use this (optional) procedure to initialize or reset the state of
# * the random number generator according to the following conventions:
# * if x > 0 then x is the initial seed (unless too large)
# * if x < 0 then the initial seed is obtained from the system clock
# * if x = 0 then the initial seed is to be supplied interactively
# * --------------------------------------------------------------------
# */
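    # Examples (added comment, mirroring the conventions above):
    #   putSeed(123456789)  -> start from an explicit seed
    #   putSeed(-1)         -> seed from the system clock
    #   putSeed(0)          -> prompt interactively for a seed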
global seed
ok = False
if (x > 0):
x = x % MODULUS
# correct if x is too large
if (x < 0):
x = time()
x = x % MODULUS
if (x == 0):
while (ok == False):
line = input("\nEnter a positive integer seed (9 digits or less) >> ")
x = int(line)
ok = (0 < x) and (x < MODULUS)
if (ok == False):
print("\nInput out of range ... try again\n")
seed = int(x)
def getSeed():
# /* --------------------------------------------------------------------
# * Use this (optional) procedure to get the current state of the random
# * number generator.
# * --------------------------------------------------------------------
# */
return seed
def testRandom():
# /* -------------------------------------------------------------------
# * Use this (optional) procedure to test for a correct implementation.
# * -------------------------------------------------------------------
# */
putSeed(1); #/* set initial state to 1 */
for i in range(0,10000):
u = random()
x = getSeed()
if (CHECK == x):
print("\n The implementation of Random is correct")
else:
print("\n ERROR - the implementation of Random is not correct")
|
#!/usr/bin/env python3
import unittest
import re
import file_utils
class TestFileUtils(unittest.TestCase):
def setUp(self):
# seconds decimal point . followed by 6 digits at end of string
# self.regex_timestamp_end = re.compile(r"\.\d{6}$")
self.regex_timestamp_end = re.compile(r"\d{8}T\d{6}\.\d{6}$")
self.regex_filename_end = re.compile(r"\d{8}T\d{6}\.\d{6}\.jpg$")
def test_timestamp_string(self):
actual = file_utils.FileUtils.timestamp_string()
        # check that the timestamp starts with a plausible century prefix
        self.assertEqual("20", actual[0:2])
def test_timestamp_string_end(self):
actual = file_utils.FileUtils.timestamp_string()
# use findall to get list
# alternatively could use search not match when not matching beginning of string
        # https://docs.python.org/3/howto/regex.html
matches = self.regex_timestamp_end.findall(actual)
self.assertEqual(1, len(matches))
def test_filename_with_timestamp_start(self):
base_name = "bar"
actual = file_utils.FileUtils.filename_with_timestamp(base_name)
self.assertEqual(base_name, actual[0:len(base_name)])
def test_filename_with_timestamp_end(self):
base_name = "grizzly"
actual = file_utils.FileUtils.filename_with_timestamp(base_name)
matches = self.regex_filename_end.findall(actual)
self.assertEqual(1, len(matches))
if __name__ == "__main__":
unittest.main()
|
# @Author: Brett Andrews <andrews>
# @Date: 2019-05-28 14:05:49
# @Last modified by: andrews
# @Last modified time: 2019-06-20 09:06:65
"""Visualization utilities."""
from pathlib import Path
from astropy.visualization import make_lupton_rgb
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from .fileio import construct_path_out, load_config
from .utils import import_model
class PlotModel(object):
"""Make plots.
Args:
run_name (str): Run name. Default is None.
path (str): Path to results. Default is None.
"""
def __init__(self, run_name=None, path=None):
self.run_name, self.path_results = construct_path_out(run_name=run_name, path=path)
self.path_logs = self.path_results / "logs"
self.path_pasquet = Path(__file__).resolve().parents[3] / "data" / "pasquet2019"
self._labels = None
self._logs = None
@property
def labels(self):
if self._labels is None:
print("Loading labels...")
self._labels = pd.read_hdf(self.path_pasquet / "labelsBinned.h5", "data")
return self._labels
@property
def logs(self):
if self._logs is None:
self._logs = pd.read_csv(self.path_logs / "log.csv")
return self._logs
def _accuracy(self, ax):
ax.plot(self.logs.epoch, self.logs.capsnet_acc, label="train")
ax.plot(self.logs.epoch, self.logs.val_capsnet_acc, label="validation")
ax.set_ylabel("accuracy")
ax.legend()
return ax
def _loss(self, ax):
ax.plot(self.logs.epoch, self.logs.capsnet_loss, label="margin")
ax.plot(self.logs.epoch, self.logs.decoder_loss, label="decoder")
ax.plot(self.logs.epoch, self.logs.loss, label="total")
ax.set_xlabel("epoch")
ax.set_ylabel("loss")
ax.legend()
return ax
def _to_ind(self, page, row, col):
"""Convert from (page, row, column) of galaxies.pdf to index of labelsMerged.csv."""
page -= 1
row -= 1
col -= 1
return page * 150 + row * 10 + col
def _load_image_data(self, ind):
obj = self.labels.iloc[ind]
galid = f"{obj.plate:04}-{obj.mjd}-{obj.fiberID:03}"
fin = (
self.path_pasquet
/ "cubes"
/ f"{obj.plate:04}"
/ f"{obj.mjd}"
/ f"{galid}.npy"
)
return np.load(fin)
def plot_logs(self, path_out=None, savefig=False, **kwargs):
"""Plot accuracy and loss for a model.
Args:
path_out (str): Output path. Default is None.
savefig (bool): If True save figure to ``path_out``. Default is
                False.
**kwargs: Keyword arguments to pass to ``plt.subplots()``.
Returns:
``matplotlib.figure.Figure``
"""
fig, axes = plt.subplots(nrows=2, **kwargs)
axes[0] = self._accuracy(ax=axes[0])
axes[1] = self._loss(ax=axes[1])
path_out = path_out if path_out is not None else self.path_logs
fout = path_out if path_out.is_file() else path_out / "logs.pdf"
if savefig:
fig.savefig(fout)
return fig
def plot_accuracy(self, path_out=None, savefig=False, **kwargs):
"""Plot accuracy for a model.
Args:
path_out (str): Output path. Default is None.
savefig (bool): If True save figure to ``path_out``. Default is
False.
**kwargs: Keyword arguments to pass to ``plt.subplots()``.
Returns:
``matplotlib.figure.Figure``
"""
fig, ax = plt.subplots(**kwargs)
ax = self._accuracy(ax=ax)
path_out = path_out if path_out is not None else self.path_logs
fout = path_out if path_out.is_file() else path_out / "accuracy.pdf"
if savefig:
fig.savefig(fout)
return fig
def plot_loss(self, path_out=None, savefig=False, **kwargs):
"""Plot loss for a model.
Args:
path_out (str): Output path. Default is None.
savefig (bool): If True save figure to ``path_out``. Default is
False.
**kwargs: Keyword arguments to pass to ``plt.subplots()``.
Returns:
``matplotlib.figure.Figure``
"""
fig, ax = plt.subplots(**kwargs)
ax = self._loss(ax=ax)
path_out = path_out if path_out is not None else self.path_logs
fout = path_out if path_out.is_file() else path_out / "loss.pdf"
if savefig:
fig.savefig(fout)
return fig
def plot_ugriz_gri(self, inds, path_out=None, savefig=False, rgb_kwargs=None, **kwargs):
"""Plot images for ugriz bands individually and gri composite.
Args:
inds (list): Indices of galaxies in ``labels`` or tuples of
(page, row, column) from ``galaxies.pdf``.
path_out (str): Output path. Default is None.
savefig (bool): If True save figure to ``path_out``. Default is
False.
rgb_kwargs: Keyword arguments to pass to
``make_lupton_rgb()``. Default is None.
**kwargs: Keyword arguments to pass to ``plt.subplots()``.
Returns:
``matplotlib.figure.Figure``
"""
        inds = [self._to_ind(*ind) if isinstance(ind, tuple) else ind for ind in inds]
if rgb_kwargs is None:
rgb_kwargs = {"stretch": 0.6, "Q": 5}
nrows = len(inds)
if "figsize" not in kwargs:
kwargs["figsize"] = (12, nrows * 3)
fig, axes = plt.subplots(nrows=nrows, ncols=6, **kwargs)
axes = np.atleast_2d(axes)
image_types = ["u", "g", "r", "i", "z", "gri"]
for ax, title in zip(axes[0], image_types):
ax.set_title(title)
for ind, row in zip(inds, axes):
data = self._load_image_data(ind=ind)
u, g, r, i, z = np.transpose(data, axes=(2, 0, 1))
for jj, band in enumerate((u, g, r, i, z)):
row[jj].imshow(band, origin="lower", cmap="Greys_r")
rgb = make_lupton_rgb(i, r, g, **rgb_kwargs)
row[5].imshow(rgb, origin="lower")
row[0].annotate(
f"z = {self.labels.z.iloc[ind]:.4}",
xy=(0.05, -0.15),
xycoords="axes fraction",
)
for ax in row:
ax.grid(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
path_out = path_out if path_out is not None else self.path_logs
fout = path_out if path_out.is_file() else path_out / "ugriz_gri.pdf"
if savefig:
fig.savefig(fout)
return fig
def plot_gri_recon(
self,
inds,
path_config=None,
checkpoint=None,
path_out=None,
savefig=False,
rgb_kwargs=None,
**kwargs,
):
"""
inds (list): Indices of galaxies in ``labels`` or tuples of
(page, row, column) from ``galaxies.pdf``.
path_config (str): Path to config file used to train model.
Default is None.
checkpoint (int): Checkpoint to load weights from. Default is
None.
"""
        inds = [self._to_ind(*ind) if isinstance(ind, tuple) else ind for ind in inds]
if rgb_kwargs is None:
rgb_kwargs = {"stretch": 0.6, "Q": 5}
config = load_config(path_config, verbose=False)
config.pop("checkpoint")
# load model
CapsNet = import_model(config["model_name"])
__, eval_model = CapsNet(**config)
nrows = len(inds)
if "figsize" not in kwargs:
kwargs["figsize"] = (6, nrows * 3)
fig, axes = plt.subplots(nrows=nrows, ncols=2, **kwargs)
axes = np.atleast_2d(axes)
image_types = ["truth", "recon"]
for ax, title in zip(axes[0], image_types):
ax.set_title(title)
for ind, row in zip(inds, axes):
data = self._load_image_data(ind=ind)
data = data[16:48, 16:48] # TODO crop to inner 32x32
from ..base.run_model import predict
y_class, y_prob, recon = predict(
model=eval_model,
images=np.expand_dims(data, axis=0),
checkpoint=checkpoint,
**config
)
u, g, r, i, z = np.transpose(data, axes=(2, 0, 1))
u_recon, g_recon, r_recon, i_recon, z_recon = np.transpose(recon[0], axes=(2, 0, 1))
rgb = make_lupton_rgb(i, r, g, **rgb_kwargs)
row[0].imshow(rgb, origin="lower")
rgb_recon = make_lupton_rgb(i_recon, r_recon, g_recon, **rgb_kwargs)
row[1].imshow(rgb_recon, origin="lower")
z_classes = (self.labels.z_class.iloc[ind] // 18, y_class[0]) # TODO for 10 bins only!
for ii, (ax, z_class) in enumerate(zip(row, z_classes)):
if ii == 0:
redshift = self.labels.z.iloc[ind]
else:
bin_edges = np.linspace(0, 0.4, 10) # TODO for 10 bins only!
redshift = np.mean([bin_edges[z_class], bin_edges[z_class + 1]])
ax.annotate(
# f"z = {redshift:.4}\n"
f"z_class = {z_class}",
xy=(0.05, -0.15),
xycoords="axes fraction",
)
for ax in row:
ax.grid(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
path_out = path_out if path_out is not None else self.path_logs
fout = path_out if path_out.is_file() else path_out / "gri_recon.pdf"
if savefig:
fig.savefig(fout)
return fig
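# Illustrative usage sketch (hypothetical run name; assumes training logs exist
# under the default results path):
#     pm = PlotModel(run_name="my_run")
#     fig = pm.plot_logs(savefig=True)
#     fig = pm.plot_ugriz_gri(inds=[0, 1], savefig=True)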
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import numpy as np
from scipy.interpolate import griddata
def calc_psi_norm(R, Z, psi, xpt, axis_mag):
# normalize psi
# psi_interp = Rbf(R, Z, psi)
# psi_min = psi_interp(axis_mag[0], axis_mag[1])
#
# psi_shifted = psi - psi_min # set center to zero
# psi_shifted_interp = Rbf(R, Z, psi_shifted)
# psi_shifted_xpt = psi_shifted_interp(xpt[0], xpt[1])
#
# psi_norm = psi_shifted / psi_shifted_xpt
psi_min = griddata(np.column_stack((R.flatten(), Z.flatten())),
psi.flatten(),
[axis_mag[0], axis_mag[1]],
method='cubic')
psi_shifted = psi - psi_min # set center to zero
psi_shifted_xpt_l, psi_shifted_xpt_u = None, None
if xpt[0] is not None:
psi_shifted_xpt_l = griddata(np.column_stack((R.flatten(), Z.flatten())),
psi_shifted.flatten(),
[xpt[0][0], xpt[0][1]],
method='cubic')
if xpt[1] is not None:
psi_shifted_xpt_u = griddata(np.column_stack((R.flatten(), Z.flatten())),
psi_shifted.flatten(),
[xpt[1][0], xpt[1][1]],
method='cubic')
psi_shifted_xpt = [psi_shifted_xpt_l, psi_shifted_xpt_u]
if xpt[1] is None:
psi_norm = psi_shifted / np.average(psi_shifted_xpt_l)
elif xpt[0] is None:
psi_norm = psi_shifted / np.average(psi_shifted_xpt_u)
else:
psi_norm = psi_shifted / np.average(psi_shifted_xpt)
return psi_norm
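# Illustrative usage sketch (hypothetical grid and equilibrium values; xpt is
# a pair [lower X-point, upper X-point] with None for a missing X-point):
#     RR, ZZ = np.meshgrid(np.linspace(1.0, 2.5, 65), np.linspace(-1.5, 1.5, 65))
#     psi_norm = calc_psi_norm(RR, ZZ, psi, xpt=[(1.5, -1.2), None], axis_mag=(1.7, 0.0))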
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import numpy as np
import pytest
import cirq
from cirq.linalg import matrix_commutes
def test_is_diagonal():
assert cirq.is_diagonal(np.empty((0, 0)))
assert cirq.is_diagonal(np.empty((1, 0)))
assert cirq.is_diagonal(np.empty((0, 1)))
assert cirq.is_diagonal(np.array([[1]]))
assert cirq.is_diagonal(np.array([[-1]]))
assert cirq.is_diagonal(np.array([[5]]))
assert cirq.is_diagonal(np.array([[3j]]))
assert cirq.is_diagonal(np.array([[1, 0]]))
assert cirq.is_diagonal(np.array([[1], [0]]))
assert not cirq.is_diagonal(np.array([[1, 1]]))
assert not cirq.is_diagonal(np.array([[1], [1]]))
assert cirq.is_diagonal(np.array([[5j, 0], [0, 2]]))
assert cirq.is_diagonal(np.array([[1, 0], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0.1], [0.1, 1]]))
assert cirq.is_diagonal(np.array([[1, 1e-11], [1e-10, 1]]))
def test_is_diagonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_diagonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_diagonal(np.array([[1, 0.5], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0.5], [-0.6, 1]]), atol=atol)
def test_is_hermitian():
assert cirq.is_hermitian(np.empty((0, 0)))
assert not cirq.is_hermitian(np.empty((1, 0)))
assert not cirq.is_hermitian(np.empty((0, 1)))
assert cirq.is_hermitian(np.array([[1]]))
assert cirq.is_hermitian(np.array([[-1]]))
assert cirq.is_hermitian(np.array([[5]]))
assert not cirq.is_hermitian(np.array([[3j]]))
assert not cirq.is_hermitian(np.array([[0, 0]]))
assert not cirq.is_hermitian(np.array([[0], [0]]))
assert not cirq.is_hermitian(np.array([[5j, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[5, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[1, 0], [0, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0], [1, 1]]))
assert not cirq.is_hermitian(np.array([[1, 1], [0, 1]]))
assert cirq.is_hermitian(np.array([[1, 1], [1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_hermitian(np.array([[1, 1j], [1j, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0.1], [-0.1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j + 1e-11], [-1j, 1 + 1j * 1e-9]]))
def test_is_hermitian_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_hermitian(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert cirq.is_hermitian(np.array([[1, 0.25], [-0.25, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0], [-0.6, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.25], [-0.35, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_hermitian(np.array([[1, 0.5, 0.5], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.5, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
def test_is_unitary():
assert cirq.is_unitary(np.empty((0, 0)))
assert not cirq.is_unitary(np.empty((1, 0)))
assert not cirq.is_unitary(np.empty((0, 1)))
assert cirq.is_unitary(np.array([[1]]))
assert cirq.is_unitary(np.array([[-1]]))
assert cirq.is_unitary(np.array([[1j]]))
assert not cirq.is_unitary(np.array([[5]]))
assert not cirq.is_unitary(np.array([[3j]]))
assert not cirq.is_unitary(np.array([[1, 0]]))
assert not cirq.is_unitary(np.array([[1], [0]]))
assert not cirq.is_unitary(np.array([[1, 0], [0, -2]]))
assert cirq.is_unitary(np.array([[1, 0], [0, -1]]))
assert cirq.is_unitary(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_unitary(np.array([[1, 0], [1, 1]]))
assert not cirq.is_unitary(np.array([[1, 1], [0, 1]]))
assert not cirq.is_unitary(np.array([[1, 1], [1, 1]]))
assert not cirq.is_unitary(np.array([[1, -1], [1, 1]]))
assert cirq.is_unitary(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert cirq.is_unitary(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_unitary(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_unitary(np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_unitary_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_unitary(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_unitary(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_unitary(np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), atol=atol)
def test_is_orthogonal():
assert cirq.is_orthogonal(np.empty((0, 0)))
assert not cirq.is_orthogonal(np.empty((1, 0)))
assert not cirq.is_orthogonal(np.empty((0, 1)))
assert cirq.is_orthogonal(np.array([[1]]))
assert cirq.is_orthogonal(np.array([[-1]]))
assert not cirq.is_orthogonal(np.array([[1j]]))
assert not cirq.is_orthogonal(np.array([[5]]))
assert not cirq.is_orthogonal(np.array([[3j]]))
assert not cirq.is_orthogonal(np.array([[1, 0]]))
assert not cirq.is_orthogonal(np.array([[1], [0]]))
assert not cirq.is_orthogonal(np.array([[1, 0], [0, -2]]))
assert cirq.is_orthogonal(np.array([[1, 0], [0, -1]]))
assert not cirq.is_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_orthogonal(np.array([[1, -1], [1, 1]]))
assert cirq.is_orthogonal(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not cirq.is_orthogonal(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_orthogonal(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_orthogonal(np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_orthogonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_orthogonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_orthogonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_orthogonal(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_orthogonal(np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), atol=atol)
def test_is_special_orthogonal():
assert cirq.is_special_orthogonal(np.empty((0, 0)))
assert not cirq.is_special_orthogonal(np.empty((1, 0)))
assert not cirq.is_special_orthogonal(np.empty((0, 1)))
assert cirq.is_special_orthogonal(np.array([[1]]))
assert not cirq.is_special_orthogonal(np.array([[-1]]))
assert not cirq.is_special_orthogonal(np.array([[1j]]))
assert not cirq.is_special_orthogonal(np.array([[5]]))
assert not cirq.is_special_orthogonal(np.array([[3j]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0]]))
assert not cirq.is_special_orthogonal(np.array([[1], [0]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [0, -2]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [0, -1]]))
assert cirq.is_special_orthogonal(np.array([[-1, 0], [0, -1]]))
assert not cirq.is_special_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, -1], [1, 1]]))
assert cirq.is_special_orthogonal(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [1, -1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_special_orthogonal(np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_special_orthogonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_special_orthogonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_special_orthogonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries, except for determinant factors.
assert cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), atol=atol
)
assert not cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol
)
assert not cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), atol=atol
)
def test_is_special_unitary():
assert cirq.is_special_unitary(np.empty((0, 0)))
assert not cirq.is_special_unitary(np.empty((1, 0)))
assert not cirq.is_special_unitary(np.empty((0, 1)))
assert cirq.is_special_unitary(np.array([[1]]))
assert not cirq.is_special_unitary(np.array([[-1]]))
assert not cirq.is_special_unitary(np.array([[5]]))
assert not cirq.is_special_unitary(np.array([[3j]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [0, -2]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [0, -1]]))
assert cirq.is_special_unitary(np.array([[-1, 0], [0, -1]]))
assert not cirq.is_special_unitary(np.array([[1j, 0], [0, 1]]))
assert cirq.is_special_unitary(np.array([[1j, 0], [0, -1j]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [1, 1]]))
assert not cirq.is_special_unitary(np.array([[1, 1], [0, 1]]))
assert not cirq.is_special_unitary(np.array([[1, 1], [1, 1]]))
assert not cirq.is_special_unitary(np.array([[1, -1], [1, 1]]))
assert cirq.is_special_unitary(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert cirq.is_special_unitary(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_unitary(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_special_unitary(np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_special_unitary_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_special_unitary(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_special_unitary(np.array([[1, 0], [-0.6, 1]]), atol=atol)
assert cirq.is_special_unitary(np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.1), atol=atol)
assert not cirq.is_special_unitary(np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.3), atol=atol)
# Error isn't accumulated across entries, except for determinant factors.
assert cirq.is_special_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), atol=atol)
assert not cirq.is_special_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_special_unitary(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), atol=atol
)
def test_is_normal():
assert cirq.is_normal(np.array([[1]]))
assert cirq.is_normal(np.array([[3j]]))
assert cirq.is_normal(cirq.testing.random_density_matrix(4))
assert cirq.is_normal(cirq.testing.random_unitary(5))
assert not cirq.is_normal(np.array([[0, 1], [0, 0]]))
assert not cirq.is_normal(np.zeros((1, 0)))
def test_is_normal_tolerance():
atol = 0.25
# Pays attention to specified tolerance.
assert cirq.is_normal(np.array([[0, 0.5], [0, 0]]), atol=atol)
assert not cirq.is_normal(np.array([[0, 0.6], [0, 0]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_normal(np.array([[0, 0.5, 0], [0, 0, 0.5], [0, 0, 0]]), atol=atol)
assert not cirq.is_normal(np.array([[0, 0.5, 0], [0, 0, 0.6], [0, 0, 0]]), atol=atol)
def test_commutes():
assert matrix_commutes(np.empty((0, 0)), np.empty((0, 0)))
assert not matrix_commutes(np.empty((1, 0)), np.empty((0, 1)))
assert not matrix_commutes(np.empty((0, 1)), np.empty((1, 0)))
assert not matrix_commutes(np.empty((1, 0)), np.empty((1, 0)))
assert not matrix_commutes(np.empty((0, 1)), np.empty((0, 1)))
assert matrix_commutes(np.array([[1]]), np.array([[2]]))
assert matrix_commutes(np.array([[1]]), np.array([[0]]))
x = np.array([[0, 1], [1, 0]])
y = np.array([[0, -1j], [1j, 0]])
z = np.array([[1, 0], [0, -1]])
xx = np.kron(x, x)
zz = np.kron(z, z)
assert matrix_commutes(x, x)
assert matrix_commutes(y, y)
assert matrix_commutes(z, z)
assert not matrix_commutes(x, y)
assert not matrix_commutes(x, z)
assert not matrix_commutes(y, z)
assert matrix_commutes(xx, zz)
assert matrix_commutes(xx, np.diag([1, -1, -1, 1 + 1e-9]))
def test_commutes_tolerance():
atol = 0.5
x = np.array([[0, 1], [1, 0]])
z = np.array([[1, 0], [0, -1]])
# Pays attention to specified tolerance.
assert matrix_commutes(x, x + z * 0.1, atol=atol)
assert not matrix_commutes(x, x + z * 0.5, atol=atol)
def test_allclose_up_to_global_phase():
assert cirq.allclose_up_to_global_phase(np.array([1]), np.array([1j]))
assert not cirq.allclose_up_to_global_phase(np.array([[[1]]]), np.array([1]))
assert cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[1]]))
assert cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[-1]]))
assert cirq.allclose_up_to_global_phase(np.array([[0]]), np.array([[0]]))
assert cirq.allclose_up_to_global_phase(np.array([[1, 2]]), np.array([[1j, 2j]]))
assert cirq.allclose_up_to_global_phase(np.array([[1, 2.0000000001]]), np.array([[1j, 2j]]))
assert not cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[1, 0]]))
assert not cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[2]]))
def test_binary_sub_tensor_slice():
a = slice(None)
e = Ellipsis
assert cirq.slice_for_qubits_equal_to([], 0) == (e,)
assert cirq.slice_for_qubits_equal_to([0], 0b0) == (0, e)
assert cirq.slice_for_qubits_equal_to([0], 0b1) == (1, e)
assert cirq.slice_for_qubits_equal_to([1], 0b0) == (a, 0, e)
assert cirq.slice_for_qubits_equal_to([1], 0b1) == (a, 1, e)
assert cirq.slice_for_qubits_equal_to([2], 0b0) == (a, a, 0, e)
assert cirq.slice_for_qubits_equal_to([2], 0b1) == (a, a, 1, e)
assert cirq.slice_for_qubits_equal_to([0, 1], 0b00) == (0, 0, e)
assert cirq.slice_for_qubits_equal_to([1, 2], 0b00) == (a, 0, 0, e)
assert cirq.slice_for_qubits_equal_to([1, 3], 0b00) == (a, 0, a, 0, e)
assert cirq.slice_for_qubits_equal_to([1, 3], 0b10) == (a, 0, a, 1, e)
assert cirq.slice_for_qubits_equal_to([3, 1], 0b10) == (a, 1, a, 0, e)
assert cirq.slice_for_qubits_equal_to([2, 1, 0], 0b001) == (0, 0, 1, e)
assert cirq.slice_for_qubits_equal_to([2, 1, 0], 0b010) == (0, 1, 0, e)
assert cirq.slice_for_qubits_equal_to([2, 1, 0], 0b100) == (1, 0, 0, e)
assert cirq.slice_for_qubits_equal_to([0, 1, 2], 0b101) == (1, 0, 1, e)
assert cirq.slice_for_qubits_equal_to([0, 2, 1], 0b101) == (1, 1, 0, e)
m = np.array([0] * 16).reshape((2, 2, 2, 2))
for k in range(16):
m[cirq.slice_for_qubits_equal_to([3, 2, 1, 0], k)] = k
assert list(m.reshape(16)) == list(range(16))
assert cirq.slice_for_qubits_equal_to([0], 0b1, num_qubits=1) == (1,)
assert cirq.slice_for_qubits_equal_to([1], 0b0, num_qubits=2) == (a, 0)
assert cirq.slice_for_qubits_equal_to([1], 0b0, num_qubits=3) == (a, 0, a)
assert cirq.slice_for_qubits_equal_to([2], 0b0, num_qubits=3) == (a, a, 0)
def test_binary_sub_tensor_slice_big_endian():
a = slice(None)
e = Ellipsis
sfqet = cirq.slice_for_qubits_equal_to
assert sfqet([], big_endian_qureg_value=0) == (e,)
assert sfqet([0], big_endian_qureg_value=0b0) == (0, e)
assert sfqet([0], big_endian_qureg_value=0b1) == (1, e)
assert sfqet([1], big_endian_qureg_value=0b0) == (a, 0, e)
assert sfqet([1], big_endian_qureg_value=0b1) == (a, 1, e)
assert sfqet([2], big_endian_qureg_value=0b0) == (a, a, 0, e)
assert sfqet([2], big_endian_qureg_value=0b1) == (a, a, 1, e)
assert sfqet([0, 1], big_endian_qureg_value=0b00) == (0, 0, e)
assert sfqet([1, 2], big_endian_qureg_value=0b00) == (a, 0, 0, e)
assert sfqet([1, 3], big_endian_qureg_value=0b00) == (a, 0, a, 0, e)
assert sfqet([1, 3], big_endian_qureg_value=0b01) == (a, 0, a, 1, e)
assert sfqet([3, 1], big_endian_qureg_value=0b01) == (a, 1, a, 0, e)
assert sfqet([2, 1, 0], big_endian_qureg_value=0b100) == (0, 0, 1, e)
assert sfqet([2, 1, 0], big_endian_qureg_value=0b010) == (0, 1, 0, e)
assert sfqet([2, 1, 0], big_endian_qureg_value=0b001) == (1, 0, 0, e)
assert sfqet([0, 1, 2], big_endian_qureg_value=0b101) == (1, 0, 1, e)
assert sfqet([0, 2, 1], big_endian_qureg_value=0b101) == (1, 1, 0, e)
m = np.array([0] * 16).reshape((2, 2, 2, 2))
for k in range(16):
m[sfqet([0, 1, 2, 3], big_endian_qureg_value=k)] = k
assert list(m.reshape(16)) == list(range(16))
assert sfqet([0], big_endian_qureg_value=0b1, num_qubits=1) == (1,)
assert sfqet([1], big_endian_qureg_value=0b0, num_qubits=2) == (a, 0)
assert sfqet([1], big_endian_qureg_value=0b0, num_qubits=3) == (a, 0, a)
assert sfqet([2], big_endian_qureg_value=0b0, num_qubits=3) == (a, a, 0)
def test_qudit_sub_tensor_slice():
a = slice(None)
sfqet = cirq.slice_for_qubits_equal_to
assert sfqet([], 0, qid_shape=()) == ()
assert sfqet([0], 0, qid_shape=(3,)) == (0,)
assert sfqet([0], 1, qid_shape=(3,)) == (1,)
assert sfqet([0], 2, qid_shape=(3,)) == (2,)
assert sfqet([2], 0, qid_shape=(1, 2, 3)) == (a, a, 0)
assert sfqet([2], 2, qid_shape=(1, 2, 3)) == (a, a, 2)
assert sfqet([2], big_endian_qureg_value=2, qid_shape=(1, 2, 3)) == (a, a, 2)
assert sfqet([1, 3], 3 * 2 + 1, qid_shape=(2, 3, 4, 5)) == (a, 1, a, 2)
assert sfqet([3, 1], 5 * 2 + 1, qid_shape=(2, 3, 4, 5)) == (a, 2, a, 1)
assert sfqet([2, 1, 0], 9 * 2 + 3 * 1, qid_shape=(3,) * 3) == (2, 1, 0)
assert sfqet([1, 3], big_endian_qureg_value=5 * 1 + 2, qid_shape=(2, 3, 4, 5)) == (a, 1, a, 2)
assert sfqet([3, 1], big_endian_qureg_value=3 * 1 + 2, qid_shape=(2, 3, 4, 5)) == (a, 2, a, 1)
m = np.array([0] * 24).reshape((1, 2, 3, 4))
for k in range(24):
m[sfqet([3, 2, 1, 0], k, qid_shape=(1, 2, 3, 4))] = k
assert list(m.reshape(24)) == list(range(24))
assert sfqet([0], 1, num_qubits=1, qid_shape=(3,)) == (1,)
assert sfqet([1], 0, num_qubits=3, qid_shape=(3, 3, 3)) == (a, 0, a)
with pytest.raises(ValueError, match='len.* !='):
sfqet([], num_qubits=2, qid_shape=(1, 2, 3))
with pytest.raises(ValueError, match='exactly one'):
sfqet([0, 1, 2], 0b101, big_endian_qureg_value=0b101)
|
# pylint: disable=unused-import
"""Ancestry display module constants."""
from app.analysis_results.constants import ANCESTRY_NAME as MODULE_NAME
from app.tool_results.ancestry.constants import MODULE_NAME as TOOL_MODULE_NAME
|
import spotlight
import requests
SPOTLIGHT_URI = "https://api.dbpedia-spotlight.org"
def spotlight_lookup(x, lang='en', conf=0.01):
url = '{}/{}/annotate'.format(SPOTLIGHT_URI, lang)
try:
results = spotlight.annotate(url, x)
matches = []
for result in results:
result = result['URI'].replace('de.', '').replace('pt.', '')
result = 'http://' + result.split('://')[1]
resp = requests.get(result, headers={'Connection': 'close'})
result = resp.url.replace('/page/', '/resource/')
matches.append(result)
        return matches
except Exception as e:
# warnings.warn('[SPOTLIGHT] Something went wrong with request to '
# '{}. Returning nothing...'.format(url))
print(e)
return []
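# Illustrative usage sketch (requires network access to the public DBpedia
# Spotlight endpoint; results vary with the confidence threshold):
#     uris = spotlight_lookup("Berlin is the capital of Germany.", lang='en', conf=0.5)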
|
import multiprocessing
import numpy as np
import os
import pickle
import sys
import time
from FDApy.representation.functional_data import MultivariateFunctionalData
from FDApy.clustering.fcubt import Node, FCUBT
from joblib import Parallel, delayed
from sklearn.metrics import adjusted_rand_score
NUM_CORES = multiprocessing.cpu_count()
def analyze_data(idx):
print(f'Simulation {idx}')
with open(f'./data/scenario_4_{idx}.pkl', 'rb') as f:
data_fd = pickle.load(f)
with open(f'./data/labels.pkl', 'rb') as f:
labels = pickle.load(f)
start = time.time()
root_node = Node(data_fd, is_root=True)
fcubt = FCUBT(root_node=root_node)
fcubt.grow(n_components=[0.95, 2])
fcubt.join(n_components=[0.95, 2])
comp = time.time() - start
return {'n_clusters': len(np.unique(fcubt.labels_join)),
'ARI': adjusted_rand_score(labels, fcubt.labels_join)}
def main():
inputs = range(100)
start = time.time()
results = Parallel(n_jobs=NUM_CORES)(delayed(analyze_data)(i) for i in inputs)
print(f'{time.time() - start}')
file = open("./results/results_fcubt_comptime.pkl", "wb")
pickle.dump(results, file)
file.close()
if __name__ == '__main__':
main()
|
from rest_framework import serializers
from rdmo.conditions.models import Condition
from ..models import OptionSet, Option
from ..validators import OptionSetUniqueKeyValidator, OptionUniquePathValidator
class OptionSetIndexOptionsSerializer(serializers.ModelSerializer):
class Meta:
model = Option
fields = (
'id',
'path',
'text'
)
class OptionSetIndexSerializer(serializers.ModelSerializer):
options = OptionSetIndexOptionsSerializer(many=True)
class Meta:
model = OptionSet
fields = (
'id',
'key',
'options'
)
class OptionSetSerializer(serializers.ModelSerializer):
key = serializers.CharField(required=True)
class Meta:
model = OptionSet
fields = (
'id',
'uri_prefix',
'key',
'comment',
'order',
'conditions'
)
validators = (OptionSetUniqueKeyValidator(),)
class OptionSerializer(serializers.ModelSerializer):
key = serializers.CharField(required=True)
optionset = serializers.PrimaryKeyRelatedField(queryset=OptionSet.objects.all(), required=True)
class Meta:
model = Option
fields = (
'id',
'optionset',
'uri_prefix',
'key',
'comment',
'order',
'text_en',
'text_de',
'additional_input'
)
validators = (OptionUniquePathValidator(),)
class ConditionSerializer(serializers.ModelSerializer):
class Meta:
model = Condition
fields = (
'id',
'key'
)
|
'''Package initialization.'''
from .food import Apple
from .snake import Snake
from .endInterface import endInterface
from .utils import drawGameGrid, showScore
|
# Problem: https://www.hackerrank.com/challenges/python-division/problem
# Score: 10
a, b = int(input()), int(input())
print(a // b, a / b, sep='\n')
|
import unittest
from unittest.mock import patch
from mongoengine import connect, disconnect
from spaceone.core.unittest.result import print_data
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.core import config
from spaceone.core import utils
from spaceone.core.model.mongo_model import MongoModel
from spaceone.core.transaction import Transaction
from spaceone.notification.service.user_channel_service import UserChannelService
from spaceone.notification.model.user_channel_model import UserChannel
from spaceone.notification.connector.secret_connector import SecretConnector
from spaceone.notification.connector.identity_connector import IdentityConnector
from spaceone.notification.info.user_channel_info import *
from spaceone.notification.info.common_info import StatisticsInfo
from test.factory.protocol_factory import ProtocolFactory
from test.factory.user_channel_factory import UserChannelFactory
class TestUserChannelService(unittest.TestCase):
@classmethod
def setUpClass(cls):
config.init_conf(package='spaceone.notification')
connect('test', host='mongomock://localhost')
cls.domain_id = utils.generate_id('domain')
cls.transaction = Transaction({
            'service': 'notification',
'api_class': 'UserChannel'
})
super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
super().tearDownClass()
disconnect()
@patch.object(MongoModel, 'connect', return_value=None)
def tearDown(self, *args) -> None:
print()
print('(tearDown) ==> Delete all User Channel')
user_channel_vos = UserChannel.objects.filter()
user_channel_vos.delete()
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, 'get_user', return_value={'user_id': 'bluese05', 'name': 'JH Song'})
@patch.object(MongoModel, 'connect', return_value=None)
def test_create_user_channel(self, *args):
protocol_vo = ProtocolFactory(domain_id=self.domain_id)
protocol_id = protocol_vo.protocol_id
params = {
'name': 'Test User Channel',
'protocol_id': protocol_id,
'schema': 'slack_webhook',
'user_id': 'bluese05',
'data': {
'token': 'xxxxxx',
'channel': 'bob'
},
'is_scheduled': True,
'schedule': {
'day_of_week': ['MON'],
'start_hour': 1,
'end_hour': 10
},
'tags': {
utils.random_string(): utils.random_string()
},
'domain_id': self.domain_id
}
self.transaction.method = 'create'
user_ch_svc = UserChannelService(transaction=self.transaction)
user_ch_vo = user_ch_svc.create(params.copy())
        print_data(user_ch_vo.to_dict(), 'test_create_user_channel')
UserChannelInfo(user_ch_vo)
self.assertIsInstance(user_ch_vo, UserChannel)
self.assertEqual(params['name'], user_ch_vo.name)
self.assertEqual(True, user_ch_vo.is_scheduled)
self.assertEqual(False, user_ch_vo.is_subscribe)
self.assertEqual(None, user_ch_vo.secret_id)
self.assertEqual(params['schedule']['day_of_week'], user_ch_vo.schedule.day_of_week)
self.assertEqual(params['schedule']['start_hour'], user_ch_vo.schedule.start_hour)
self.assertEqual(params['tags'], user_ch_vo.tags)
self.assertEqual(params['domain_id'], user_ch_vo.domain_id)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, 'get_user', return_value={'user_id': 'bluese05', 'name': 'JH Song'})
@patch.object(SecretConnector, 'create_secret', return_value={'secret_id': 'secret-xyz', 'name': 'Secret'})
@patch.object(SecretConnector, 'update_secret', return_value={'secret_id': 'secret-xyz', 'name': 'Update Secret'})
@patch.object(MongoModel, 'connect', return_value=None)
def test_create_user_channel_secret(self, *args):
protocol_capability = {
'data_type': 'SECRET',
'supported_schema': ['slack_webhook']
}
protocol_vo = ProtocolFactory(domain_id=self.domain_id, capability=protocol_capability)
protocol_id = protocol_vo.protocol_id
params = {
'name': 'Test User Channel',
'protocol_id': protocol_id,
'schema': 'slack_webhook',
'user_id': 'bluese05',
'data': {
'token': 'xxxxxx',
'channel': 'bob'
},
'is_scheduled': True,
'schedule': {
'day_of_week': ['MON'],
'start_hour': 1,
'end_hour': 10
},
'tags': {
utils.random_string(): utils.random_string()
},
'domain_id': self.domain_id
}
self.transaction.method = 'create'
user_ch_svc = UserChannelService(transaction=self.transaction)
user_ch_vo = user_ch_svc.create(params.copy())
        print_data(user_ch_vo.to_dict(), 'test_create_user_channel_secret')
UserChannelInfo(user_ch_vo)
self.assertIsInstance(user_ch_vo, UserChannel)
self.assertEqual(params['name'], user_ch_vo.name)
self.assertEqual(True, user_ch_vo.is_scheduled)
self.assertEqual(False, user_ch_vo.is_subscribe)
self.assertEqual('secret-xyz', user_ch_vo.secret_id)
self.assertEqual({}, user_ch_vo.data)
self.assertEqual(params['schedule']['day_of_week'], user_ch_vo.schedule.day_of_week)
self.assertEqual(params['schedule']['start_hour'], user_ch_vo.schedule.start_hour)
self.assertEqual(params['tags'], user_ch_vo.tags)
self.assertEqual(params['domain_id'], user_ch_vo.domain_id)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_update_user_channel(self, *args):
user_channel_vo = UserChannelFactory(domain_id=self.domain_id)
name = 'Update User Channel'
data = {
'token': 'update-token',
'channel': 'update-channel'
}
params = {
'name': name,
'user_channel_id': user_channel_vo.user_channel_id,
'data': data,
'tags': {
utils.random_string(): utils.random_string()
},
'domain_id': self.domain_id
}
self.transaction.method = 'update'
user_ch_svc = UserChannelService(transaction=self.transaction)
update_user_ch_vo = user_ch_svc.update(params.copy())
        print_data(update_user_ch_vo.to_dict(), 'test_update_user_channel')
UserChannelInfo(update_user_ch_vo)
self.assertIsInstance(update_user_ch_vo, UserChannel)
self.assertEqual(name, update_user_ch_vo.name)
self.assertEqual(data, update_user_ch_vo.data)
self.assertEqual(params['tags'], update_user_ch_vo.tags)
self.assertEqual(params['domain_id'], update_user_ch_vo.domain_id)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(SecretConnector, 'update_secret_data')
@patch.object(MongoModel, 'connect', return_value=None)
def test_update_user_channel_secret(self, *args):
user_channel_vo = UserChannelFactory(domain_id=self.domain_id, secret_id='secret-xyz')
name = 'Update User Channel'
data = {
'token': 'update-token',
'channel': 'update-channel'
}
params = {
'name': name,
'user_channel_id': user_channel_vo.user_channel_id,
'data': data,
'tags': {
utils.random_string(): utils.random_string()
},
'domain_id': self.domain_id
}
self.transaction.method = 'update'
user_ch_svc = UserChannelService(transaction=self.transaction)
update_user_ch_vo = user_ch_svc.update(params.copy())
print_data(update_user_ch_vo.to_dict(), 'test_update_user_channel_secret')
UserChannelInfo(update_user_ch_vo)
self.assertIsInstance(update_user_ch_vo, UserChannel)
self.assertEqual(name, update_user_ch_vo.name)
self.assertEqual('secret-xyz', update_user_ch_vo.secret_id)
self.assertEqual({}, update_user_ch_vo.data)
self.assertEqual(params['tags'], update_user_ch_vo.tags)
self.assertEqual(params['domain_id'], update_user_ch_vo.domain_id)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_enable_user_channel(self, *args):
user_channel_vo = UserChannelFactory(domain_id=self.domain_id, state='DISABLED')
params = {
'user_channel_id': user_channel_vo.user_channel_id,
'domain_id': self.domain_id
}
self.transaction.method = 'enable'
user_channel_svc = UserChannelService(transaction=self.transaction)
updated_user_channel_vo = user_channel_svc.enable(params.copy())
print_data(updated_user_channel_vo.to_dict(), 'test_enable_user_channel')
UserChannelInfo(updated_user_channel_vo)
self.assertIsInstance(updated_user_channel_vo, UserChannel)
self.assertEqual(updated_user_channel_vo.user_channel_id, user_channel_vo.user_channel_id)
self.assertEqual('ENABLED', updated_user_channel_vo.state)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_disable_user_channel(self, *args):
user_channel_vo = UserChannelFactory(domain_id=self.domain_id, state='ENABLED')
params = {
'user_channel_id': user_channel_vo.user_channel_id,
'domain_id': self.domain_id
}
self.transaction.method = 'disable'
user_channel_svc = UserChannelService(transaction=self.transaction)
updated_user_channel_vo = user_channel_svc.disable(params.copy())
        print_data(updated_user_channel_vo.to_dict(), 'test_disable_user_channel')
UserChannelInfo(updated_user_channel_vo)
self.assertIsInstance(updated_user_channel_vo, UserChannel)
self.assertEqual(updated_user_channel_vo.user_channel_id, user_channel_vo.user_channel_id)
self.assertEqual('DISABLED', updated_user_channel_vo.state)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_delete_user_channel(self, *args):
user_channel_vo = UserChannelFactory(domain_id=self.domain_id)
params = {
'user_channel_id': user_channel_vo.user_channel_id,
'domain_id': self.domain_id
}
self.transaction.method = 'delete'
user_channel_svc = UserChannelService(transaction=self.transaction)
result = user_channel_svc.delete(params)
self.assertIsNone(result)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(SecretConnector, 'delete_secret')
@patch.object(MongoModel, 'connect', return_value=None)
def test_delete_user_channel_secret(self, *args):
user_channel_vo = UserChannelFactory(domain_id=self.domain_id, secret_id='secret-abcde')
params = {
'user_channel_id': user_channel_vo.user_channel_id,
'domain_id': self.domain_id
}
self.transaction.method = 'delete'
user_channel_svc = UserChannelService(transaction=self.transaction)
result = user_channel_svc.delete(params)
self.assertIsNone(result)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_get_user_channel(self, *args):
user_channel_vo = UserChannelFactory(domain_id=self.domain_id)
params = {
'user_channel_id': user_channel_vo.user_channel_id,
'domain_id': self.domain_id
}
self.transaction.method = 'get'
user_channel_svc = UserChannelService(transaction=self.transaction)
get_user_channel_vo = user_channel_svc.get(params)
print_data(get_user_channel_vo.to_dict(), 'test_get_user_channel')
UserChannelInfo(get_user_channel_vo)
self.assertIsInstance(get_user_channel_vo, UserChannel)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_list_user_channels_by_user_channel_id(self, *args):
user_channel_vos = UserChannelFactory.build_batch(10, domain_id=self.domain_id)
list(map(lambda vo: vo.save(), user_channel_vos))
params = {
'user_channel_id': user_channel_vos[0].user_channel_id,
'domain_id': self.domain_id
}
self.transaction.method = 'list'
user_channel_svc = UserChannelService(transaction=self.transaction)
        user_channel_vos, total_count = user_channel_svc.list(params)
        UserChannelsInfo(user_channel_vos, total_count)
        self.assertEqual(len(user_channel_vos), 1)
        self.assertIsInstance(user_channel_vos[0], UserChannel)
self.assertEqual(total_count, 1)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_list_user_channels_by_name(self, *args):
user_channel_vos = UserChannelFactory.build_batch(10, domain_id=self.domain_id)
list(map(lambda vo: vo.save(), user_channel_vos))
params = {
'name': user_channel_vos[0].name,
'domain_id': self.domain_id
}
self.transaction.method = 'list'
user_channel_svc = UserChannelService(transaction=self.transaction)
user_channel_vos, total_count = user_channel_svc.list(params)
UserChannelsInfo(user_channel_vos, total_count)
self.assertEqual(len(user_channel_vos), 1)
self.assertIsInstance(user_channel_vos[0], UserChannel)
self.assertEqual(total_count, 1)
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_stat_user_channel(self, *args):
user_channel_vos = UserChannelFactory.build_batch(10, domain_id=self.domain_id)
list(map(lambda vo: vo.save(), user_channel_vos))
params = {
'domain_id': self.domain_id,
'query': {
'aggregate': [{
'group': {
'keys': [{
'key': 'user_channel_id',
'name': 'Id'
}],
'fields': [{
'operator': 'count',
'name': 'Count'
}]
}
}, {
'sort': {
'key': 'Count',
'desc': True
}
}]
}
}
self.transaction.method = 'stat'
user_channel_svc = UserChannelService(transaction=self.transaction)
values = user_channel_svc.stat(params)
StatisticsInfo(values)
print_data(values, 'test_stat_user_channel')
@patch.object(SecretConnector, '__init__', return_value=None)
@patch.object(IdentityConnector, '__init__', return_value=None)
@patch.object(MongoModel, 'connect', return_value=None)
def test_stat_user_channel_distinct(self, *args):
user_channel_vos = UserChannelFactory.build_batch(10, domain_id=self.domain_id)
list(map(lambda vo: vo.save(), user_channel_vos))
params = {
'domain_id': self.domain_id,
'query': {
'distinct': 'user_channel_id',
'page': {
'start': 2,
'limit': 3
}
}
}
self.transaction.method = 'stat'
user_channel_svc = UserChannelService(transaction=self.transaction)
values = user_channel_svc.stat(params)
StatisticsInfo(values)
print_data(values, 'test_stat_user_channel_distinct')
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
|
from pioneer.das.api.sensors.sensor import Sensor
from pioneer.das.api.sensors.pixell import Pixell
from pioneer.das.api.sensors.motor_lidar import MotorLidar
from pioneer.das.api.sensors.camera import Camera
from pioneer.das.api.sensors.imu_sbg_ekinox import ImuSbgEkinox
from pioneer.das.api.sensors.encoder import Encoder
from pioneer.das.api.sensors.carla_imu import CarlaIMU
from pioneer.das.api.sensors.radar_ti import RadarTI
from pioneer.das.api.sensors.radar_conti import RadarConti
from pioneer.das.api.sensors.lcax import LCAx
from pioneer.das.api.sensors.lca3 import LCA3
SENSOR_FACTORY = {
'lca2': LCAx,
'pixell': Pixell,
'lca3': LCA3,
'eagle': LCA3,
'flir': Camera,
'camera': Camera,
'sbgekinox': ImuSbgEkinox,
'vlp16': MotorLidar,
'ouster64': MotorLidar,
'peakcan': Sensor,
'radarTI': RadarTI,
'radarConti': RadarConti,
'webcam': Camera,
'encoder': Encoder,
'carlaimu': CarlaIMU,
'leddar': LCAx,
'lidar': MotorLidar,
'any': Sensor,
'manual': Sensor,
}
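# Illustrative usage sketch: map a sensor type key to its das.api class, e.g.
#     sensor_cls = SENSOR_FACTORY['pixell']               # -> Pixell
#     sensor_cls = SENSOR_FACTORY.get('unknown', Sensor)  # fall back to the generic Sensor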
|
from sqlalchemy import create_engine, Column, Table, ForeignKey, MetaData
from sqlalchemy.orm import relationship # , foreign
from sqlalchemy.ext.declarative import declarative_base # DeclarativeBase
from sqlalchemy import (
Integer, String, Date, DateTime, Float, Boolean, Text)
from scrapy.utils.project import get_project_settings
# from sqlalchemy.pool import StaticPool
# from sqlalchemy.ext.mutable import MutableList
# from sqlalchemy import PickleType
Base = declarative_base()
# CONNECTION_STRING = 'sqlite:///TickerScrape.db'
def db_connect():
"""
Performs database connection using database settings from settings.py.
Returns sqlalchemy engine instance
"""
return create_engine(get_project_settings().get("CONNECTION_STRING"),
connect_args={'check_same_thread': False},)
# poolclass=StaticPool) # , echo=True)
# return create_engine(get_project_settings().get("CONNECTION_STRING"), connect_args={'check_same_thread': False})
def create_table(engine):
Base.metadata.create_all(engine)
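# Illustrative usage sketch (CONNECTION_STRING must be set in the Scrapy
# project settings, e.g. 'sqlite:///TickerScrape.db'):
#     engine = db_connect()
#     create_table(engine)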
# def create_output_table(engine, spider_name):
# # Create table with the spider_name
# DeclarativeBase.metadata.create_all(engine)
# Association Table for Many-to-Many relationship between Security and Country
countries_association = Table('countries_association', Base.metadata,
Column('security_id', Integer, ForeignKey(
'security.id'), primary_key=True),
Column('country_id', Integer, ForeignKey(
'country.id'), primary_key=True)
)
# Association Table for Many-to-Many relationship between Security and Sector
industries_association = Table('industries_association', Base.metadata,
Column('security_id', Integer, ForeignKey(
'security.id'), primary_key=True),
Column('industry_id', Integer, ForeignKey(
'industry.id'), primary_key=True)
)
# Association Table for Many-to-Many relationship between Security and Exchange
exchanges_association = Table('exchanges_association', Base.metadata,
Column('security_id', Integer, ForeignKey(
'security.id'), primary_key=True),
Column('exchange_id', Integer, ForeignKey(
'exchange.id'), primary_key=True)
)
# Association Table for Many-to-Many relationship between Countries and Currencies
currencies_association = Table('currencies_association', Base.metadata,
Column('country_id', Integer, ForeignKey(
'country.id'), primary_key=True),
Column('currency_id', Integer, ForeignKey(
'currency.id'), primary_key=True)
)
# Association Table for Many-to-Many relationship between Security and Tag
tags_association = Table('tags_association', Base.metadata,
Column('security_id', Integer, ForeignKey(
'security.id'), primary_key=True),
Column('tag_id', Integer, ForeignKey(
'tag.id'), primary_key=True)
)
class Security(Base):
__tablename__ = "security"
id = Column(Integer, primary_key=True)
ticker = Column('ticker', Text(), nullable=False)
name = Column('name', Text())
beta = Column('beta', Float, default=None)
market_cap = Column('market_cap', Float, default=None)
pe_ratio = Column('pe_ratio', Float, default=None)
short_int = Column('short_int', Float, default=None)
price_to_sales = Column('price_to_sales', Float, default=None)
price_to_book = Column('price_to_book', Float, default=None)
price_to_fcf = Column('price_to_fcf', Float, default=None)
net_margin = Column('net_margin', Float, default=None)
roc = Column('roc', Float, default=None)
roi = Column('roi', Float, default=None)
debt_to_equity = Column('debt_to_equity', Float, default=None)
debt_to_assets = Column('debt_to_assets', Float, default=None)
current_ratio = Column('current_ratio', Float, default=None)
quick_ratio = Column('quick_ratio', Float, default=None)
cash_ratio = Column('cash_ratio', Float, default=None)
summary = Column('summary', Text(), default=None)
average_rec = Column('average_rec', Text(), default=None)
no_of_ratings = Column('no_of_ratings', Integer, default=None)
high_target = Column('high_target', Float, default=None)
median_target = Column('median_target', Float, default=None)
low_target = Column('low_target', Float, default=None)
avg_target = Column('avg_target', Float, default=None)
inception_date = Column('inception_date', DateTime, default=None)
# Many securities to one asset class
asset_class_id = Column(Integer, ForeignKey('asset_class.id'))
countries = relationship('Country', secondary='countries_association',
lazy='dynamic', backref="security", overlaps="country,securities") # M-to-M for securities and countries
industries = relationship('Industry', secondary='industries_association',
lazy='dynamic', backref="security", overlaps="security,industries") # M-to-M for security and industry
exchanges = relationship('Exchange', secondary='exchanges_association',
lazy='dynamic', backref="security", overlaps="security,exchanges") # M-to-M for security and exchange
tags = relationship('Tag', secondary='tags_association',
lazy='dynamic', backref="security", overlaps="security,tags") # M-to-M for security and tag
# 1-to-1 for security and signal
# signal = relationship(
# 'Signal', back_populates='security', uselist=False)
# def __repr__(self):
# return "<{0} Id: {1} - Ticker: {2}, Name: {3}, Industry: {4} Beta: {5} Analyst Rec: {6} Mkt. Cap: {7}>".format(self.__class__name, self.id,
# self.ticker, self.name, self.industry, self.beta, self.average_rec, self.market_cap)
class AssetClass(Base):
__tablename__ = "asset_class"
id = Column(Integer, primary_key=True)
name = Column('name', String(160), unique=True)
securities = relationship('Security', backref='asset_class', lazy='dynamic')
def __repr__(self):
# return "<{0} Id: {1} - Name: {2}>".format(self.__class__name, self.id,
# self.name)
return "<{0} Id: {1} - Name: {2}>".format(self.__tablename__, self.id,
self.name)
class Country(Base):
__tablename__ = "country"
id = Column(Integer, primary_key=True)
name = Column('name', String(32)) # , nullable=False)
ISO_3166 = Column('ISO_3166', String(8)) # , nullable=False)
continent = Column('continent', String(16), default=None)
region = Column('region', String(8), default=None)
econ_group = Column('econ_group', String(32), default=None)
geopol_group = Column('geopol_group', String(256), default=None)
# geopol_group = Column('geopol_group', MutableList.as_mutable(PickleType),
# default=[])
territory = Column('territory', Boolean, default=False)
    territory_of = Column('territory_of', String(160), default=None)
cb_on_rate = Column('cb_on_rate', Float, default=None)
last_rate_change = Column('last_rate_change', DateTime, default=None)
last_m_infl = Column('last_m_infl', Float, default=None)
flag_url = Column('flag_url', Text(), default=None)
gdp = Column('gdp', Float, default=None)
gnp = Column('gnp', Float, default=None)
# currency_id = Column(Integer, ForeignKey('currency.id'))
currencies = relationship('Currency', secondary='currencies_association',
lazy='dynamic', backref="country", overlaps="currency,countries")
securities = relationship('Security', secondary='countries_association',
lazy='dynamic', backref="country", overlaps="country,securities")
# def __repr__(self):
# return "<{0} Id: {1} - Name: {2}, Continent: {3}, Region: {4}, Geopol. Group: {5}>".format(self.__class__name, self.id,
# self.name, self.continent, self.region, self.geo_region)
class Currency(Base):
__tablename__ = "currency"
id = Column(Integer, primary_key=True)
name = Column('name', String(160), unique=True)
ticker = Column('ticker', String(32), unique=True, default=None)
fx_symbol = Column('fx_symbol', String(16), default=None)
ISO_4217 = Column('ISO_4217', Integer, default=None)
minor_unit = Column('minor_unit', Integer, default=None)
fund = Column('fund', Boolean, default=False)
description = Column('description', Text(), default=None)
countries = relationship('Country', secondary='currencies_association',
lazy='dynamic', backref="currency", overlaps="currency,countries")
# exchanges = relationship('Exchange', backref='currency', lazy='dynamic')
def __repr__(self):
# return "<{0} Id: {1} - Name: {2}, Ticker: {3}, Minor Unit {4}, Fund {5}>".format(self.__class__name, self.id,
# self.name, self.ticker, self.minor_unit, self.fund)
return "<{0} Id: {1} - Name: {2}>".format(self.__tablename__, self.id,
self.name)
class Industry(Base):
__tablename__ = "industry"
id = Column(Integer, primary_key=True)
NAICS_code = Column('NAICS_code', Integer, default=None)
name = Column('name', String(64), unique=True, default=None)
sector = Column('sector', Text(), default=None)
NAICS_sector_code = Column('NAICS_sector_code', Integer, default=None)
NAICS_desc_code = Column('NAICS_desc_code', Integer, default=None)
Description = Column('Description', Text(), default=None)
beta = Column('beta', Float, default=None)
market_cap = Column('market_cap', Float, default=None)
pe_ratio = Column('pe_ratio', Float, default=None)
price_to_sales = Column('price_to_sales', Float, default=None)
price_to_book = Column('price_to_book', Float, default=None)
price_to_fcf = Column('price_to_fcf', Float, default=None)
net_margin = Column('net_margin', Float, default=None)
roc = Column('roc', Float, default=None)
roi = Column('roi', Float, default=None)
debt_to_equity = Column('debt_to_equity', Float, default=None)
debt_to_assets = Column('debt_to_assets', Float, default=None)
current_ratio = Column('current_ratio', Float, default=None)
quick_ratio = Column('quick_ratio', Float, default=None)
cash_ratio = Column('cash_ratio', Float, default=None)
securities = relationship('Security', secondary='industries_association',
lazy='dynamic', backref="industry", overlaps="security,industries")
# def __repr__(self):
# return "<{0} Id: {1} - Name: {2} Sector: {3} Beta: {4} Mkt. Cap: {5}>".format(self.__class__name, self.id,
# self.name, self.sector, self.market_cap)
    # __table_args__ = {'extend_existing': True}
class Exchange(Base):
__tablename__ = "exchange"
id = Column(Integer, primary_key=True)
name = Column('name', String(32), unique=True) # , nullable=False)
country_id = Column(Integer, ForeignKey('country.id'))
timezone = Column(String(160), unique=True, default=None)
securities = relationship('Security', secondary='exchanges_association',
                              lazy='dynamic', backref="exchange", overlaps="security,exchanges")  # M-to-M for security and exchange
# def __repr__(self):
# return "<{0} Id: {1} - Name: {2} country_id: {3}>".format(self.__class__name, self.id,
# self.name, self.country_id)
class Tag(Base):
__tablename__ = "tag"
id = Column(Integer, primary_key=True)
name = Column('name', String(30), unique=True, default=None)
securities = relationship('Security', secondary='tags_association',
lazy='dynamic', backref="tag") # , overlaps="security,tags") # M-to-M for security and tag
# def __repr__(self):
# return "<{0} Id: {1} - Name: {2}>".format(self.__class__name, self.id,
# self.name)
# from sqlalchemy.orm import aliased
# Sector = aliased(Industry)
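# --- Usage sketch (not part of the original module) ---------------------------
# A minimal example of how these declarative models could be exercised, assuming
# `Base` and the SQLAlchemy imports are defined earlier in this file; the SQLite
# URL and the sample values below are illustrative assumptions only.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)  # creates all tables, including the association tables
    session = sessionmaker(bind=engine)()
    usd = Currency(name="US Dollar", ticker="USD", minor_unit=2)
    usa = Country(name="United States", ISO_3166="US")
    usa.currencies.append(usd)        # rows go into currencies_association
    aapl = Security(ticker="AAPL", name="Apple Inc.")
    aapl.countries.append(usa)        # rows go into countries_association
    session.add_all([usd, usa, aapl])
    session.commit()
    print(usa.currencies.all())       # lazy='dynamic' relationships return query objects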
|
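# Copies every second line (the 0-based odd indices) of the given file to "out-<filename>".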
f_name = input()
f = open(f_name, "r")
text = f.read().split("\n")
f.close()
f = open("out-"+f_name, "w+")
for k, v in enumerate(text):
if k % 2 != 0:
f.write("{}\n".format(v))
f.close()
|
#!/usr/bin/python3
from math import sqrt
def lmapi(*args, **kwargs):
return list(map(int, *args, **kwargs))
def sign(x):
return 0 if x == 0 else x // abs(x)
def test(vx, vy, sx, ex, sy, ey):
x, y = 0, 0
ymax = 0
while True:
x += vx
y += vy
ymax = max(ymax, y)
if sx <= x <= ex and sy <= y <= ey:
return ymax
vx -= sign(vx)
vy -= 1
if vx >= 0 and x > ex: return -1
if vx <= 0 and x < sx: return -1
if vy <= 0 and y < sy: return -1
def solve(input_fname):
with open(input_fname) as ifile:
line = ifile.readline().strip().strip("target area: ")
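        # note: str.strip() removes a *set of characters* from both ends, not a
        # literal prefix; it happens to leave the "x=..", "y=.." parts intact here.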
lx, ly = line.split(", ")
sx, ex = lmapi(lx[2:].split(".."))
sy, ey = lmapi(ly[2:].split(".."))
target = sx, ex, sy, ey
gymax = 0
for vx in range(6,200): # was run with 22, 200 for main input
for vy in range(1,200):
ymax = test(vx, vy, *target)
gymax = max(ymax, gymax)
return gymax
if __name__ == "__main__":
print(solve("../inputs/day17-demo.in"))
|
import argparse
import gdown
import os
urls_fns_dict = {
"USPTO_50k": [
("https://drive.google.com/uc?id=1pz-qkfeXzeD_drO9XqZVGmZDSn20CEwr", "src-train.txt"),
("https://drive.google.com/uc?id=1ZmmCJ-9a0nHeQam300NG5i9GJ3k5lnUl", "tgt-train.txt"),
("https://drive.google.com/uc?id=1NqLI3xpy30kH5fbVC0l8bMsMxLKgO-5n", "src-val.txt"),
("https://drive.google.com/uc?id=19My9evSNc6dlk9od5OrwkWauBpzL_Qgy", "tgt-val.txt"),
("https://drive.google.com/uc?id=1l7jSqYfIr0sL5Ad6TUxsythqVFjFudIx", "src-test.txt"),
("https://drive.google.com/uc?id=17ozyajoqPFeVjfViI59-QpVid1M0zyKN", "tgt-test.txt")
],
"USPTO_full": [
("https://drive.google.com/uc?id=1PbHoIYbm7-69yPOvRA0CrcjojGxVCJCj", "src-train.txt"),
("https://drive.google.com/uc?id=1RRveZmyXAxufTEix-WRjnfdSq81V9Ud9", "tgt-train.txt"),
("https://drive.google.com/uc?id=1jOIA-20zFhQ-x9fco1H7Q10R6CfxYeZo", "src-val.txt"),
("https://drive.google.com/uc?id=19ZNyw7hLJaoyEPot5ntKBxz_o-_R14QP", "tgt-val.txt"),
("https://drive.google.com/uc?id=1ErtNB29cpSld8o_gr84mKYs51eRat0H9", "src-test.txt"),
("https://drive.google.com/uc?id=1kV9p1_KJm8EqK6OejSOcqRsO8DwOgjL_", "tgt-test.txt")
],
"USPTO_480k": [
("https://drive.google.com/uc?id=1RysNBvB2rsMP0Ap9XXi02XiiZkEXCrA8", "src-train.txt"),
("https://drive.google.com/uc?id=1CxxcVqtmOmHE2nhmqPFA6bilavzpcIlb", "tgt-train.txt"),
("https://drive.google.com/uc?id=1FFN1nz2yB4VwrpWaBuiBDzFzdX3ONBsy", "src-val.txt"),
("https://drive.google.com/uc?id=1pYCjWkYvgp1ZQ78EKQBArOvt_2P1KnmI", "tgt-val.txt"),
("https://drive.google.com/uc?id=10t6pHj9yR8Tp3kDvG0KMHl7Bt_TUbQ8W", "src-test.txt"),
("https://drive.google.com/uc?id=1FeGuiGuz0chVBRgePMu0pGJA4FVReA-b", "tgt-test.txt")
],
"USPTO_STEREO": [
("https://drive.google.com/uc?id=1r3_7WMEor7-CgN34Foj-ET-uFco0fURU", "src-train.txt"),
("https://drive.google.com/uc?id=1HUBLDtqEQc6MQ-FZQqNhh2YBtdc63xdG", "tgt-train.txt"),
("https://drive.google.com/uc?id=1WwCH8ASgBM1yOmZe0cJ46bj6kPSYYIRc", "src-val.txt"),
("https://drive.google.com/uc?id=19OsSpXxWJ-XWuDwfG04VTYzcKAJ28MTw", "tgt-val.txt"),
("https://drive.google.com/uc?id=1FcbWZnyixhptaO6DIVjCjm_CeTomiCQJ", "src-test.txt"),
("https://drive.google.com/uc?id=1rVWvbmoVC90jyGml_t-r3NhaoWVVSKLe", "tgt-test.txt")
]
}
def parse_args():
parser = argparse.ArgumentParser("download_raw_data.py", conflict_handler="resolve")
parser.add_argument("--data_name", help="data name", type=str, default="",
choices=["USPTO_50k", "USPTO_full", "USPTO_480k", "USPTO_STEREO"])
return parser.parse_args()
def main():
args = parse_args()
data_path = os.path.join("./data", args.data_name)
os.makedirs(data_path, exist_ok=True)
for url, fn in urls_fns_dict[args.data_name]:
ofn = os.path.join(data_path, fn)
if not os.path.exists(ofn):
gdown.download(url, ofn, quiet=False)
assert os.path.exists(ofn)
else:
print(f"{ofn} exists, skip downloading")
if __name__ == "__main__":
main()
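# Example invocation (script name taken from the argparse prog above):
#   python download_raw_data.py --data_name USPTO_50k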
|
## run paired motif simulation script
import logging
import click
import numpy as np
from pathlib import Path
import torch
from torch import optim
from raptgen import models
from raptgen.models import CNN_Mul_VAE, LSTM_Mul_VAE, CNNLSTM_Mul_VAE
from raptgen.models import CNN_AR_VAE, LSTM_AR_VAE, CNNLSTM_AR_VAE
from raptgen.models import CNN_PHMM_VAE, LSTM_PHMM_VAE, CNNLSTM_PHMM_VAE
from raptgen.data import SequenceGenerator, SingleRound
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
default_path = str(Path(f"{dir_path}/../out/simulation/paired").resolve())
@click.command(help='run experiment with paired motif',
context_settings=dict(show_default=True))
@click.option("--n-seq", help = "the number of the sequence to generate", type = int, default = 5000)
@click.option("--seed", help = "seed for sequence generation reproduction", type = int, default = 0)
@click.option("--epochs", help = "the number of training epochs", type = int, default = 1000)
@click.option("--threshold", help = "the number of epochs with no loss update to stop training", type = int, default = 50)
@click.option("--use-cuda/--no-cuda", help = "use cuda if available", is_flag=True, default = True)
@click.option("--cuda-id", help = "the device id of cuda to run", type = int, default = 0)
@click.option("--save-dir", help = "path to save results", type = click.Path(), default=default_path)
@click.option("--reg-epochs", help = "the number of epochs to conduct state transition regularization", type = int, default=50)
@click.option("--multi", help = "the number of training for multiple times", type = int, default=1)
@click.option("--only-cnn/--all-models", help = "train all encoder types or not", type = bool, default=False)
def main(n_seq, seed, epochs, threshold, cuda_id, use_cuda, save_dir,reg_epochs, multi, only_cnn):
logger = logging.getLogger(__name__)
logger.info(f"saving to {save_dir}")
save_dir = Path(save_dir).expanduser()
save_dir.mkdir(exist_ok = True, parents=True)
# generate sequences
fwd_adapter = "AAAAA"
rev_adapter = "GGGGG"
generator = SequenceGenerator(
num_motifs = 1,
seed=seed,
fix_random_region_length=True,
error_rate=0,
generate_motifs=True,
add_primer=True,
forward_primer=fwd_adapter,
reverse_primer=rev_adapter,
middle_insert_range=[2, 6],
one_side_proba=0.5,
paired=True)
reads, motif_indices, paired_indices = generator.sample(n_seq)
with open(save_dir/"seqences.txt","w") as f:
for index, read in zip(motif_indices, reads):
f.write(f"{index}, {read}\n")
with open(save_dir/"motifs.txt","w") as f:
for motif in generator.motifs:
f.write(f"{motif}\n")
experiment = SingleRound(
reads,
forward_adapter = fwd_adapter,
reverse_adapter = rev_adapter)
# training
train_loader, test_loader = experiment.get_dataloader(use_cuda=use_cuda)
device = torch.device(f"cuda:{cuda_id}" if (use_cuda and torch.cuda.is_available()) else "cpu")
train_kwargs = {
"epochs" : epochs,
"threshold" : threshold,
"device" : device,
"train_loader" : train_loader,
"test_loader" : test_loader,
"save_dir" : save_dir,
"beta_schedule" : True,
"force_matching" : True,
"force_epochs" : reg_epochs,
}
# evaluate models
target_len = experiment.random_region_length
results = dict()
for i in range(multi):
eval_models = [
CNN_Mul_VAE (target_len=target_len, embed_size=2),
CNN_AR_VAE (embed_size=2),
CNN_PHMM_VAE (motif_len=target_len, embed_size=2)
]
if not only_cnn:
eval_models.extend([
LSTM_Mul_VAE (target_len=target_len, embed_size=2),
LSTM_AR_VAE (embed_size=2),
LSTM_PHMM_VAE (motif_len=target_len, embed_size=2),
CNNLSTM_Mul_VAE(target_len=target_len, embed_size=2),
CNNLSTM_AR_VAE(embed_size=2),
CNNLSTM_PHMM_VAE(motif_len=target_len, embed_size=2)])
for model in eval_models:
            model_str = type(model).__name__.lower()
if multi > 1:
model_str += f"_{i}"
model_str += ".mdl"
print(f"training {model_str}")
optimizer = optim.Adam(model.parameters())
model = model.to(device)
train_kwargs.update({
"model" : model,
"model_str" : model_str,
"optimizer" : optimizer})
results[model_str] = models.train(**train_kwargs)
torch.cuda.empty_cache()
if __name__ == "__main__":
Path("./.log").mkdir(parents=True, exist_ok=True)
formatter = '%(levelname)s : %(name)s : %(asctime)s : %(message)s'
logging.basicConfig(
filename='.log/logger.log',
level=logging.DEBUG,
format=formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
main()
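    # Example invocation (script name assumed; option names come from the click decorators above):
    #   python paired.py --n-seq 5000 --epochs 1000 --multi 1 --only-cnn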
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import activate
from kirppu.views.accounting import accounting_receipt
class Command(BaseCommand):
help = 'Dump accounting CSV to standard output'
def add_arguments(self, parser):
parser.add_argument('--lang', type=str, help="Change language, for example: en")
parser.add_argument('event', type=str, help="Event slug to dump data for")
def handle(self, *args, **options):
        if options["lang"]:
activate(options["lang"])
from kirppu.models import Event
event = Event.objects.get(slug=options["event"])
accounting_receipt(self.stdout, event)
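# Usage (the management command name comes from this module's filename, not shown here):
#   python manage.py <command_name> <event-slug> --lang en > accounting.csv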
|
# Generated by Django 2.0.3 on 2018-05-15 16:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('queueapp', '0002_related_names'),
]
operations = [
migrations.AddField(
model_name='issue',
name='number_tries',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='issue',
name='verdict',
field=models.CharField(choices=[('pending', 'pending'), ('succeeded', 'succeeded'), ('failed', 'failed')], default='pending', max_length=10),
),
]
|
from styx_msgs.msg import TrafficLight
import rospy
import cv2
import tensorflow as tf
import numpy as np
#import time
import os
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras.layers.core import Activation
from keras.optimizers import adam
from keras.utils.data_utils import Sequence
from keras.utils import plot_model, to_categorical
from keras import backend as K
class TLClassifier(object):
def __init__(self):
self.tl_classes_str = ['Red', 'Yellow','Green','--', 'Unknown']
self.tl_classes = [TrafficLight.RED, TrafficLight.YELLOW, TrafficLight.GREEN, '--', TrafficLight.UNKNOWN]
# Detection model
SSDLITE_GRAPH_FILE = 'light_classification/models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb'
self.detection_graph = self.load_graph(SSDLITE_GRAPH_FILE)
# Get placeholders for session
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
# Classification model
CLASSIFIER_GRAPH_FILE = 'light_classification/models/tl_classifier_tf_model.pb'
self.classifier_graph = self.load_graph(CLASSIFIER_GRAPH_FILE)
# Get placeholders for session
self.output_tensor = self.classifier_graph.get_tensor_by_name('activation_5_2/Softmax:0')
self.input_tensor = self.classifier_graph.get_tensor_by_name('conv2d_1_input_2:0')
# configuration for possible GPU use
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Start sessions for detection and classification
self.sess = tf.Session(graph=self.detection_graph)
self.sess1 = tf.Session(graph=self.classifier_graph)
def __del__(self):
self.sess.close()
self.sess1.close()
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Try to detect position of tl in image
#t0 = time.time()
coords = self.detect_tl_in_image(image, self.sess)
#t1 = time.time()
#rospy.loginfo("Detection timing "+str((t1-t0)*1000)+" ms")
#rospy.loginfo("Coords "+str(coords))
if coords.size > 0:
# Crop image around detected traffic light and resize to (16x32 px)
#t2 = time.time()
img = cv2.resize(image[coords[0]:coords[2], coords[1]:coords[3]], (16,32))
image_np = np.expand_dims(np.asarray(img, dtype=np.uint8), 0)
pred_label = 4
# Predict label (red, yellow, green)
#t3 = time.time()
predictions = self.sess1.run(self.output_tensor, {self.input_tensor: image_np})
pred_label = np.argmax(predictions[0])
#t4 = time.time()
#rospy.loginfo("Classification timing: "+str((t4-t3)*1000)+" ms")
color_string = self.tl_classes_str[pred_label]
#rospy.loginfo("[INFO] TL_Classifier classified TL as "+ color_string)
return self.tl_classes[pred_label]
else:
rospy.loginfo("[WARN] TL_Detector could not find a tl in image ")
return TrafficLight.UNKNOWN
def detect_tl_in_image(self, image, sess):
"""Uses session and pretrained model to detect traffic light in image and
returns the coordinates in the image"""
image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
(boxes, scores, classes) = sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
feed_dict={self.image_tensor: image_np})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
confidence_cutoff = 0.45
search_class = 10 #ID for traffic light
# Filter boxes with a confidence score less than `confidence_cutoff`
boxes, scores, classes = self.filter_boxes(confidence_cutoff, search_class, boxes, scores, classes)
# The current box coordinates are normalized to a range between 0 and 1.
# This converts the coordinates actual location on the image.
width, height = image.shape[1], image.shape[0]
box_coords = self.to_image_coords(boxes, height, width)
# Crop image to detected traffic light
coords = np.squeeze(box_coords).astype(int)
return coords
def filter_boxes(self, min_score, search_class, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
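        # The TF object-detection outputs are sorted by score, so the first box
        # matching the class and cutoff is kept and the loop stops (see `break`).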
idxs = []
for i in range(n):
if scores[i] >= min_score and classes[i] == search_class:
idxs.append(i)
break
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
def to_image_coords(self, boxes, height, width):
"""
The original box coordinate output is normalized, i.e [0, 1].
This converts it back to the original coordinate based on the image
size.
"""
box_coords = np.zeros_like(boxes)
box_coords[:, 0] = boxes[:, 0] * height
box_coords[:, 1] = boxes[:, 1] * width
box_coords[:, 2] = boxes[:, 2] * height
box_coords[:, 3] = boxes[:, 3] * width
return box_coords
def load_graph(self, graph_file):
"""Loads a frozen inference graph (for detection model)"""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
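# Usage sketch (paths above are relative to the ROS node's working directory, an assumption):
#   classifier = TLClassifier()
#   state = classifier.get_classification(bgr_image)   # returns a styx_msgs TrafficLight constant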
|
# -----------------------------------------------------------------------------
# Author: Eduardo Yuzo Nakai
#
# Description: A program for Syntax Analysis.
# -----------------------------------------------------------------------------
import ply.yacc as yacc
from lexical import tokens
import sys
from graphviz import Graph
import os
import string
# For windows:
# os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
#-----------------------tree structure-----------------------#
class Node:
def __init__(self, type, children=None, leaf=None):
self.type = type
if children:
self.children = children
else:
self.children = []
if leaf:
self.leaf = leaf
else:
self.leaf = []
#-----------------------precedence declaration-----------------------#
precedence = (
('left', 'EQUALS', 'LOWER', 'GREATER', 'LOWER_EQUAL', 'GREATER_EQUAL'),
('left', 'SUM', 'SUB'),
('left', 'MUL', 'DIV'),
)
#-----------------------grammar rules-----------------------#
def p_program(p):
'''program : declaration_list'''
p[0] = Node('program', [p[1]])
def p_declaration_list(p):
'''declaration_list : declaration_list declaration
| declaration'''
if len(p) == 3:
p[0] = Node('declaration_list', [p[1], p[2]])
elif len(p) == 2:
p[0] = Node('declaration_list', [p[1]])
def p_declaration(p):
'''declaration : variable_declaration
| variable_initialization
| function_declaration'''
p[0] = Node('declaration', [p[1]])
def p_variable_declaration(p):
'''variable_declaration : type COLON variable_list'''
p[0] = Node('variable_declaration', [p[1], Node(str(p[2])), p[3]])
def p_variable_initialization(p):
'''variable_initialization : attribution'''
p[0] = Node('variable_initialization', [p[1]])
def p_variable_list(p):
'''variable_list : variable_list COMMA var
| var'''
if len(p) == 4:
p[0] = Node('variable_list', [p[1], Node(str(p[2])), p[3]])
elif len(p) == 2:
p[0] = Node('variable_list', [p[1]])
def p_var(p):
'''var : ID
| ID index'''
if len(p) == 2:
p[0] = Node('var', [Node(str(p[1]))])
elif len(p) == 3:
p[0] = Node('var', [Node(str(p[1])), p[2]])
def p_index(p):
'''index : index L_BRACKET expression R_BRACKET
| L_BRACKET expression R_BRACKET'''
if len(p) == 5:
p[0] = Node('index', [p[1], Node(str(p[2])), p[3], Node(str(p[4]))])
elif len(p) == 4:
p[0] = Node('index', [Node(str(p[1])), p[2], Node(str(p[3]))])
def p_type(p):
'''type : INTEGER
| FLOATING'''
p[0] = Node('type', [Node(str(p[1]))])
def p_function_declaration(p):
'''function_declaration : type header
| header'''
if len(p) == 3:
p[0] = Node('function_declaration', [p[1], p[2]])
elif len(p) == 2:
p[0] = Node('function_declaration', [p[1]])
def p_header(p):
'''header : ID L_PAREN parameter_list R_PAREN body END'''
p[0] = Node('header', [Node(str(p[1])), Node(str(p[2])), p[3], Node(str(p[4])), p[5], Node(str(p[6]))], leaf=[p[1], p[2], p[4], p[6]])
def p_parameter_list(p):
'''parameter_list : parameter_list COMMA parameter
| parameter
| empty'''
if len(p) == 4:
p[0] = Node('parameter_list', [p[1], Node(str(p[2])), p[3]])
else:
if p[1] == None:
p[0] = Node('parameter_list', [Node('Empty')])
else:
p[0] = Node('parameter_list', [p[1]])
def p_parameter(p):
'''parameter : type COLON ID
| parameter L_BRACKET R_BRACKET'''
if p[2] == ':':
p[0] = Node('parameter', [p[1], Node(str(p[2])), Node(str(p[3]))])
elif p[2] == '[':
p[0] = Node('parameter', [p[1], Node(str(p[2])), Node(str(p[3]))])
def p_body(p):
'''body : body action
| empty'''
if len(p) == 3:
p[0] = Node('body', [p[1], p[2]])
else:
if p[1] == None:
p[0] = Node('body', [Node('Empty')])
else:
p[0] = Node('body', [p[1]])
def p_action(p):
'''action : expression
| variable_declaration
| if
| repeat
| read
| write
| return'''
p[0] = Node('action', [p[1]])
def p_if(p):
'''if : IF expression THEN body END
| IF expression THEN body ELSE body END'''
if p[5] == 'fim':
p[0] = Node('if', [Node(str(p[1])), p[2], Node(str(p[3])), p[4], Node(str(p[5]))])
elif p[5] == 'senão':
p[0] = Node('if', [Node(str(p[1])), p[2], Node(str(p[3])), p[4], Node(str(p[5])), p[6], Node(str(p[7]))])
def p_repeat(p):
'''repeat : REPEAT body UNTIL expression'''
p[0] = Node('repeat', [Node(str(p[1])), p[2], Node(str(p[3])), p[4]])
def p_attribution(p):
'''attribution : var ASSIGN expression'''
p[0] = Node('attribution', [p[1], Node(str(p[2])), p[3]])
def p_read(p):
'''read : READ L_PAREN var R_PAREN'''
p[0] = Node('read', [Node(str(p[1])), Node(str(p[2])), p[3], Node(str(p[4]))])
def p_write(p):
'''write : WRITE L_PAREN expression R_PAREN'''
p[0] = Node('write', [Node(str(p[1])), Node(str(p[2])), p[3], Node(str(p[4]))])
def p_return(p):
'''return : RETURN L_PAREN expression R_PAREN'''
p[0] = Node('return', [Node(str(p[1])), Node(str(p[2])), p[3], Node(str(p[4]))])
def p_expression(p):
'''expression : logical_expression
| attribution'''
p[0] = Node('expression', [p[1]])
def p_logical_expression(p):
'''logical_expression : simple_expression
| logical_expression logical_operator simple_expression'''
if len(p) == 2:
p[0] = Node('logical_expression', [p[1]])
elif len(p) == 4:
p[0] = Node('logical_expression', [p[1], p[2], p[3]])
def p_simple_expression(p):
'''simple_expression : additive_expression
| simple_expression relational_operator additive_expression'''
if len(p) == 2:
p[0] = Node('simple_expression', [p[1]])
elif len(p) == 4:
p[0] = Node('simple_expression', [p[1], p[2], p[3]])
def p_additive_expression(p):
'''additive_expression : multiplicative_expression
| additive_expression sum_operator multiplicative_expression'''
if len(p) == 2:
p[0] = Node('additive_expression', [p[1]])
elif len(p) == 4:
p[0] = Node('additive_expression', [p[1], p[2], p[3]])
def p_multiplicative_expression(p):
'''multiplicative_expression : unary_expression
| multiplicative_expression mult_operator unary_expression'''
if len(p) == 2:
p[0] = Node('multiplicative_expression', [p[1]])
elif len(p) == 4:
p[0] = Node('multiplicative_expression', [p[1], p[2], p[3]])
def p_unary_expression(p):
'''unary_expression : factor
| sum_operator factor
| neg_operator factor'''
if len(p) == 2:
p[0] = Node('unary_expression', [p[1]])
elif len(p) == 3:
p[0] = Node('unary_expression', [p[1], p[2]])
def p_relational_operator(p): #what about '<>' ?
'''relational_operator : LOWER
| GREATER
| EQUALS
| LOWER_EQUAL
| GREATER_EQUAL'''
p[0] = Node('relational_operator', [Node(str(p[1]))])
def p_sum_operator(p):
'''sum_operator : SUM
| SUB'''
p[0] = Node('sum_operator', [Node(str(p[1]))])
def p_logical_operator(p):
'''logical_operator : AND_OP
| OR_OP'''
p[0] = Node('logical_operator', [Node(str(p[1]))])
def p_neg_operator(p):
'''neg_operator : NEG_OP'''
p[0] = Node('neg_operator', [Node(str(p[1]))])
def p_mult_operator(p):
'''mult_operator : MUL
| DIV'''
p[0] = Node('mult_operator', [Node(str(p[1]))])
def p_factor(p):
'''factor : L_PAREN expression R_PAREN
| var
| function_call
| number'''
if len(p) == 4:
p[0] = Node('factor', [Node(str(p[1])), p[2], Node(str(p[3]))])
elif len(p) == 2:
p[0] = Node('factor', [p[1]])
def p_number(p):
'''number : INTEGER_NUMBER
| FLOATING_POINT_NUMBER'''
p[0] = Node('number', [Node(str(p[1]))])
def p_function_call(p):
'''function_call : ID L_PAREN argument_list R_PAREN'''
p[0] = Node('function_call', [Node(str(p[1])), Node(str(p[2])), p[3], Node(str(p[4]))])
def p_argument_list(p): #possible error here
'''argument_list : argument_list COMMA expression
| expression
| empty'''
if len(p) == 4:
p[0] = Node('argument_list', [p[1], Node(str(p[2])), p[3]])
elif len(p) == 2:
if p[1] == None:
p[0] = Node('argument_list', [Node('Empty')])
else:
p[0] = Node('argument_list', [p[1]])
def p_empty(p):
'''empty :'''
pass
#-----------------------error rules-----------------------#
error_flag = False
def p_error(p):
global error_flag
error_flag = True
print("\n")
if p:
print("Syntax error in input. Character: %s. Line: %d." % (p.value, p.lineno))
else:
print("Syntax error at EOF.")
#-----------------------repeat error rules-----------------------#
def p_repeat_miscellaneous_error(p):
'''repeat : REPEAT body UNTIL expression error'''
pass
def p_repeat_until_error(p):
'''repeat : REPEAT body error expression'''
print("Syntax error in 'repita': missing 'até'.")
def p_repeat_expression_error(p):
'''repeat : REPEAT body UNTIL error'''
print("Syntax error in 'repita': bad expression.")
def p_repeat_body_error(p):
'''repeat : REPEAT error UNTIL expression'''
print("Syntax error in 'repita'.")
#-----------------------if error rules-----------------------#
def p_if_miscellaneous_error(p):
'''if : IF expression THEN body END error
| IF expression THEN body ELSE body END error'''
pass
def p_if_end_error(p):
'''if : IF expression THEN body error
| IF expression THEN body ELSE body error'''
print("Syntax error in 'se': missing 'fim'.")
def p_if_then_error(p):
'''if : IF expression error body error
| IF expression error body ELSE body error'''
print("Syntax error in 'se': missing 'então'.")
def p_if_expression_error(p):
'''if : IF error THEN body END
| IF error THEN body ELSE body END'''
print("Syntax error in 'se': bad expression.")
def p_if_body_error(p):
'''if : IF expression THEN error END
| IF expression THEN error ELSE error END'''
print("Syntax error in 'se'.")
#-----------------------header error rules-----------------------#
def p_header_miscellaneous_error(p):
'''header : ID L_PAREN parameter_list R_PAREN body END error'''
pass
def p_header_end_error(p):
'''header : ID L_PAREN parameter_list R_PAREN body error'''
print("Syntax error in 'cabeçalho': missing 'fim'.")
def p_header_body_error(p):
'''header : ID L_PAREN parameter_list R_PAREN error END'''
print("Syntax error in 'cabeçalho'.")
#-----------------------simple error rules-----------------------#
def p_parameter_error(p):
'''parameter : error COLON ID
| error L_BRACKET R_BRACKET'''
print("Syntax error in parameter.")
def p_index_error(p):
'''index : index L_BRACKET error R_BRACKET
| L_BRACKET error R_BRACKET'''
print("Syntax error in index definition.")
def p_variable_declaration_error(p):
'''variable_declaration : type COLON error'''
print("Syntax error in variable declaration.")
def p_attribution_error(p):
'''attribution : var ASSIGN error'''
print("Syntax error in variable initialization.")
def p_write_error(p):
'''write : WRITE L_PAREN error R_PAREN'''
print("Syntax error in 'escreva'. Bad expression.")
def p_return_error(p):
'''return : RETURN L_PAREN error R_PAREN'''
print("Syntax error in 'retorna'. Bad expression.")
def p_read_error(p):
'''read : READ L_PAREN error R_PAREN'''
print("Syntax error in 'leia'. Bad expression.")
dot = Graph('AST')
count = 0
def show_tree(node, count_node):
global count
dot.node(str(count_node), str(node.type))
for children in node.children:
if children:
count = count + 1
dot.edges([(str(count_node), str(count))])
show_tree(children, count)
from semantic import Semantic
from cultivation import AST
from code_gen import Gen_Code
if __name__ == '__main__':
filename = sys.argv[1]
sourcefile = open(filename, encoding='UTF-8')
data = sourcefile.read()
parser = yacc.yacc(debug=True)
    p = parser.parse(data) #syntactic analysis
if not error_flag:
Semantic(p) #semantic analysis: object 'p' won't be modified
AST(p) #generate abstract syntax tree: object 'p' will be modified
Gen_Code(p) #code generation: object 'p' won't be modified
#show_tree(p, 0) #uncomment to show tree
#dot.render('tree.gv', view=True) #uncomment to show tree
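    # Usage sketch (file names assumed): python syntax.py source.tpp
    # Parses the source, then runs Semantic, AST cultivation and Gen_Code in that order.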
|
from django.db import models
from django.db.models import Q
import random
import os
from django.db.models.signals import pre_save, post_save
from django.urls import reverse
from blazemarketplace.utils import unique_slug_generator
def upload_image_path(instance, filename):
print(instance)
print(filename)
new_filename = random.randint(1, 3903459312)
name, ext = get_filename_ext(filename)
    final_filename = f'{new_filename}{ext}'
return 'products/{new_filename}/{final_filename}'.format(new_filename = new_filename, final_filename = final_filename)
def get_filename_ext(filename):
base_name = os.path.basename(filename)
name, ext = os.path.splitext(filename)
return name, ext
class ProductManager(models.Manager):
def get_by_id(self, id):
qs = self.get_queryset().filter(id=id)
if qs.count() == 1:
return qs.first()
return None
    def search(self, query):
        return self.get_queryset().search(query)
def featured(self):
return self.get_queryset().filter(featured=True)
def get_queryset(self):
return ProductQuerySet(self.model, using=self.db)
class ProductQuerySet(models.query.QuerySet):
    def featured(self):
        return self.filter(featured=True)
    def search(self, query):
        # price uses icontains on a DecimalField; the tag lookup assumes a related Tag model elsewhere in the project
        lookups = (Q(title__icontains=query) |
                   Q(description__icontains=query) |
                   Q(price__icontains=query) |
                   Q(tag__title__icontains=query)
                   )
        return self.filter(lookups).distinct()
# Create your models here.
class Product(models.Model):
title = models.CharField(max_length=120)
slug = models.SlugField(blank=True, unique=True)
description = models.TextField()
price = models.DecimalField(decimal_places=2, max_digits=20, default=0.00)
featured = models.BooleanField(default=False)
image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
objects = ProductManager()
def get_absolute_url(self):
#return "/products/{slug}/".format(slug=self.slug)
return reverse("detail", kwargs={"slug": self.slug})
def __str__(self):
return self.title
def product_pre_save_receiver(sender, instance, *args, **kwargs):
    if not instance.slug:
        instance.slug = unique_slug_generator(instance)
pre_save.connect(product_pre_save_receiver, sender=Product)
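# Usage sketch (assumes the app is installed and migrated):
#   Product.objects.featured()          # ProductQuerySet filtered on featured=True
#   Product.objects.search("sneakers")  # title/description/price (and tag) lookups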
|
n, m = map(int, input().split())
print(int(input()) * int(input()))
|
"""
Serve 'dank', 'random' request from MgClient with ZMQ.
"""
import os
from gensim.models import KeyedVectors
import numpy as np
from collections import OrderedDict
import re
from random import randint
import random
from lxml import objectify
import base64
import json
import threading
import zmq
from .helper import set_logger
from .nlp.model import EmbedModel
class MgServer(threading.Thread):
def __init__(self, args):
super().__init__()
"""Server routine"""
self.logger = set_logger('VENTILATOR')
self.model_path = os.path.abspath(args.model_path)
self.meme_dir = os.path.abspath(args.meme_dir)+'/'
self.xml_dir = os.path.abspath(args.xml_dir)+'/'
self.vec_path = os.path.abspath(args.vec_path)
self.lang = args.lang
self.port = args.port
self.thread_num = args.thread_num
self.url_worker = "inproc://workers"
self.url_client = "tcp://*:" + self.port
        self.logger.info('opened server : %s' %(self.url_client))
self.logger.info('num of threads : %s' %(self.thread_num))
# Load model
self.logger.info('loading model...' )
self.model = EmbedModel(self.lang)
self.model.load_model(self.model_path)
self.word_vector = KeyedVectors.load_word2vec_format(self.vec_path)
self.logger.info('model load done.')
random.seed()
# Prepare Context and Sockets
self.logger.info('Prepare Context and Sockets...')
self.context = zmq.Context.instance()
# Socket to talk to clients
self.logger.info('opening client socket...')
self.clients = self.context.socket(zmq.ROUTER)
self.clients.bind(self.url_client)
# Socket to talk to workers
self.logger.info('opening worker socket...')
self.workers = self.context.socket(zmq.DEALER)
self.workers.bind(self.url_worker)
def run(self):
# Launch pool of worker threads
self.logger.info('starting workers...')
self.threads = []
for i in range(self.thread_num):
#thread = threading.Thread(target=worker_routine, args=(url_worker,i,))
thread = MgServer.MgWorker(worker_url=self.url_worker, worker_id=i, model=self.model,
meme_dir=self.meme_dir, xml_dir=self.xml_dir, vector=self.word_vector)
thread.start()
self.threads.append(thread)
zmq.proxy(self.clients, self.workers)
def close(self):
self.logger.info('shutting down...')
for p in self.threads:
p.close()
print(p, ': thread down.')
self.join()
def __exit__(self):
self.close()
class MgWorker(threading.Thread):
def __init__(self, worker_url, worker_id, model, meme_dir, xml_dir, vector, context=None):
super().__init__()
self.logger = set_logger('WORKER-%d ' % worker_id)
self.worker_url = worker_url
self.worker_id = worker_id
self.model = model
self.meme_dir = meme_dir
self.xml_dir = xml_dir
self.vector = vector
self.context = context
self.meme_list = self.vector.index2entity
def rep_json(self, rep_name, query, oov_flag, result_exist_flag, memefname_list, episode_list, text_list,
imgdata_list, sim_list):
rep_json = {
"rep": rep_name,
"query": query,
"oov" : oov_flag,
"result_exist": result_exist_flag,
"memefname": memefname_list,
"episode": episode_list,
"text" : text_list,
"imgdata": imgdata_list,
"sim": sim_list
}
return rep_json
def rm_gbg_from_xml(self, value):
# objectify.fromstring() returns value with garbage.
value = str(value).replace('\t','').replace('\n','')
value = re.search(r'\s{0,}(.*)', value).group(1)
return value
def get_text_and_bytes(self, xml_file_name):
meme_fname = None
episode = None
text = None
data = None
# xml_file_name : episode/some.xml
xml_path = self.xml_dir + xml_file_name
with open(xml_path) as xml_file:
xml_str = xml_file.read()
xml_root = objectify.fromstring(xml_str)
# ['filename'] : some.jpg
# .xml contains info of image file.
meme_fname = self.rm_gbg_from_xml(xml_root['filename'])
episode = self.rm_gbg_from_xml(xml_root['folder'])
text = self.rm_gbg_from_xml(xml_root['object']['name'])
meme_path = self.meme_dir + episode +'/'+ meme_fname
with open(meme_path, 'rb') as meme_file:
# Encode image with base64 and str.
data = base64.b64encode(meme_file.read())
data = str(data)
return meme_fname, episode, text, data
def image_result_json(self, query, request):
max_image_num = request['max_image_num']
min_similarity = request['min_similarity']
"""
memefname_list[ meme1_filename, meme2_filename, ... ]
episode_list[ meme1_episode, meme2_episode ... ]
text_list[ meme1_text, meme2_text ... ]
imgdata_list[ meme1_base64data, meme2_base64data, ... ]
sim_list[ meme1_similarity, meme2_similarity, ... ]
"""
memefname_list = []
episode_list = []
text_list = []
imgdata_list = []
sim_list = []
oov_flag = True
result_exist_flag = False
query_vector = self.model.embed_sentence(query)
if(np.any(query_vector[0])):
oov_flag = False
query_vector = np.array(query_vector[0], dtype=np.float32)
most_sim_vectors = self.vector.similar_by_vector(query_vector)
for img_num, xmlfname_and_similarity in enumerate(most_sim_vectors):
# xmlfname_and_similarity [0] : xml fname, [1] : similarity.
if img_num >= max_image_num:
break
if xmlfname_and_similarity[1] < min_similarity:
break
meme_fname, episode, text, data = self.get_text_and_bytes(xmlfname_and_similarity[0])
memefname_list.append(meme_fname)
episode_list.append(episode)
text_list.append(text)
imgdata_list.append(data)
sim_list.append(xmlfname_and_similarity[1])
if(len(imgdata_list)):
result_exist_flag = True
return self.rep_json(request['req'], query, oov_flag, result_exist_flag,
memefname_list, episode_list, text_list, imgdata_list, sim_list)
def dank_dealer(self, request):
"""
Send back multiple memes.
# REQ: 1 dank query. -> REP: N memes in a json in a list.
# = MgClient.dank(['req_query']) -> [rep_json([memefname_list, episode_list, imgdata_list, ...])]
# REQ: N dank queries. -> REP: N jsons in a list.
# = MgClient.dank(['req1_query', 'req2_query']) -> [rep1_json(...), rep2_json(...), ...]
"""
send_back_results = []
for query in request['queries']:
# json_dump = json.dumps(self.image_result_json(req))
rep_json = self.image_result_json(query, request)
send_back_results.append(rep_json)
return json.dumps(send_back_results)
def random_dealer(self, request):
"""
Send back single meme.
# REQ: 1 random query. -> REP: 1 meme in a json in a list.
# = MgClient.random() -> [rep_json([memefname_list, episode_list, imgdata_list, ...])]
"""
memefname_list = []
episode_list = []
text_list = []
imgdata_list = []
send_back_result = []
ridx = randint(0, len(self.meme_list)-1)
meme_fname, episode, text, data = self.get_text_and_bytes(self.meme_list[ridx])
memefname_list.append(meme_fname)
episode_list.append(episode)
text_list.append(text)
imgdata_list.append(data)
rep_json = self.rep_json(request['req'], query=None, oov_flag=False, result_exist_flag=True,
memefname_list=memefname_list, episode_list=episode_list, text_list=text_list,
imgdata_list=imgdata_list, sim_list=[])
send_back_result.append(rep_json)
return json.dumps(send_back_result)
def run(self):
"""Worker routine"""
self.context = self.context or zmq.Context.instance()
# Socket to talk to dispatcher
self.socket = self.context.socket(zmq.REP)
self.socket.connect(self.worker_url)
while True:
self.logger.info('waiting for query worker id %d: ' % (int(self.worker_id)))
# query = self.socket.recv().decode("utf-8")
request = self.socket.recv_string()
request = json.loads(request)
self.logger.info('request\treq worker id %d: %s %s' % (int(self.worker_id), str(request['req']),
str(request['queries'])))
rep_json = None
if request['req'] == 'dank':
rep_json = self.dank_dealer(request)
elif request['req'] == 'random':
rep_json = self.random_dealer(request)
# response.
self.socket.send_string(rep_json)
        def close(self):
            self.logger.info('shutting %d worker down...' %(self.worker_id))
            # threading.Thread has no terminate(); the blocking recv_string() is
            # released when the shared ZMQ context shuts down, after which join() returns.
            self.join()
            self.logger.info('%d worker terminated!' %(self.worker_id))
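# --- Client-side sketch (not part of the original module) ---------------------
# Assumes a plain ZMQ REQ socket speaking the JSON protocol the workers above
# expect; the real MgClient, the port number and the query text are assumptions.
# (This module uses relative imports, so it would be run via `python -m`.)
if __name__ == "__main__":
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://localhost:5556")  # port must match the running MgServer
    sock.send_string(json.dumps({
        "req": "dank",
        "queries": ["example query"],
        "max_image_num": 3,
        "min_similarity": 0.5,
    }))
    for rep in json.loads(sock.recv_string()):
        print(rep["query"], rep["result_exist"], rep["memefname"])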
|
# import biopython
from Bio import SeqIO
from joblib import Parallel, delayed
from tqdm import tqdm
import pathlib
import subprocess
def process(idx: int, id: str, seq: str):
if len(seq) > 1024:
return
folder = "MSA/%04d" % (idx % 1000)
if pathlib.Path(f"{folder}/{id}.a2m").exists():
return
with open(f"{folder}/{id}.fasta","w") as f:
f.write("> id\n" + seq)
# -o {id}
cmd = f"/bin/bash BuildFeatures/HHblitsWrapper/BuildMSA4DistPred.sh -n 3 -c 1 -o {folder} {folder}/{id}.fasta"
subprocess.run(cmd.split())
# BuildFeatures/A3MToA2M.sh -o ./ sample.a3m
cmd = f"/bin/bash BuildFeatures/A3MToA2M.sh -o {folder} {folder}/{id}.a3m"
subprocess.run(cmd.split())
pathlib.Path(f"{folder}/{id}.fasta").unlink(missing_ok=True)
pathlib.Path(f"{folder}/{id}.fasta_raw").unlink(missing_ok=True)
pathlib.Path(f"{folder}/{id}.a3m").unlink(missing_ok=True)
pathlib.Path(f"{folder}/{id}.seq").unlink(missing_ok=True)
for i in range(1000):
pathlib.Path("MSA/%04d" % i).mkdir(parents=True, exist_ok=True)
Parallel(n_jobs=68)(delayed(process)(idx, r.id, str(r.seq)) for idx, r in tqdm(enumerate(SeqIO.parse("/root/uniref90.fasta", "fasta"))))
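# Records are sharded into MSA/0000 .. MSA/0999 by index (idx % 1000) to keep directory sizes manageable.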
|
from predict_popularity import predict as P_Predict
from predict_genre import predict as G_Predict
def runPrediction(audioFileName, transcripts):
text = ''
if len(transcripts) > 0:
text = transcripts[0]['text'] or ''
print("I got these transcripts")
print(transcripts)
return {
'hotness': P_Predict(audioFileName, text).tolist()[0],
'genre': G_Predict(audioFileName)
}
|
SECRET_KEY = '123456789'
FEEDLY_DEFAULT_KEYSPACE = 'test'
FEEDLY_CASSANDRA_HOSTS = [
'127.0.0.1', '127.0.0.2', '127.0.0.3'
]
CELERY_ALWAYS_EAGER = True
import djcelery
djcelery.setup_loader()
|
import os
import readline
import paramiko
from lib.connections import *
from lib.autocomplete import *
import configparser
from pathlib import Path
def workspace_ini_creator(config_path, append=False, ws_path=None):
Config = configparser.ConfigParser()
if append:
Config.read(os.path.join(ws_path))
servers = configparser.ConfigParser()
servers.read(os.path.join(config_path,'servers.ini'))
srv = select_server(servers)
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(pathCompleter)
print (f'[+] Configuring workspace for {srv.name}')
while(1):
info = str(input('Add dir to Workspace (Default name = dir name)\n ex. /home/ReCon : ReConWs\n '))
workspace = [arg.strip() for arg in info.split(':') ]
if len(workspace)==1:
w_name = os.path.split(workspace[0])[-1]
w_path = workspace[0]
else:
w_path = workspace[0]
w_name = workspace[1]
if os.path.exists(w_path):
break
else:
print('[-] Path not available')
Config.add_section(srv.name)
Config.set(srv.name , w_name, w_path)
if append:
return Config
else:
with open(os.path.join(config_path,'workspaces.ini'), 'w+') as configfile:
Config.write(configfile)
return os.path.join(config_path,'workspaces.ini')
def props_ini_creator(config_path):
Config = configparser.ConfigParser()
servers = configparser.ConfigParser()
servers.read(os.path.join(config_path,'servers.ini'))
srv = select_server(servers)
    # assign both keys at once; a second assignment to Config['properties'] would overwrite the first
    Config['properties'] = {'default-server': srv.name, 'auto-sync': 'False'}
with open(os.path.join(config_path,'props.ini'), 'w+') as configfile:
Config.write(configfile)
return os.path.join(config_path,'props.ini')
def server_ini_creator(path, append=False):
home = str(Path.home())
config = configparser.ConfigParser()
if append:
config.read(os.path.join(path))
info = str(input('Add servers -> Nickname, Host, Username, Port, Enable-Jupyter-Forwarding(y/n) / next server...\n ex. pi, 192.168.1.1, Josh, 22, y \n'))
servers = info.split('/')
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(pathCompleter)
for server in servers:
server = server.split(',')
pkey_path = input(f'RSA (Private) Key Path for Server {server[1].strip()} (Default: {os.path.join(home, ".ssh", "id_rsa")}): ')
if (pkey_path == ''):
pkey_path = os.path.join(home, '.ssh', 'id_rsa')
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_pkey = paramiko.RSAKey.from_private_key_file(pkey_path)
ssh.connect(server[1].strip(), port=server[3].strip(), pkey=ssh_pkey, username=server[2].strip())
print (f'[+] Success for host: {server[1].strip()}')
_, stdout, _ = ssh.exec_command('echo $HOME')
config[server[0]] = {'HOST' : server[1].strip(),
'UNAME' : server[2].strip(),
'PORT' : server[3].strip(),
'PKEY' : pkey_path,
'RECON_PATH': os.path.join(stdout.readlines()[0].strip('\n'), '.recon'),
'JUPYTER' : server[4].strip()}
except:
print(f"[-] Host {server[1].strip()} is Unavailable.")
if append:
return config
# with open(path, 'w') as configfile:
# config.write(configfile)
else:
with open(os.path.join(path,'config','servers.ini'), 'w+') as configfile:
config.write(configfile)
def remove_server(server_config,workspace_config, server_name):
server_config.remove_section(server_name)
workspace_config.remove_section(server_name)
return server_config, workspace_config
def save_ini(config, path, mode='w+'):
with open(path, mode) as configfile:
config.write(configfile)
def delete_workspace(ws_name, section, ws_config):
del ws_config[section][ws_name]
return ws_config
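# Example servers.ini written by server_ini_creator (values illustrative; keys are
# lower-cased by configparser on write):
# [pi]
# host = 192.168.1.1
# uname = Josh
# port = 22
# pkey = /home/josh/.ssh/id_rsa
# recon_path = /home/josh/.recon
# jupyter = y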
|
import warnings
from typing import Sequence
import numpy as np
import torch
from .base import Flow
from .inverted import InverseFlow
__all__ = ["SplitFlow", "MergeFlow", "SwapFlow", "CouplingFlow", "WrapFlow", "SetConstantFlow"]
class SplitFlow(Flow):
"""Split the input tensor into multiple output tensors.
Parameters
----------
*sizes_or_indices : int or sequence of ints
If int: lengths of the output tensors in dimension `dim`.
        Otherwise: indices of the input tensor that are mapped to each output tensor.
dim : int
Dimension along which to split.
Raises
------
ValueError
        If the tensor is too short for the desired split in dimension `dim`.
Notes
-----
Specifying the size or indices of the last tensor is optional. If the tensor is longer
than the sum of all sizes, the last size will be inferred from the input
dimensions.
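    Examples
    --------
    A sketch of the intended round trip (shapes and values are assumptions):
    >>> split = SplitFlow(2, 3, dim=-1)
    >>> x = torch.randn(10, 5)
    >>> a, b, dlogp = split.forward(x)                # a: (10, 2), b: (10, 3)
    >>> y, dlogp = split.forward(a, b, inverse=True)  # y: (10, 5)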
"""
def __init__(self, *sizes_or_indices, dim=-1):
super().__init__()
if isinstance(sizes_or_indices[0], Sequence) or isinstance(sizes_or_indices[0], np.ndarray):
self._sizes = None
self._indices = sizes_or_indices
else:
self._sizes = sizes_or_indices
self._indices = None
self._split_dim = dim
def _forward(self, x, **kwargs):
if self._indices is None:
return (*self._split_with_sizes(x), self._dlogp(x))
else:
return (*self._split_with_indices(x), self._dlogp(x))
def _inverse(self, *xs, **kwargs):
if self._indices is None:
y = torch.cat(xs, dim=self._split_dim)
else:
y = self._cat_with_indices(*xs)
return y, self._dlogp(xs[0])
    def _dlogp(self, x):
        index = [slice(None)] * len(x.shape)
        index[self._split_dim] = slice(1)
        # index with a tuple; indexing with a list of slices is rejected by newer torch versions
        return torch.zeros_like(x[tuple(index)])
def _split_with_sizes(self, x):
last_size = x.shape[self._split_dim] - sum(self._sizes)
if last_size == 0:
sizes = self._sizes
elif last_size > 0:
sizes = [*self._sizes, last_size]
else:
raise ValueError(f"can't split x [{x.shape}] into sizes {self._sizes} along {self._split_dim}")
return torch.split(x, sizes, dim=self._split_dim)
def _split_with_indices(self, x):
is_done = torch.zeros(x.shape[self._split_dim], dtype=torch.bool)
result = []
for indices in self._indices:
if is_done[indices].any():
raise ValueError("Cannot split tensor. Indices are overlapping.")
result.append(x[self._range(indices, len(x.shape))])
is_done[indices] = True
if not is_done.all():
            raise ValueError(f"Split with indices missed indices {torch.arange(len(is_done))[~is_done]}")
return result
    def _range(self, indices, n_dimensions):
        dims = [slice(None) for _ in range(n_dimensions)]
        dims[self._split_dim] = list(indices)
        return tuple(dims)
def _cat_with_indices(self, *xs):
length = sum(len(indices) for indices in self._indices)
output_shape = list(xs[0].shape)
output_shape[self._split_dim] = length
y = torch.empty(*output_shape, device=xs[0].device, dtype=xs[0].dtype)
is_done = torch.zeros(length, dtype=torch.bool)
for x, indices in zip(xs, self._indices):
if is_done[indices].any():
raise ValueError("Cannot merge tensor. Indices are overlapping.")
y[self._range(indices, len(x.shape))] = x
is_done[indices] = True
if not is_done.all():
            raise ValueError(f"Merge with indices missed indices {torch.arange(len(is_done))[~is_done]}")
return y
class MergeFlow(InverseFlow):
def __init__(self, *sizes, dim=-1):
""" Shortcut to InverseFlow(SplitFlow()) """
super().__init__(SplitFlow(*sizes, dim=dim))
class SwapFlow(Flow):
def __init__(self):
""" Swaps two input channels """
super().__init__()
    def _forward(self, *xs, **kwargs):
        dlogp = torch.zeros(*xs[0].shape[:-1], 1).to(xs[0])
        if len(xs) == 1:
            warnings.warn("applying swapping on a single tensor has no effect")
        else:
            xs = (xs[1], xs[0], *xs[2:])
        return (*xs, dlogp)
    def _inverse(self, *xs, **kwargs):
        dlogp = torch.zeros(*xs[0].shape[:-1], 1).to(xs[0])
        if len(xs) == 1:
            warnings.warn("applying swapping on a single tensor has no effect")
        else:
            xs = (xs[1], xs[0], *xs[2:])
        return (*xs, dlogp)
class CouplingFlow(Flow):
"""Coupling Layer
Parameters
----------
transformer : torch.nn.Module
the transformer
transformed_indices : Iterable of int
indices of the inputs to be transformed
cond_indices : Iterable of int
indices of the inputs for the conditioner
cat_dim : int
the dimension along which the conditioner inputs are concatenated
Raises
------
ValueError
If transformer and conditioner indices are not disjointed.
"""
def __init__(self, transformer, transformed_indices=(1,), cond_indices=(0,), cat_dim=-1):
super().__init__()
self.transformer = transformer
self.transformed_indices = transformed_indices
self.cond_indices = cond_indices
invalid = np.intersect1d(self.transformed_indices, self.cond_indices)
if len(invalid) > 0:
raise ValueError(f"Indices {invalid} cannot be both transformed and conditioned on.")
self.cat_dim = cat_dim
def _forward(self, *x, **kwargs):
input_lengths = [x[i].shape[self.cat_dim] for i in self.transformed_indices]
inputs = torch.cat([x[i] for i in self.transformed_indices], dim=self.cat_dim)
cond_inputs = torch.cat([x[i] for i in self.cond_indices], dim=self.cat_dim)
x = list(x)
y, dlogp = self.transformer.forward(cond_inputs, inputs, **kwargs)
y = torch.split(y, input_lengths, self.cat_dim)
for i, yi in zip(self.transformed_indices, y):
x[i] = yi
return (*x, dlogp)
def _inverse(self, *x, **kwargs):
input_lengths = [x[i].shape[self.cat_dim] for i in self.transformed_indices]
inputs = torch.cat([x[i] for i in self.transformed_indices], dim=self.cat_dim)
cond_inputs = torch.cat([x[i] for i in self.cond_indices], dim=self.cat_dim)
x = list(x)
y, dlogp = self.transformer.forward(cond_inputs, inputs, **kwargs, inverse=True)
y = torch.split(y, input_lengths, self.cat_dim)
for i, yi in zip(self.transformed_indices, y):
x[i] = yi
return (*x, dlogp)
class WrapFlow(Flow):
"""Apply a flow to a subset of inputs.
Parameters
----------
flow : bgflow.Flow
The flow that is applied to a subset of inputs.
indices : Iterable of int
Indices of the inputs that are passed to the `flow`.
out_indices : Iterable of int
The outputs of the `flow` are assigned to those outputs of the wrapped flow.
By default, the out indices are the same as the indices.
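    Examples
    --------
    A sketch (shapes assumed): split only the second input into two outputs
    >>> wrap = WrapFlow(SplitFlow(2, 2), indices=(1,), out_indices=(1, 2))
    >>> x0, x1 = torch.randn(10, 3), torch.randn(10, 4)
    >>> y0, y1a, y1b, dlogp = wrap.forward(x0, x1)    # y0 is x0, unchanged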
"""
def __init__(self, flow, indices, out_indices=None):
super().__init__()
self._flow = flow
self._indices = indices
self._argsort_indices = np.argsort(indices)
self._out_indices = indices if out_indices is None else out_indices
self._argsort_out_indices = np.argsort(self._out_indices)
def _forward(self, *xs, **kwargs):
inp = (xs[i] for i in self._indices)
output = [xs[i] for i in range(len(xs)) if i not in self._indices]
*yi, dlogp = self._flow(*inp, **kwargs)
for i in self._argsort_out_indices:
index = self._out_indices[i]
output.insert(index, yi[i])
return (*tuple(output), dlogp)
def _inverse(self, *xs, **kwargs):
inp = (xs[i] for i in self._out_indices)
output = [xs[i] for i in range(len(xs)) if i not in self._out_indices]
*yi, dlogp = self._flow(*inp, inverse=True, **kwargs)
for i in self._argsort_indices:
index = self._indices[i]
output.insert(index, yi[i])
return (*tuple(output), dlogp)
class SetConstantFlow(Flow):
"""A flow that sets some inputs constant in the forward direction and removes them in the inverse.
Parameters
----------
indices : Sequence[int]
Indices to be set to constants.
values : Sequence[tensor]
Constant values; sequence has to have the same length as `indices`.
n_event_dims0 : int, optional
The number of event dims of x[0]. Required to infer the batch shape.
"""
def __init__(self, indices, values, n_event_dims0=1):
super().__init__()
argsort = np.argsort(indices)
self.indices = [indices[i] for i in argsort]
values = [values[i] for i in argsort]
for i, v in enumerate(values):
self.register_buffer(f"_values_{i}", v)
self.n_event_dims0 = n_event_dims0
@property
def values(self):
result = []
i = 0
while hasattr(self, f"_values_{i}"):
result.append(getattr(self, f"_values_{i}"))
i += 1
return result
def _forward(self, *xs, **kwargs):
"""insert constants"""
batch_shape = list(xs[0].shape[:self.n_event_dims0])
y = list(xs)
for i, v in zip(self.indices, self.values):
y.insert(i, v.repeat([*batch_shape, *np.ones_like(v.shape)]))
dlogp = torch.zeros(batch_shape + [1], device=xs[0].device, dtype=xs[0].dtype)
return (*y, dlogp)
def _inverse(self, *xs, **kwargs):
"""remove constants"""
y = tuple(xs[i] for i, z in enumerate(xs) if i not in self.indices)
batch_shape = list(y[0].shape[:self.n_event_dims0])
dlogp = torch.zeros(batch_shape + [1], device=y[0].device, dtype=y[0].dtype)
return (*y, dlogp)
|
from setuptools import setup
with open("pimp/__version__.py") as fh:
version = fh.readlines()[-1].split()[-1].strip("\"'")
setup(
name='PyImp',
version=version,
packages=['pimp', 'pimp.epm', 'pimp.utils', 'pimp.utils.io', 'pimp.evaluator', 'pimp.importance',
'pimp.configspace'],
entry_points={
'console_scripts': ['pimp=pimp.pimp:cmd_line_call'],
},
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
],
platforms=['Linux'],
    install_requires=[
        "numpy",
        "scikit-learn",
"matplotlib",
"ConfigSpace>=0.4",
"scipy",
"pyrfr>=0.8.0",
"smac>=0.8.0",
"fanova",
"tqdm",
"argcomplete",
"pandas",
"bokeh>=1.1.0",
],
url='',
license='BSD 3-clause',
author='biedenka',
author_email='biedenka@cs.uni-freiburg.de',
description='Package for automated Parameter Importance Analysis after Configuration.'
)
|
################################################################################
#
# MEASURE input file for acetyl + O2 reaction network
#
################################################################################
title = 'acetyl + oxygen'
description = \
"""
The chemically-activated reaction of acetyl with oxygen. This system is of
interest in atmospheric chemistry as a step in the conversion of acetaldehyde
to the secondary pollutant peroxyacetylnitrate (PAN); it is also potentially
important in the ignition chemistry of ethanol.
"""
species(
label='acetylperoxy',
SMILES='CC(=O)O[O]',
E0=(-34.6,'kcal/mol'),
states=States(
rotations=RigidRotor(
linear=False,
inertia=([54.2978, 104.8364, 156.0495],"amu*angstrom^2"),
symmetry=1,
),
vibrations=HarmonicOscillator(
frequencies=([321.607, 503.468, 539.885, 547.148, 731.506, 979.187, 1043.981, 1126.416, 1188.619, 1399.432, 1458.200, 1463.423, 1881.701, 3055.285, 3115.447, 3155.144], 'cm^-1'),
),
torsions=[
HinderedRotor(inertia=(7.38359,"amu*angstrom^2"), barrier=(6.11665,"kcal/mol"), symmetry=1),
HinderedRotor(inertia=(2.94725,"amu*angstrom^2"), barrier=(1.22157,"kcal/mol"), symmetry=3),
],
frequencyScaleFactor=0.99,
spinMultiplicity=2,
),
lennardJones=LennardJones(sigma=(5.09,'angstrom'), epsilon=(473,'K')),
)
species(
label='hydroperoxylvinoxy',
SMILES='[CH2]C(=O)OO',
E0=(-32.4,'kcal/mol'),
states=States(
rotations=RigidRotor(
linear=False,
inertia=([44.8035, 110.2250, 155.0285],"amu*angstrom^2"),
symmetry=1,
),
vibrations=HarmonicOscillator(
frequencies=([320.665, 423.425, 670.208, 680.006, 757.327, 869.833, 1004.445, 1025.639, 1243.606, 1446.509, 1494.622, 1697.996, 3164.246, 3282.391, 3454.878], 'cm^-1'),
),
torsions=[
HinderedRotor(inertia=(1.68465,"amu*angstrom^2"), barrier=(8.06130,"kcal/mol"), symmetry=2),
HinderedRotor(inertia=(8.50433,"amu*angstrom^2"), barrier=(14.7920,"kcal/mol"), symmetry=1),
HinderedRotor(inertia=(0.803313,"amu*angstrom^2"), barrier=(4.78136,"kcal/mol"), symmetry=1),
],
frequencyScaleFactor=0.99,
spinMultiplicity=2,
),
lennardJones=LennardJones(sigma=(5.09,'angstrom'), epsilon=(473,'K')),
)
species(
label='acetyl',
SMILES='C[C]=O',
E0=(0.0,'kcal/mol'),
states=States(
rotations=RigidRotor(
linear=False,
inertia=([5.94519, 50.8166, 53.6436],"amu*angstrom^2"),
symmetry=1,
),
vibrations=HarmonicOscillator(
frequencies=([467.090, 850.182, 1016.581, 1044.643, 1351.572, 1443.265, 1450.874, 1917.585, 3003.317, 3094.973, 3097.873], 'cm^-1'),
),
torsions=[
HinderedRotor(inertia=(1.61753,"amu*angstrom^2"), barrier=(0.510278,"kcal/mol"), symmetry=3),
],
frequencyScaleFactor=0.99,
spinMultiplicity=2,
),
)
species(
label='oxygen',
SMILES='[O][O]',
E0=(0.0,'kcal/mol'),
states=States(
rotations=RigidRotor(
linear=True,
inertia=(11.6056,"amu*angstrom^2"),
symmetry=2,
),
vibrations=HarmonicOscillator(
frequencies=([1631.232],"cm^-1"),
),
frequencyScaleFactor=0.99,
spinMultiplicity=3,
),
)
species(
label='ketene',
SMILES='C=C=O',
E0=(-6.6,'kcal/mol'),
)
species(
label='lactone',
SMILES='C1OC1(=O)',
E0=(-30.8,'kcal/mol'),
)
species(
label='hydroxyl',
SMILES='[OH]',
E0=(0.0,'kcal/mol'),
)
species(
label='hydroperoxyl',
SMILES='O[O]',
E0=(0.0,'kcal/mol'),
)
species(
label='nitrogen',
SMILES='N#N',
lennardJones=LennardJones(sigma=(3.70,'angstrom'), epsilon=(94.9,'K')),
collisionModel = SingleExponentialDown(
alpha0 = (0.5718,'kcal/mol'),
T0 = (300,'K'),
n = 0.85,
),
)
################################################################################
isomer('acetylperoxy')
isomer('hydroperoxylvinoxy')
reactants('acetyl', 'oxygen')
################################################################################
reaction(
reactants=['acetyl', 'oxygen'],
products=['acetylperoxy'],
kinetics=Arrhenius(
A=(2.65e6,'m^3/(mol*s)'),
n=0.0,
Ea=(0.0,'kcal/mol')
),
transitionState=TransitionState(
E0=(0.0,'kcal/mol'),
)
)
reaction(
reactants=['acetylperoxy'],
products=['hydroperoxylvinoxy'],
kinetics=Arrhenius(
A=(2.31e9,'s^-1'),
n=0.75,
Ea=(23.21,'kcal/mol')
),
transitionState=TransitionState(
E0=(-5.8,'kcal/mol'),
#states=States(
#rotations=RigidRotor(
#linear=False,
#inertia=([49.3418, 103.6975, 149.6820],"amu*angstrom^2"),
#symmetry=1,
#),
#vibrations=HarmonicOscillator(
#frequencies=([150.516, 309.455, 487.595, 540.400, 602.963, 679.579, 837.549, 923.888, 1028.647, 1037.741, 1107.523, 1136.811, 1409.904, 1711.388, 1855.194, 3096.997, 3181.924], 'cm^-1'),
#),
#frequencyScaleFactor=0.99,
#spinMultiplicity=2,
#),
frequency=(-1672.207,'cm^-1'),
)
)
reaction(
reactants=['acetylperoxy'],
products=['ketene', 'hydroperoxyl'],
kinetics=Arrhenius(
A=(2.62e9,'s^-1'),
n=1.24,
Ea=(34.06,'kcal/mol')
),
transitionState=TransitionState(
E0=(0.6,'kcal/mol'),
#states=States(
#rotations=RigidRotor(
#linear=False,
#inertia=([55.4256, 136.1886, 188.2442],"amu*angstrom^2"),
#symmetry=1,
#),
#vibrations=HarmonicOscillator(
#frequencies=([59.306, 205.421, 354.483, 468.861, 482.875, 545.574, 657.825, 891.898, 1023.947, 1085.617, 1257.494, 1316.937, 1378.552, 1688.566, 2175.346, 3079.822, 3154.325], 'cm^-1'),
#),
#frequencyScaleFactor=0.99,
#spinMultiplicity=2,
#),
frequency=(-1048.9950,'cm^-1'),
)
)
reaction(
reactants=['hydroperoxylvinoxy'],
products=['ketene', 'hydroperoxyl'],
kinetics=Arrhenius(
A=(5.33e16,'s^-1'),
n=-1.02,
Ea=(29.51,'kcal/mol')
),
transitionState=TransitionState(
E0=(-4.6,'kcal/mol'),
#states=States(
#rotations=RigidRotor(
#linear=False,
#inertia=([51.7432, 119.3733, 171.1165],"amu*angstrom^2"),
#symmetry=1,
#),
#vibrations=HarmonicOscillator(
#frequencies=([ 251.808, 386.126, 547.638, 582.452, 598.884, 709.641, 970.482, 1109.848, 1153.775, 1424.448, 1492.202, 1995.657, 3146.702, 3162.637, 3274.761], 'cm^-1'),
#),
#frequencyScaleFactor=0.99,
#spinMultiplicity=2,
#),
frequency=(-402.5870,'cm^-1'),
)
)
reaction(
reactants=['hydroperoxylvinoxy'],
products=['lactone', 'hydroxyl'],
kinetics=Arrhenius(
A=(1.92e17,'s^-1'),
n=-1.07,
Ea=(27.19,'kcal/mol')
),
transitionState=TransitionState(
E0=(-7.2,'kcal/mol'),
#states=States(
#rotations=RigidRotor(
#linear=False,
#inertia=([53.2821, 120.4050, 170.1570],"amu*angstrom^2"),
#symmetry=1,
#),
#vibrations=HarmonicOscillator(
#frequencies=([ 153.065, 182.997, 313.611, 350.732, 612.127, 628.112, 810.164, 954.550, 1001.209, 1002.946, 1176.094, 1421.050, 1845.809, 3143.113, 3264.608, 3656.183], 'cm^-1'),
#),
#frequencyScaleFactor=0.99,
#spinMultiplicity=2,
#),
frequency=(-615.7468,'cm^-1'),
)
)
#################################################################################
bathGas = {
'nitrogen': 1.0,
}
temperatures(Tmin=(300.0,'K'), Tmax=(2000.0,'K'), count=7)
pressures(Pmin=(0.01,'bar'), Pmax=(100.0,'bar'), count=5)
energies(dE=(0.5,'kcal/mol'), count=250)
method('modified strong collision')
#method('reservoir state')
interpolationModel('chebyshev', 6, 4)
|
from typing import Optional
from pyvisa import ResourceManager
from .scpi.operation import Operation
from .visa_instrument import VisaInstrument
class Instrument(VisaInstrument):
idn = Operation('*IDN?')
def __init__(self, name: str, address: str, read_term: str = '\n', write_term: str = '\n',
resource_manager: Optional[ResourceManager] = None):
super().__init__(name=name, address=address, read_term=read_term, write_term=write_term,
resource_manager=resource_manager)
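# --- Hedged usage sketch (not part of the original module) ---
# The instrument name and VISA resource address below are hypothetical placeholders,
# and it is assumed that the Operation descriptor issues its SCPI query ('*IDN?')
# when the attribute is accessed; adjust to the actual behaviour of the package.
if __name__ == '__main__':
    instrument = Instrument(name='demo-instrument', address='GPIB0::12::INSTR')
    print(instrument.idn)  # expected to print the instrument identification string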
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from unittest.mock import MagicMock
from botbuilder.core import TurnContext, BotState, MemoryStorage, UserState
from botbuilder.core.adapters import TestAdapter
from botbuilder.schema import Activity
from test_utilities import TestUtilities
RECEIVED_MESSAGE = Activity(type='message',
text='received')
STORAGE_KEY = 'stateKey'
def cached_state(context, state_key):
cached = context.services.get(state_key)
return cached['state'] if cached is not None else None
def key_factory(context):
assert context is not None
return STORAGE_KEY
class TestBotState(aiounittest.AsyncTestCase):
storage = MemoryStorage()
adapter = TestAdapter()
context = TurnContext(adapter, RECEIVED_MESSAGE)
middleware = BotState(storage, key_factory)
def test_state_empty_name(self):
#Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
#Act
with self.assertRaises(TypeError) as _:
user_state.create_property('')
def test_state_none_name(self):
#Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
#Act
with self.assertRaises(TypeError) as _:
user_state.create_property(None)
async def test_storage_not_called_no_changes(self):
"""Verify storage not called when no changes are made"""
# Mock a storage provider, which counts read/writes
dictionary = {}
async def mock_write_result(self):
return
async def mock_read_result(self):
return {}
mock_storage = MemoryStorage(dictionary)
mock_storage.write = MagicMock(side_effect= mock_write_result)
mock_storage.read = MagicMock(side_effect= mock_read_result)
# Arrange
user_state = UserState(mock_storage)
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property_a")
self.assertEqual(mock_storage.write.call_count, 0)
await user_state.save_changes(context)
await property_a.set(context, "hello")
        self.assertEqual(mock_storage.read.call_count, 1)  # Initial save triggers a read of the state
        self.assertEqual(mock_storage.write.call_count, 0)  # Nothing changed yet, so nothing is written
await property_a.set(context, "there")
self.assertEqual(mock_storage.write.call_count, 0) # Set on property should not bump
await user_state.save_changes(context)
self.assertEqual(mock_storage.write.call_count, 1) # Explicit save should bump
value_a = await property_a.get(context)
self.assertEqual("there", value_a)
self.assertEqual(mock_storage.write.call_count, 1) # Gets should not bump
await user_state.save_changes(context)
self.assertEqual(mock_storage.write.call_count, 1)
await property_a.delete(context) # Delete alone no bump
self.assertEqual(mock_storage.write.call_count, 1)
await user_state.save_changes(context) # Save when dirty should bump
self.assertEqual(mock_storage.write.call_count, 2)
self.assertEqual(mock_storage.read.call_count, 1)
await user_state.save_changes(context) # Save not dirty should not bump
self.assertEqual(mock_storage.write.call_count, 2)
self.assertEqual(mock_storage.read.call_count, 1)
async def test_state_set_no_load(self):
"""Should be able to set a property with no Load"""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property_a")
await property_a.set(context, "hello")
async def test_state_multiple_loads(self):
"""Should be able to load multiple times"""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property_a")
await user_state.load(context)
await user_state.load(context)
async def test_State_GetNoLoadWithDefault(self):
"""Should be able to get a property with no Load and default"""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property_a")
value_a = await property_a.get(context, lambda : "Default!")
self.assertEqual("Default!", value_a)
async def test_State_GetNoLoadNoDefault(self):
"""Cannot get a string with no default set"""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property_a")
value_a = await property_a.get(context)
# Assert
self.assertIsNone(value_a)
async def test_State_POCO_NoDefault(self):
"""Cannot get a POCO with no default set"""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
test_property = user_state.create_property("test")
value = await test_property.get(context)
# Assert
self.assertIsNone(value)
async def test_State_bool_NoDefault(self):
"""Cannot get a bool with no default set"""
        # Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
test_property = user_state.create_property("test")
value = await test_property.get(context)
# Assert
self.assertFalse(value)
"""
TODO: Check if default int functionality is needed
async def test_State_int_NoDefault(self):
""Cannot get a int with no default set""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
test_property = user_state.create_property("test")
value = await test_property.get(context)
# Assert
self.assertEqual(0, value)
"""
async def test_State_SetAfterSave(self):
"""Verify setting property after save"""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property-a")
property_b = user_state.create_property("property-b")
await user_state.load(context)
await property_a.set(context, "hello")
await property_b.set(context, "world")
await user_state.save_changes(context)
await property_a.set(context, "hello2")
async def test_State_MultipleSave(self):
"""Verify multiple saves"""
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property-a")
property_b = user_state.create_property("property-b")
await user_state.load(context)
await property_a.set(context, "hello")
await property_b.set(context, "world")
await user_state.save_changes(context)
await property_a.set(context, "hello2")
await user_state.save_changes(context)
value_a = await property_a.get(context)
self.assertEqual("hello2", value_a)
async def test_LoadSetSave(self):
# Arrange
dictionary = {}
user_state = UserState(MemoryStorage(dictionary))
context = TestUtilities.create_empty_context()
# Act
property_a = user_state.create_property("property-a")
property_b = user_state.create_property("property-b")
await user_state.load(context)
await property_a.set(context, "hello")
await property_b.set(context, "world")
await user_state.save_changes(context)
# Assert
obj = dictionary["EmptyContext/users/empty@empty.context.org"]
self.assertEqual("hello", obj["property-a"])
self.assertEqual("world", obj["property-b"])
async def test_LoadSetSaveTwice(self):
# Arrange
dictionary = {}
context = TestUtilities.create_empty_context()
# Act
user_state = UserState(MemoryStorage(dictionary))
property_a = user_state.create_property("property-a")
property_b = user_state.create_property("property-b")
        property_c = user_state.create_property("property-c")
await user_state.load(context)
await property_a.set(context, "hello")
await property_b.set(context, "world")
        await property_c.set(context, "test")
await user_state.save_changes(context)
# Assert
obj = dictionary["EmptyContext/users/empty@empty.context.org"]
self.assertEqual("hello", obj["property-a"])
self.assertEqual("world", obj["property-b"])
# Act 2
user_state2 = UserState(MemoryStorage(dictionary))
property_a2 = user_state2.create_property("property-a")
property_b2 = user_state2.create_property("property-b")
await user_state2.load(context)
await property_a2.set(context, "hello-2")
await property_b2.set(context, "world-2")
await user_state2.save_changes(context)
# Assert 2
obj2 = dictionary["EmptyContext/users/empty@empty.context.org"]
self.assertEqual("hello-2", obj2["property-a"])
self.assertEqual("world-2", obj2["property-b"])
self.assertEqual("test", obj2["property-c"])
async def test_LoadSaveDelete(self):
# Arrange
dictionary = {}
context = TestUtilities.create_empty_context()
# Act
user_state = UserState(MemoryStorage(dictionary))
property_a = user_state.create_property("property-a")
property_b = user_state.create_property("property-b")
await user_state.load(context)
await property_a.set(context, "hello")
await property_b.set(context, "world")
await user_state.save_changes(context)
# Assert
obj = dictionary["EmptyContext/users/empty@empty.context.org"]
self.assertEqual("hello", obj["property-a"])
self.assertEqual("world", obj["property-b"])
# Act 2
user_state2 = UserState(MemoryStorage(dictionary))
property_a2 = user_state2.create_property("property-a")
property_b2 = user_state2.create_property("property-b")
await user_state2.load(context)
await property_a2.set(context, "hello-2")
await property_b2.delete(context)
await user_state2.save_changes(context)
# Assert 2
obj2 = dictionary["EmptyContext/users/empty@empty.context.org"]
self.assertEqual("hello-2", obj2["property-a"])
with self.assertRaises(KeyError) as _:
obj2["property-b"]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
__title__ = "flask-boilerplate"
__description__ = "Boilerplate for Flask API"
__url__ = "https://github.com/openboilerplates/flask-boilerplate"
__version__ = "1.0.4"
__author__ = "Fakabbir Amin"
__author_email__ = "fakabbir@gmail.com"
__license__ = "MIT License"
setuptools.setup(
name=__title__,
version=__version__,
author=__author__,
author_email=__author_email__,
description=__description__,
url=__url__,
packages=setuptools.find_packages(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
python_requires='>=3.6',
)
|
"""
author OW
last reviewed/run 19-04-2020
"""
import matplotlib.pyplot as plt
import numpy as np
from oneibl.one import ONE
import ibllib.plots as iblplt
from brainbox.processing import bincount2D
from brainbox.io import one as bbone
T_BIN = 0.05
D_BIN = 5
# get the data from flatiron and the current folder
one = ONE()
eid = one.search(subject='CSHL045', date='2020-02-26', number=1)[0]
spikes, clusters, trials = bbone.load_ephys_session(eid, one=one, dataset_types=['spikes.depth'])
pname = list(spikes.keys())[0]
# compute raster map as a function of site depth
R, times, depths = bincount2D(spikes[pname]['times'], spikes[pname]['depths'], T_BIN, D_BIN)
# plot raster map
plt.imshow(R, aspect='auto', cmap='binary', vmax=T_BIN / 0.001 / 4,
extent=np.r_[times[[0, -1]], depths[[0, -1]]], origin='lower')
# plot trial start and reward time
reward = trials['feedback_times'][trials['feedbackType'] == 1]
iblplt.vertical_lines(trials['intervals'][:, 0], ymin=0, ymax=depths[-1],
color='k', linewidth=0.5, label='trial starts')
iblplt.vertical_lines(reward, ymin=0, ymax=depths[-1], color='m', linewidth=0.5,
label='valve openings')
plt.xlabel('Time (s)')
plt.ylabel('Depth (um)')
plt.legend()
|
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Space Simulation - Onboard Queue *
#******************************************************************************
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import CCSDS.PACKET, CCSDS.TIME
import PUS.PACKET, PUS.SERVICES
import SPACE.IF
import UTIL.SYS, UTIL.TASK, UTIL.TCO, UTIL.TIME
#############
# constants #
#############
CHECK_CYCLIC_PERIOD_MS = 100
###########
# classes #
###########
# =============================================================================
class OnboardQueueImpl(SPACE.IF.OnboardQueue):
"""Implementation of the onboard computer"""
# ---------------------------------------------------------------------------
def __init__(self):
"""Initialise attributes only"""
    self.ttTimeFormat = CCSDS.TIME.timeFormat(UTIL.SYS.s_configuration.TM_TT_TIME_FORMAT)
self.ttTimeByteOffset = int(UTIL.SYS.s_configuration.TC_TT_TIME_BYTE_OFFSET)
self.ttByteOffset = int(UTIL.SYS.s_configuration.TC_TT_PKT_BYTE_OFFSET)
self.queue = {}
self.checkCyclicCallback()
# ---------------------------------------------------------------------------
def getQueue(self):
"""
returns the onboard queue:
implementation of SPACE.IF.OnboardQueue.getQueue
"""
return self.queue
# ---------------------------------------------------------------------------
def pushMngPacket(self, tcPacketDu):
"""
consumes a management telecommand packet:
implementation of SPACE.IF.OnboardQueue.pushMngPacket
"""
LOG_INFO("pushMngPacket", "OBQ")
LOG("APID = " + str(tcPacketDu.applicationProcessId), "OBQ")
LOG("TYPE = " + str(tcPacketDu.serviceType), "OBQ")
LOG("SUBTYPE = " + str(tcPacketDu.serviceSubType), "OBQ")
LOG("SSC = " + str(tcPacketDu.sequenceControlCount), "OBQ")
# check if the packet is a TT uplink command
if tcPacketDu.serviceSubType in PUS.SERVICES.TC_OBQ_UPLINK_SUBTYPES:
# extract the embedded TT packet from the TT uplink command
# consider also the CRC (2 bytes)
ttPacketMaxSize = len(tcPacketDu) - self.ttByteOffset - 2
if ttPacketMaxSize <= CCSDS.PACKET.PRIMARY_HEADER_BYTE_SIZE:
LOG_ERROR("cannot extract TT packet, not enought bytes", "OBQ")
LOG(str(tcPacketDu), "OBQ")
return
ttPacketData = tcPacketDu.getBytes(self.ttByteOffset, ttPacketMaxSize)
ttPacketDu = PUS.PACKET.TCpacket(ttPacketData)
ttPacketSize = CCSDS.PACKET.PRIMARY_HEADER_BYTE_SIZE + ttPacketDu.packetLength + 1
# consistency checks
if ttPacketSize < CCSDS.PACKET.PRIMARY_HEADER_BYTE_SIZE:
LOG_ERROR("packetLength of TT packet too small", "OBQ")
LOG(str(ttPacketDu), "OBQ")
return
if ttPacketSize > ttPacketMaxSize:
LOG_ERROR("packetLength of TT packet too large", "OBQ")
LOG(str(ttPacketDu), "OBQ")
return
# resize the ttPacketDu to match the packetLength
ttPacketDu.setLen(ttPacketSize)
if not ttPacketDu.checkChecksum():
LOG_ERROR("invalid TT packet CRC", "OBQ")
LOG(str(ttPacketDu), "OBQ")
return
# calculate the execution time
      obtExecTime = tcPacketDu.getTime(self.ttTimeByteOffset, self.ttTimeFormat)
ttExecTime = UTIL.TCO.correlateFromOBTmissionEpoch(obtExecTime)
SPACE.IF.s_onboardQueue.insertTTpacket(ttExecTime, ttPacketDu)
# ---------------------------------------------------------------------------
def insertTTpacket(self, ttExecTime, ttPacketDu):
"""
inserts a time-tagged telecommand packet into the command queue
"""
LOG("insertTTpacket(" + UTIL.TIME.getASDtimeStr(ttExecTime) + ")", "OBQ")
self.queue[ttExecTime] = ttPacketDu
UTIL.TASK.s_processingTask.notifyGUItask("TT_PACKET")
# ---------------------------------------------------------------------------
def checkCyclicCallback(self):
"""timer triggered"""
UTIL.TASK.s_processingTask.createTimeHandler(CHECK_CYCLIC_PERIOD_MS,
self.checkCyclicCallback)
# check if execution times in the queue are expired
ttExecTimes = self.queue.keys()
ttExecTimes = sorted(ttExecTimes)
actualTime = UTIL.TIME.getActualTime()
cmdsDeleted = False
for ttExecTime in ttExecTimes:
if ttExecTime <= actualTime:
# execution time has expired ---> process the packet
ttPacketDu = self.queue[ttExecTime]
SPACE.IF.s_onboardComputer.processTCpacket(
ttPacketDu,
SPACE.IF.s_configuration.obqAck1,
SPACE.IF.s_configuration.obqAck2,
SPACE.IF.s_configuration.obqAck3,
SPACE.IF.s_configuration.obqAck4)
# remove command
del self.queue[ttExecTime]
cmdsDeleted = True
else:
break
if cmdsDeleted:
UTIL.TASK.s_processingTask.notifyGUItask("TT_PACKET")
#############
# functions #
#############
def init():
"""initialise singleton(s)"""
SPACE.IF.s_onboardQueue = OnboardQueueImpl()
|
import matplotlib.pyplot as plt
import numpy as np
x = np.array([1, 2, 3, 4], dtype=np.uint8)
y = x**2 + 1
plt.plot(x, y)
plt.grid(True)
plt.show()
|
N = int(input())
lst = [int(x)-1 for x in input().strip().split()][::-1]
lst[-2], lst[-1] = lst[-1], lst[-2]
ans = ((1<<N) - 1) * 2
for i in range(N-1, -1, -1):
if lst[2*i] != 2 * i:
if i == 0:
ans += 1
else:
ans += (1<<i) * 2
lst[2*i-2], lst[2*i-1] = lst[2*i-1], lst[2*i-2]
print(ans)
|
#!/usr/bin/env python
""" generated source for module Proposition """
# package: org.ggp.base.util.propnet.architecture.components
import org.ggp.base.util.gdl.grammar.GdlSentence
import org.ggp.base.util.propnet.architecture.Component
#
# * The Proposition class is designed to represent named latches.
#
@SuppressWarnings("serial")
class Proposition(Component):
""" generated source for class Proposition """
name = GdlSentence()
value = bool()
#
# * Creates a new Proposition with name <tt>name</tt>.
# *
# * @param name
#
def __init__(self, name):
""" generated source for method __init__ """
super(Proposition, self).__init__()
self.name = name
self.value = False
#
# * Getter method.
# *
#
def getName(self):
""" generated source for method getName """
return self.name
#
# * Setter method.
# *
# * This should only be rarely used; the name of a proposition
# * is usually constant over its entire lifetime.
# *
#
def setName(self, newName):
""" generated source for method setName """
self.name = newName
#
# *
# * @see org.ggp.base.util.propnet.architecture.Component#getValue()
#
def getValue(self):
""" generated source for method getValue """
return self.value
#
# * Setter method.
# *
# * @param value
#
def setValue(self, value):
""" generated source for method setValue """
self.value = value
#
# * @see org.ggp.base.util.propnet.architecture.Component#toString()
#
def __str__(self):
""" generated source for method toString """
return toDot("circle", "red" if self.value else "white", self.name.__str__())
|
from __future__ import absolute_import
from .data_obs_client import DataObsClient
from .sql_client import SQLClient
__all__ = [
'SQLClient',
'DataObsClient'
]
|
import json
filename = 'names.json'
with open(filename) as f:
username = json.load(f)
print(f"Welcome back {username}! ")
|
from .plots import four_plots, three_plots, two_plots, one_plot
def summary(data):
'''
    Function to display a summary of the dataframe
    Returns: Summary object
    Args:
    Takes in the dataframe object
    '''
    print('Title: Petrophysical Summary of the Parameters Evaluated')
return data.describe()
def log_plot(logs, GR=True, NPHI=True, RHOB=True, RT=True, no_plots=4):
'''
Plot log signatures of petrophysical parameters.
Args::
Function accepts a dataframe and a depth argument.
Plots the GR, Porosity, Density and Resistivity logs respectively
Pass True for the depth value if dataframe has a depth column,
    default is False (uses index as depth)
no_plots: No of plots to display depending on petrophysical parameters to be visualized
Default and max value is 4
'''
try:
'''
Setting up all possible combinations of required plots
'''
if GR and NPHI and RHOB and RT:
no_plots = 4
elif GR and NPHI and RHOB or GR and RHOB and RT or GR and NPHI and RT or NPHI and RHOB and RT:
no_plots = 3
        elif GR and NPHI or GR and RHOB or GR and RT or NPHI and RHOB or NPHI and RT or RHOB and RT:
no_plots = 2
elif GR or NPHI or RHOB or RT:
no_plots = 1
else:
no_plots = 0
raise InvalidEntryError(f'Enter an integer in the range (1-4). Set one or more petrophysical arguments to True')
#if number of plots is equal to four
if no_plots == 4:
four_plots(logs)
        #if number of plots is equal to three
elif no_plots == 3:
if GR and NPHI and RHOB:
three_plots(logs, 'GR', 'NPHI', 'RHOB')
elif GR and NPHI and RT:
three_plots(logs, 'GR', 'NPHI', 'RT')
elif GR and RHOB and RT:
three_plots(logs, 'GR', 'RHOB', 'RT')
elif NPHI and RHOB and RT:
three_plots(logs, 'NPHI', 'RHOB', 'RT')
#if number of plots is equal to two (possible combinations)
elif no_plots == 2:
if GR and NPHI:
two_plots(logs, 'GR', 'NPHI')
elif GR and RHOB:
two_plots(logs, 'GR', 'RHOB')
elif GR and RT:
two_plots(logs, 'GR', 'RT')
elif NPHI and RHOB:
two_plots(logs, 'NPHI', 'RHOB')
elif NPHI and RT:
two_plots(logs, 'NPHI', 'RT')
elif RHOB and RT:
two_plots(logs, 'RHOB', 'RT')
#if number of plots is equal to 1 (possible combinations)
elif no_plots == 1:
if GR:
one_plot(logs, 'GR')
elif NPHI:
one_plot(logs, 'NPHI')
elif RHOB:
one_plot(logs, 'RHOB')
else:
one_plot(logs, 'RT')
except ModuleNotFoundError as err:
print(f'Install module. {err}')
except AttributeError as err:
        print(f'AttributeError: Attribute not found. Specify attribute. {err}')
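# Hedged usage sketch (not part of the original module): 'well_logs.csv' and its
# column names (GR, NPHI, RHOB, RT) are hypothetical; the helpers above expect a
# DataFrame whose columns match the petrophysical arguments passed to log_plot.
if __name__ == '__main__':
    import pandas as pd  # assumed available alongside the plotting dependencies
    logs = pd.read_csv('well_logs.csv')
    print(summary(logs))
    log_plot(logs, GR=True, NPHI=True, RHOB=False, RT=False)  # two-track GR/NPHI plot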
|
from django_pg import models
class Game(models.Model):
label = models.CharField(max_length=100)
number = models.PositiveIntegerField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Moogle(models.Model):
game = models.ForeignKey(Game)
label = models.CharField(max_length=100)
sex = models.CharField(max_length=1, choices=(
('m', 'Male'),
('f', 'Female'),
))
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
select_related = 'game'
class Letter(models.Model):
game = models.ForeignKey(Game)
moogle = models.ForeignKey(Moogle)
addressee = models.CharField(max_length=20)
sender = models.CharField(max_length=20)
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
select_related = ['game', 'moogle']
|
"""Define one dimensional geometric entities."""
__author__ = "Daniel Ching, Doga Gursoy"
__copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = [
'Line',
'Segment',
]
import logging
from math import sqrt
import numpy as np
from xdesign.geometry.entity import *
from xdesign.geometry.point import *
logger = logging.getLogger(__name__)
class LinearEntity(Entity):
"""Define a base class for linear entities.
e.g. :class:`.Line`, :class:`.Segment`, and :class:`.Ray`.
The constructor takes two unique :class:`.Point`.
Attributes
----------
p1 : Point
p2 : Point
"""
def __init__(self, p1, p2):
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError("p1 and p2 must be Points")
if p1 == p2:
raise ValueError('Requires two unique Points.')
if p1.dim != p2.dim:
raise ValueError('Two Points must have same dimensionality.')
self.p1 = p1
self.p2 = p2
self._dim = p1.dim
def __repr__(self):
return "{}({}, {})".format(
type(self).__name__, repr(self.p1), repr(self.p2)
)
@property
def vertical(self):
"""Return True if line is vertical."""
return self.p1.x == self.p2.x
@property
def horizontal(self):
"""Return True if line is horizontal."""
return self.p1.y == self.p2.y
@property
def slope(self):
"""Return the slope of the line."""
if self.vertical:
return np.inf
else:
return ((self.p2.y - self.p1.y) / (self.p2.x - self.p1.x))
@property
def points(self):
"""Return the 2-tuple of points defining this linear entity."""
return (self.p1, self.p2)
@property
def length(self):
"""Return the length of the segment between p1 and p2."""
return self.p1.distance(self.p2)
@property
def tangent(self):
"""Return the unit tangent vector."""
dx = (self.p2._x - self.p1._x) / self.length
return Point(dx)
@property
def normal(self):
"""Return the unit normal vector."""
dx = (self.p2._x - self.p1._x) / self.length
R = np.array([[0, 1], [-1, 0]])
n = np.dot(R, dx)
return Point(n)
@property
def numpy(self):
"""Return row-size numpy array of p1 and p2."""
return np.stack((self.p1._x, self.p2._x), axis=0)
@property
def list(self):
"""Return an list of coordinates where p1 is the first D coordinates
and p2 is the next D coordinates."""
return np.concatenate((self.p1._x, self.p2._x), axis=0)
def translate(self, vector):
"""Translate the :class:`.LinearEntity` by the given vector."""
self.p1.translate(vector)
self.p2.translate(vector)
def rotate(self, theta, point=None, axis=None):
"""Rotate the :class:`.LinearEntity` by theta radians around an axis
defined by an axis and a point."""
self.p1.rotate(theta, point, axis)
self.p2.rotate(theta, point, axis)
class Line(LinearEntity):
"""Line in 2D cartesian space.
The constructor takes two unique :class:`.Point`.
Attributes
----------
p1 : Point
p2 : Point
"""
def __init__(self, p1, p2):
super(Line, self).__init__(p1, p2)
def __str__(self):
"""Return line equation."""
if self.vertical:
return "x = %s" % self.p1.x
elif self.dim == 2:
return "y = %sx + %s" % (self.slope, self.yintercept)
else:
A, B = self.standard
return "%sx " % '+ '.join([str(n) for n in A]) + "= " + str(B)
def __eq__(self, line):
return (self.slope, self.yintercept) == (line.slope, line.yintercept)
def intercept(self, n):
"""Calculates the intercept for the nth dimension."""
if n > self._dim:
return 0
else:
A, B = self.standard
if A[n] == 0:
return np.inf
else:
return B / A[n]
@property
def xintercept(self):
"""Return the x-intercept."""
if self.horizontal:
return np.inf
else:
return self.p1.x - 1 / self.slope * self.p1.y
@property
def yintercept(self):
"""Return the y-intercept."""
if self.vertical:
return np.inf
else:
return self.p1.y - self.slope * self.p1.x
@property
def standard(self):
"""Returns coeffients for the first N-1 standard equation coefficients.
The Nth is returned separately."""
A = np.stack([self.p1._x, self.p2._x], axis=0)
return calc_standard(A)
def distance(self, other):
"""Returns the closest distance between entities."""
# REF: http://geomalgorithms.com/a02-_lines.html
if not isinstance(other, Point):
raise NotImplementedError("Line to point distance only.")
d = np.cross(self.tangent._x, other._x - self.p1._x)
if self.dim > 2:
return sqrt(d.dot(d))
else:
return abs(d)
class Ray(Line):
"""Ray in 2-D cartesian space.
It is defined by two distinct points.
Attributes
----------
p1 : Point (source)
p2 : Point (point direction)
"""
def __init__(self, p1, p2):
super(Ray, self).__init__(p1, p2)
@property
def source(self):
"""The point from which the ray emanates."""
return self.p1
@property
def direction(self):
"""The direction in which the ray emanates."""
return self.p2 - self.p1
def distance(self, other):
# REF: http://geomalgorithms.com/a02-_lines.html
v = self.p2._x - self.p1._x
w = other._x - self.p1._x
c1 = np.dot(w, v)
if c1 <= 0:
return self.p1.distance(other)
else:
return super(Ray, self).distance(other)
class Segment(Line):
"""Defines a finite line segment from two unique points."""
def __init__(self, p1, p2):
super(Segment, self).__init__(p1, p2)
@property
def midpoint(self):
"""Return the midpoint of the line segment."""
return Point.midpoint(self.p1, self.p2)
def distance(self, other):
"""Return the distance to the other."""
# REF: http://geomalgorithms.com/a02-_lines.html
v = self.p2._x - self.p1._x
w = other._x - self.p1._x
c1 = np.dot(w, v)
c2 = np.dot(v, v)
if c1 <= 0:
return self.p1.distance(other)
elif c2 <= c1:
return self.p2.distance(other)
else:
return super(Segment, self).distance(other)
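# Hedged usage sketch (not part of the original module). It assumes the Point
# constructor accepts a coordinate sequence such as Point([x, y]); adjust if the
# actual signature in xdesign.geometry.point differs.
if __name__ == '__main__':
    a = Point([0.0, 0.0])
    b = Point([3.0, 4.0])
    print(Segment(a, b).length)  # 5.0 for the 3-4-5 right triangle
    print(Line(a, b).slope)      # 4/3, from (y2 - y1) / (x2 - x1)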
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier
from data import load_data_train_test_data, load_test_data, write_accuracy, write_logloss, \
load_train_data_with_PCA_per_type
from visualize import plot_cnf
train_x, train_y, test_x, test_y, genres, scaler_rythym, scaler_chroma, scaler_mfcc = load_train_data_with_PCA_per_type()
cls = XGBClassifier()
cls.fit(train_x, train_y)
print("Training Score: {:.3f}".format(cls.score(train_x, train_y)))
print("Test score: {:.3f}".format(cls.score(test_x, test_y)))
scores = cross_val_score(cls, train_x, train_y, cv=5, scoring='accuracy')
print("Cross val accuracy: ", scores.mean(), scores.std())
plot_cnf(cls, test_x, test_y)
test_data = load_test_data()
rythym = test_data[:, :168]
chroma = test_data[:, 169:216]
mfcc = test_data[:, 217:]
rythym = scaler_rythym.transform(rythym)  # apply the scalers fitted on the training data; refitting on test data would leak information
chroma = scaler_chroma.transform(chroma)
mfcc = scaler_mfcc.transform(mfcc)
rythym = preprocessing.normalize(rythym, norm='l2')
chroma = preprocessing.normalize(chroma, norm='l2')
mfcc = preprocessing.normalize(mfcc, norm='l2')
# rythym = pca_rythym.fit_transform(rythym)
# chroma = pca_chroma.fit_transform(chroma)
# mfcc = pca_mfcc.fit_transform(mfcc)
test_data = np.concatenate((rythym, chroma, mfcc), axis=1)
N = test_data.shape[0]
predictions = cls.predict(test_data)
predictions = predictions.reshape((predictions.shape[0], 1))
accuracy_data = predictions.astype(np.uint64)
write_accuracy(accuracy_data)
y_pred = cls.predict_proba(test_data)
write_logloss(y_pred)
# plt.show()
|
# Generated by Django 2.2.9 on 2021-03-10 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20200804_1527'),
]
operations = [
migrations.AddField(
model_name='parametros',
name='especificacoes_itens_kits',
field=models.FileField(blank=True, null=True, upload_to='', verbose_name='Especificações de itens dos kits'),
),
]
|
"""
#lookup.py
This file contains all the mappings between hickle/HDF5 metadata and python types.
There are four dictionaries and one set that are populated here:
1) types_dict
types_dict: mapping between python types and dataset creation functions, e.g.
types_dict = {
list: create_listlike_dataset,
int: create_python_dtype_dataset,
np.ndarray: create_np_array_dataset
}
2) hkl_types_dict
hkl_types_dict: mapping between hickle metadata and dataset loading functions, e.g.
hkl_types_dict = {
"<type 'list'>" : load_list_dataset,
"<type 'tuple'>" : load_tuple_dataset
}
3) container_types_dict
container_types_dict: mapping required to convert the PyContainer object in hickle.py
back into the required native type. PyContainer is required as
some iterable types are immutable (do not have an append() function).
Here is an example:
container_types_dict = {
"<type 'list'>": list,
"<type 'tuple'>": tuple
}
4) container_key_types_dict
container_key_types_dict: mapping specifically for converting hickled dict data back into
a dictionary with the same key type. While python dictionary keys
can be any hashable object, in HDF5 a unicode/string is required
for a dataset name. Example:
container_key_types_dict = {
"<type 'str'>": str,
"<type 'unicode'>": unicode
}
5) types_not_to_sort
type_not_to_sort is a list of hickle type attributes that may be hierarchical,
but don't require sorting by integer index.
## Extending hickle to add support for other classes and types
The process to add new load/dump capabilities is as follows:
1) Create a file called load_[newstuff].py in loaders/
2) In the load_[newstuff].py file, define your create_dataset and load_dataset functions,
along with all required mapping dictionaries.
3) Add an import call here, and populate the lookup dictionaries with update() calls:
# Add loaders for [newstuff]
try:
        from .loaders.load_[newstuff] import types_dict as ns_types_dict
        from .loaders.load_[newstuff] import hkl_types_dict as ns_hkl_types_dict
types_dict.update(ns_types_dict)
hkl_types_dict.update(ns_hkl_types_dict)
... (Add container_types_dict etc if required)
except ImportError:
raise
"""
import six
def return_first(x):
""" Return first element of a list """
return x[0]
def load_nothing(h_node):
    """ Load nothing; used for dataset types that are registered to be skipped. """
    pass
types_dict = {}
hkl_types_dict = {}
types_not_to_sort = [b'dict', b'csr_matrix', b'csc_matrix', b'bsr_matrix']
container_types_dict = {
b"<type 'list'>": list,
b"<type 'tuple'>": tuple,
b"<type 'set'>": set,
b"<class 'list'>": list,
b"<class 'tuple'>": tuple,
b"<class 'set'>": set,
b"csr_matrix": return_first,
b"csc_matrix": return_first,
b"bsr_matrix": return_first
}
# Technically, any hashable object can be used, for now sticking with built-in types
container_key_types_dict = {
b"<type 'str'>": str,
b"<type 'float'>": float,
b"<type 'bool'>": bool,
b"<type 'int'>": int,
b"<type 'complex'>": complex,
b"<class 'str'>": str,
b"<class 'float'>": float,
b"<class 'bool'>": bool,
b"<class 'int'>": int,
b"<class 'complex'>": complex
}
if six.PY2:
container_key_types_dict[b"<type 'unicode'>"] = unicode
container_key_types_dict[b"<type 'long'>"] = long
# Add loaders for built-in python types
if six.PY2:
from .loaders.load_python import types_dict as py_types_dict
from .loaders.load_python import hkl_types_dict as py_hkl_types_dict
else:
from .loaders.load_python3 import types_dict as py_types_dict
from .loaders.load_python3 import hkl_types_dict as py_hkl_types_dict
types_dict.update(py_types_dict)
hkl_types_dict.update(py_hkl_types_dict)
# Add loaders for numpy types
from .loaders.load_numpy import types_dict as np_types_dict
from .loaders.load_numpy import hkl_types_dict as np_hkl_types_dict
from .loaders.load_numpy import check_is_numpy_array
types_dict.update(np_types_dict)
hkl_types_dict.update(np_hkl_types_dict)
#######################
## ND-ARRAY checking ##
#######################
ndarray_like_check_fns = [
check_is_numpy_array
]
def check_is_ndarray_like(py_obj):
is_ndarray_like = False
for ii, check_fn in enumerate(ndarray_like_check_fns):
is_ndarray_like = check_fn(py_obj)
if is_ndarray_like:
break
return is_ndarray_like
#######################
## loading optional ##
#######################
def register_class(myclass_type, hkl_str, dump_function, load_function,
to_sort=True, ndarray_check_fn=None):
""" Register a new hickle class.
Args:
myclass_type type(class): type of class
dump_function (function def): function to write data to HDF5
load_function (function def): function to load data from HDF5
is_iterable (bool): Is the item iterable?
hkl_str (str): String to write to HDF5 file to describe class
to_sort (bool): If the item is iterable, does it require sorting?
ndarray_check_fn (function def): function to use to check if
"""
types_dict.update({myclass_type: dump_function})
hkl_types_dict.update({hkl_str: load_function})
if to_sort == False:
types_not_to_sort.append(hkl_str)
if ndarray_check_fn is not None:
ndarray_like_check_fns.append(ndarray_check_fn)
def register_class_list(class_list):
""" Register multiple classes in a list
Args:
class_list (list): A list, where each item is an argument to
the register_class() function.
Notes: This just runs the code:
for item in mylist:
register_class(*item)
"""
for class_item in class_list:
register_class(*class_item)
def register_class_exclude(hkl_str_to_ignore):
""" Tell loading funciton to ignore any HDF5 dataset with attribute 'type=XYZ'
Args:
hkl_str_to_ignore (str): attribute type=string to ignore and exclude from loading.
"""
hkl_types_dict[hkl_str_to_ignore] = load_nothing
def register_exclude_list(exclude_list):
""" Ignore HDF5 datasets with attribute type='XYZ' from loading
    Args:
exclude_list (list): List of strings, which correspond to hdf5/hickle
type= attributes not to load.
"""
for hkl_str in exclude_list:
register_class_exclude(hkl_str)
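# Hedged usage note (not part of the original module): the type strings below are
# hypothetical placeholders showing how the helper above would be called, e.g.
#   register_exclude_list([b"obsolete_type", b"debug_only_type"])
# which maps those hickle 'type' attributes to load_nothing so they are skipped.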
########################
## Scipy sparse array ##
########################
try:
from .loaders.load_scipy import class_register, exclude_register
register_class_list(class_register)
register_exclude_list(exclude_register)
except ImportError:
pass
except NameError:
pass
####################
## Astropy stuff ##
####################
try:
from .loaders.load_astropy import class_register
register_class_list(class_register)
except ImportError:
pass
##################
## Pandas stuff ##
##################
try:
from .loaders.load_pandas import class_register
register_class_list(class_register)
except ImportError:
pass
|
import logging
def assertSimilar(a, b):
if a != b:
raise AssertionError('Assertion failed: Value mismatch: %r (%s) != %r (%s)' % (a, type(a), b, type(b)))
def assertEqual(a, b):
if type(a) == type(b):
assertSimilar(a, b)
else:
raise AssertionError('Assertion failed: Type mismatch %r (%s) != %r (%s)' % (a, type(a), b, type(b)))
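# Hedged usage sketch (not part of the original module), illustrating the difference
# between the two helpers: assertSimilar only compares values, while assertEqual
# also requires matching types.
if __name__ == '__main__':
    assertEqual(2 + 2, 4)    # same value and same type: passes silently
    assertSimilar(2, 2.0)    # equal values, differing types: still passes
    try:
        assertEqual(2, 2.0)  # type mismatch: raises AssertionError
    except AssertionError as exc:
        print(exc)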
|
from StatementGame.database import conn, c, d, get_table_id
import StatementGame.question as question
def get_round(rid, game):
d.execute("SELECT * FROM round WHERE round={} AND game={}".format(rid, game))
rs = d.fetchone()
if rs is None:
return new_round(rid, game)
return rs
def new_round(rid, game):
p = question.get_question_pair()
c.execute("INSERT INTO round (game, round, regularStatement, falseStatement) VALUES ({}, {}, {}, {})".format(
game, rid, p[0]["id"], p[1]["id"]
))
conn.commit()
return get_round_id(c.lastrowid)
def get_round_id(rid):
d.execute("SELECT * FROM round WHERE id={}".format(rid))
return d.fetchone()
def get_responses(rid):
d.execute("select * from response join player on player.id=response.player where round={}".format(rid))
return d.fetchall()
|