content stringlengths 5 1.05M |
|---|
# Generated by Django 3.0.5 on 2020-04-30 14:54
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.0.5): re-declares the StreamField block
    # definitions for StaticPage.content and its translated columns
    # (content_en, content_pl).  All three fields share the same block list:
    # heading / paragraph / image / post_index (a StructBlock pairing a
    # PostIndex page chooser with a shown-post count >= 1).
    dependencies = [
        ('pages', '0004_remove_home_app'),
    ]
    operations = [
        migrations.AlterField(
            model_name='staticpage',
            name='content',
            field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('post_index', wagtail.core.blocks.StructBlock([('index', wagtail.core.blocks.PageChooserBlock(page_type=['blog.PostIndex'])), ('shown_posts', wagtail.core.blocks.IntegerBlock(min_value=1))]))], blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='staticpage',
            name='content_en',
            field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('post_index', wagtail.core.blocks.StructBlock([('index', wagtail.core.blocks.PageChooserBlock(page_type=['blog.PostIndex'])), ('shown_posts', wagtail.core.blocks.IntegerBlock(min_value=1))]))], blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='staticpage',
            name='content_pl',
            field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('post_index', wagtail.core.blocks.StructBlock([('index', wagtail.core.blocks.PageChooserBlock(page_type=['blog.PostIndex'])), ('shown_posts', wagtail.core.blocks.IntegerBlock(min_value=1))]))], blank=True, null=True),
        ),
    ]
|
import neovim
@neovim.plugin
class TestPlugin:
    """Minimal pynvim remote plugin exposing one function, one command and one autocmd."""

    def __init__(self, nvim):
        # Handle to the running Neovim instance, used by all handlers below.
        self.nvim = nvim

    @neovim.function("TestFunction", sync=True)
    def test_function(self, args):
        """Synchronously callable from Vimscript as TestFunction(...); always yields 3."""
        return 3

    @neovim.command("TestCommand", range='', nargs='*')
    def test_command(self, args, rng):
        """Replace the current line with a summary of the received args and range."""
        message = 'Hello with args: {}, range: {}'.format(args, rng)
        self.nvim.current.line = message

    @neovim.autocmd('BufEnter', pattern='*.py', eval='expand("<afile>")', sync=True)
    def on_bufenter(self, filename):
        """Announce, in Neovim's message area, entry into any Python buffer."""
        self.nvim.out_write("testplugin is in {} \n".format(filename))
|
from math import exp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# data
def create_data():
    """Return the first 100 iris samples (classes 0 and 1) as (features, labels).

    Only the first two feature columns (sepal length/width) are kept, making
    the problem linearly separable and easy to plot in 2-D.
    """
    iris = load_iris()
    frame = pd.DataFrame(iris.data, columns=iris.feature_names)
    frame['label'] = iris.target
    frame.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    subset = np.array(frame.iloc[:100, [0, 1, -1]])
    return subset[:, :2], subset[:, -1]
# Build the binary iris dataset and hold out 30% of it for evaluation.
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
class LogisticRegressionClf:
    """Binary logistic-regression classifier trained with per-sample gradient steps."""

    def __init__(self, max_iter=60, learning_rate=0.01):
        # max_iter: number of full passes over the training data.
        # learning_rate: step size applied to each per-sample weight update.
        self.max_iter = max_iter
        self.learning_rate = learning_rate

    def sigmoid(self, x):
        """Logistic function 1 / (1 + e^-x).

        Uses np.exp instead of math.exp: math.exp raises OverflowError for
        large |x|, while np.exp saturates to inf (-> sigmoid of 0.0 or 1.0).
        """
        return 1 / (1 + np.exp(-x))

    def data_matrix(self, x):
        """Prepend a constant 1.0 to every sample so weights[0] acts as the bias."""
        return [[1.0, *d] for d in x]

    def fit(self, X, y):
        """Train on samples X with binary labels y (0/1) via stochastic updates."""
        data_mat = self.data_matrix(X)  # m x n design matrix (with bias column)
        self.weights = np.zeros((len(data_mat[0]), 1), dtype=np.float32)
        error = 0.0
        # `epoch` instead of `iter`: avoid shadowing the builtin.
        for epoch in range(self.max_iter):
            for i in range(len(X)):
                result = self.sigmoid(np.dot(data_mat[i], self.weights))
                error = y[i] - result
                # Gradient step in the direction that reduces the prediction error.
                self.weights += self.learning_rate * error * np.transpose([data_mat[i]])
            print('iter: {}, error: {}'.format(epoch, error))
        print('LogisticRegreesion Model learning rate: {}, max_iter: {}'.format(
            self.learning_rate, self.max_iter))

    def score(self, X_test, y_test):
        """Return classification accuracy on (X_test, y_test)."""
        right = 0
        X_test = self.data_matrix(X_test)
        for x, y in zip(X_test, y_test):
            result = np.dot(x, self.weights)
            # The decision boundary is w.x = 0; sign of the logit decides class.
            if (result > 0 and y == 1) or (result < 0 and y == 0):
                right += 1
        return right / len(X_test)
# Train the classifier and report held-out accuracy.
lr_clf = LogisticRegressionClf()
lr_clf.fit(X_train, y_train)
print(lr_clf.score(X_test,y_test))
# Plot the learned decision boundary: w0 + w1*x + w2*y = 0  =>  y = -(w1*x + w0)/w2.
x_ponits = np.arange(4, 8)
y_ = -(lr_clf.weights[1]*x_ponits + lr_clf.weights[0])/lr_clf.weights[2]
plt.plot(x_ponits, y_)
plt.scatter(X[:50,0],X[:50,1], color='blue' ,label='0')
plt.scatter(X[50:,0],X[50:,1], color= 'orange',label='1')
plt.legend()
plt.show()
import torch
import torch.nn as nn
from .rl_base_strategy import RLBaseStrategy, Timestep, TimestepUnit
from .buffers import Rollout
from torch.optim.optimizer import Optimizer
from typing import Union, Optional, Sequence, List
from avalanche_rl.training.plugins.strategy_plugin import RLStrategyPlugin
from torch.optim import Optimizer
from torch.distributions import Categorical
from avalanche_rl.training import default_rl_logger
from avalanche_rl.models.actor_critic import A2CModel
class A2CStrategy(RLBaseStrategy):
    """Advantage Actor-Critic (A2C) strategy over batched multi-env rollouts.

    Loss = policy_loss_weight * policy_loss + value_loss_weight * value_loss,
    accumulated over all rollouts gathered in one update.
    """

    def __init__(
            self, model: A2CModel, optimizer: Optimizer,
            per_experience_steps: Union[int, Timestep, List[Timestep]],
            max_steps_per_rollout: int = 5,
            value_criterion=nn.MSELoss(),
            device='cpu',
            plugins: Optional[Sequence[RLStrategyPlugin]] = [],
            eval_every: int = -1, eval_episodes: int = 1,
            policy_loss_weight: float = 0.5,
            value_loss_weight: float = 0.5,
            evaluator=default_rl_logger, **kwargs):
        # multiple steps per rollout are supported through time dimension flattening
        # e.g. working with tensors of shape `n_envs`*`timesteps`x`obs_shape`
        super().__init__(
            model, optimizer, per_experience_steps=per_experience_steps,
            # only support max steps as to avoid getting rollouts of different length
            rollouts_per_step=-1,
            max_steps_per_rollout=max_steps_per_rollout,
            device=device, plugins=plugins,
            eval_every=eval_every, eval_episodes=eval_episodes,
            evaluator=evaluator, **kwargs)
        for exp_step in self.per_experience_steps:
            # BUGFIX: this line was a bare comparison with no effect; it was
            # clearly meant to validate the timestep unit, so make it an assert.
            assert exp_step.unit == TimestepUnit.STEPS, \
                'A2C only supports expressing training duration in steps not episodes'
        self.value_criterion = value_criterion
        self.ac_w = policy_loss_weight
        self.cr_w = value_loss_weight

    def sample_rollout_action(self, observations: torch.Tensor):
        """Sample one action per environment from the current policy.

        This processes a batch of observations and produces a batch of actions
        to better leverage the GPU, as in 'batched' A2C: `n_envs` x D -> `n_envs` x A.

        Args:
            observations (torch.Tensor): batch of observations, one per env.

        Returns:
            numpy array of sampled action indices, one per env.
        """
        # Sample from the policy head only; no gradients are needed here.
        with torch.no_grad():
            _, policy_logits = self._model_forward(
                self.model, observations, compute_value=False)
        # (alternative np.random.choice(num_outputs, p=np.squeeze(dist)))
        return Categorical(logits=policy_logits).sample().cpu().numpy()

    def update(self, rollouts: List[Rollout]):
        """Accumulate the A2C loss (policy + value terms) over `rollouts` into self.loss."""
        # perform gradient step(s) over gathered rollouts
        self.loss = 0.
        for rollout in rollouts:
            # move samples to device for processing and expect tensor of shape
            # `timesteps` x `n_envs` x D
            rollout = rollout.to(self.device)
            values, policy_logits = self._model_forward(
                self.model, rollout.observations)
            # ~log(softmax(taken_action_logits))
            # FIXME: remove view
            log_prob = Categorical(
                logits=policy_logits).log_prob(
                rollout.actions.view(-1,))
            # compute next states values
            next_values, _ = self._model_forward(
                self.model, rollout.next_observations, compute_policy=False)
            # mask terminal states values
            next_values[rollout.dones.view(-1,)] = 0.
            # Actor/Policy Loss Term in A2C: A(s_t, a_t) * grad log (pi(a_t|s_t))
            bootstrapped_returns = rollout.rewards + self.gamma * next_values
            advantages = bootstrapped_returns - values
            # get advantages of taken actions a_t FIXME: this will need view(-1,1)
            advantages = advantages.gather(dim=1, index=rollout.actions)
            policy_loss = -(advantages * log_prob).mean()
            # Value Loss Term: (R_t + gamma * V(S_{t+1}) - V(S_t))^2
            value_loss = self.value_criterion(bootstrapped_returns, values)
            # accumulate gradients for multi-rollout case
            self.loss += self.ac_w * policy_loss + self.cr_w * value_loss
|
import logging
import time
import requests
from bs4 import BeautifulSoup
from ghost import Ghost
WAIT_TIMEOUT_SECONDS = 10
SSL_LABS_URL = "https://www.ssllabs.com/ssltest/analyze.html?hideResults=on&d="
SSL_LABS_API_URL = "https://api.ssllabs.com/api/v2/analyze"
CHROME_USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36"
SSLLABS_APPLE_MITM_MESSAGE = "Due to a recently discovered bug in Apple's code, your browser is exposed to MITM attacks"
logger = logging.getLogger("SslLabsReport")
class HostReport():
    """Aggregated SSL Labs assessment results for a single hostname."""

    def __init__(self, host, grade, grade_ignore_trust, errors, warnings, protocols, ciphers):
        self.host = host
        self.grade = grade
        self.grade_ignore_trust = grade_ignore_trust
        self.warnings = warnings
        self.errors = errors
        self.protocols = protocols
        self.ciphers = ciphers
def analyze_all(hosts):
    """Run an SSL Labs analysis for every host and collect the reports in order."""
    return [analyze(host) for host in hosts]
def analyze(host):
    """Build a HostReport for *host*.

    Combines the SSL Labs REST api result (grade, protocols, ciphers) with
    errors/warnings scraped from the HTML report page.
    """
    start_global = time.time()
    logger.info("Report requested for host %s", host)
    start = time.time()
    logger.info("[%s] Invoking rest api", host)
    grade, grade_trust_ignored, protocols, ciphers = _request_api_result(host)
    logger.info("[%s] Rest api result received in %.2f seconds. Grade: %s (%s) %s %s", host, time.time() - start, grade,
                grade_trust_ignored, str(protocols), str(ciphers))
    logger.info("[%s] requesting html page", host)
    # BUGFIX: restart the clock here so the "html page processed" timing below
    # measures only the html phase, not the preceding api call as well.
    start = time.time()
    markup = _get_html_page(host)
    errors, warnings = _fetch_report(markup)
    logger.info("[%s] html page processed in %.2f seconds", host, time.time() - start)
    report = HostReport(host, grade, grade_trust_ignored, errors, warnings, protocols, ciphers)
    logger.info("[%s] report completed in %.2f seconds: %s (%s), %s, %s", host, time.time() - start_global, grade,
                grade_trust_ignored, str(errors), str(warnings))
    return report
def _wait_for_result(session, page):
    """Wait for the assessment result (#rating element) and return the page content.

    Raises:
        Exception: when the page did not load (non-200 status) or the result
        did not appear within WAIT_TIMEOUT_SECONDS.
    """
    # Guard clauses replace the original nested if/else.
    if page.http_status != 200:
        # BUGFIX: the original passed the status as a second Exception argument
        # ("%d" was never applied); format the message explicitly instead.
        raise Exception("Bad Response status %d" % page.http_status)
    start = time.time()
    wait_success, text = session.wait_for_selector("#rating", WAIT_TIMEOUT_SECONDS)
    if not wait_success:
        raise Exception("Wait failure")
    logger.info("Assessment data found in page in %.2f seconds", time.time() - start)
    return page.content
def _get_html_page(host):
    """Open the SSL Labs analysis page for *host* and block until the test completes."""
    ghost = Ghost()
    with ghost.start() as session:
        started = time.time()
        target = SSL_LABS_URL + host
        logger.debug("Request url: %s", target)
        page, extra_resources = session.open(
            target,
            headers={"User-Agent": CHROME_USER_AGENT},
            user_agent=CHROME_USER_AGENT,
            wait=True
        )
        logger.info("[%s] Html page retrieved in %.2f seconds,"
                    " starting wait for test completion", host, time.time() - started)
        return _wait_for_result(session, page)
def _fetch_report(html_markup):
    """Extract error and warning box texts from the SSL Labs report markup.

    Returns:
        (errors, warnings): lists of whitespace-normalized strings; the known
        Apple-MITM notice is filtered out of the errors.
    """
    soup = BeautifulSoup(html_markup, 'html.parser')
    rating_el = soup.find_all("div", id="rating")[0]
    logger.debug("#rating el found: %s", str(rating_el))
    text_transform = lambda x: ' '.join(x.text.split())
    errors = soup.find_all("div", class_="errorBox")
    logger.debug("Errors found: %s", str(errors))
    # BUGFIX: materialize concrete lists.  The original returned lazy `map`
    # objects, so the debug logs below printed "<map object ...>" and callers
    # could only iterate the results once.
    errors = [text_transform(er) for er in errors
              if not er.text.strip().startswith(SSLLABS_APPLE_MITM_MESSAGE)]
    logger.debug("Refined errors: %s", str(errors))
    warnings = soup.find_all("div", class_="warningBox")
    logger.debug("Warnings found: %s", str(warnings))
    warnings = [text_transform(w) for w in warnings]
    logger.debug("Refined warnings: %s", str(warnings))
    return errors, warnings
def _request_api_result(host, publish="off", startNew="off", all="done", ignoreMismatch="on"):
    """Poll the SSL Labs v2 analyze api until the scan for *host* finishes.

    Returns (grade, gradeTrustIgnored, protocols, ciphers) for the FIRST
    endpoint of the result.  Raises Exception on an api-level error response.

    NOTE(review): the `startNew` parameter is accepted but never added to the
    payload, so a cached assessment is always reused — confirm this is intended.
    """
    payload = {'host': host, 'publish': publish, 'all': all, 'ignoreMismatch': ignoreMismatch}
    result = requests.get(SSL_LABS_API_URL, params=payload).json()
    if 'errors' in result:
        raise Exception("Incorrect api call: " + str(result))
    # Poll every 30s until the assessment either completes or fails.
    while result['status'] != 'READY' and result['status'] != 'ERROR':
        retry_interval_seconds = 30
        logger.info("[%s] Scan in progress, next check in %d seconds", host, retry_interval_seconds)
        time.sleep(retry_interval_seconds)
        result = requests.get(SSL_LABS_API_URL, params=payload).json()
    logger.debug("[%s] Result: %s", host, str(result))
    endpoint = result["endpoints"][0]
    return endpoint['grade'], endpoint['gradeTrustIgnored'], _protocols(endpoint), _ciphers(endpoint)
def _protocols(result):
return map(lambda p: p['name'] + p['version'], result['details']['protocols'])
def _ciphers(result):
return map(lambda p: p['name'], result['details']['suites']['list'])
|
#!/usr/bin/env python
from abc import ABC
from contextlib import redirect_stdout, redirect_stderr
from unittest import TestCase, main
from unittest.mock import Mock
from cli_command_parser import Command, CommandConfig, Context
from cli_command_parser.core import get_config, get_parent, get_params
from cli_command_parser.error_handling import no_exit_handler, extended_error_handler
from cli_command_parser.exceptions import CommandDefinitionError, NoSuchOption, ParamConflict
from cli_command_parser.parameters import Action, ActionFlag, SubCommand, Positional, Flag
class TestCommands(TestCase):
    """Tests for cli_command_parser Command behavior: Action/ActionFlag dispatch,
    definition-time validation, config handling, and parse Context plumbing."""

    # --- Action / ActionFlag dispatch ---

    def test_true_on_action_handled(self):
        mock = Mock(__name__='foo')

        class Foo(Command):
            action = Action()
            action(mock)

        foo = Foo.parse(['foo'])
        self.assertFalse(mock.called)
        self.assertTrue(foo.main())
        self.assertTrue(mock.called)

    def test_actions_taken_incremented_on_action_flag_handled(self):
        mock = Mock()

        class Foo(Command):
            foo = ActionFlag()(mock)

        foo = Foo.parse(['--foo'])
        self.assertFalse(mock.called)
        self.assertEqual(0, foo.ctx.actions_taken)
        self.assertEqual(1, foo())
        self.assertEqual(1, foo.ctx.actions_taken)
        self.assertTrue(mock.called)

    def test_false_on_no_action(self):
        mock = Mock()

        class Foo(Command):
            foo = ActionFlag()(mock)

        foo = Foo.parse([])
        with foo.ctx:
            self.assertFalse(foo.main())
        self.assertFalse(mock.called)

    def test_parse_and_run(self):
        mock = Mock(__name__='bar')

        class Foo(Command):
            action = Action()
            action.register(mock)

        Foo.parse_and_run(['bar'])
        self.assertEqual(mock.call_count, 1)

    # --- Definition-time validation of sub-commands / actions ---

    def test_choice_in_sub_class_warns_on_no_sub_cmd_param(self):
        class Foo(Command):
            pass

        with self.assertWarnsRegex(Warning, expected_regex='has no SubCommand parameter'):
            class Bar(Foo, choice='bar'):
                pass

    def test_multiple_actions_rejected(self):
        class Foo(Command):
            a = Action()
            b = Action()

        with self.assertRaises(CommandDefinitionError):
            Foo.parse([])

    def test_multiple_sub_cmds_rejected(self):
        class Foo(Command):
            a = SubCommand()
            b = SubCommand()

        with self.assertRaises(CommandDefinitionError):
            Foo.parse([])

    def test_action_with_sub_cmd_rejected(self):
        class Foo(Command):
            a = SubCommand()
            b = Action()

        with self.assertRaises(CommandDefinitionError):
            Foo.parse([])

    def test_choice_with_no_parent_warns(self):
        with self.assertWarnsRegex(Warning, 'because it has no parent Command'):
            class Foo(Command, choice='foo'):
                pass

    def test_positional_after_sub_cmd_rejected(self):
        with self.assertRaisesRegex(CommandDefinitionError, 'may not follow the sub command'):
            class Foo(Command):
                sub = SubCommand()
                pos = Positional()

            class Bar(Foo, choice='bar'):
                pass

    def test_two_actions_rejected(self):
        class Foo(Command):
            foo = Action()
            bar = Action()
            foo(Mock(__name__='baz'))

        with self.assertRaisesRegex(CommandDefinitionError, 'Only 1 Action xor SubCommand is allowed'):
            Foo.parse([])

    def test_action_with_sub_command_rejected(self):
        class Foo(Command):
            foo = Action()
            bar = SubCommand()
            foo(Mock(__name__='baz'))

        with self.assertRaisesRegex(CommandDefinitionError, 'Only 1 Action xor SubCommand is allowed'):
            Foo.parse([])

    # --- Error-handler configuration ---

    def test_no_error_handler_run(self):
        class Foo(Command, error_handler=None):
            bar = Flag()
            __call__ = Mock()

        Foo.parse_and_run([])
        self.assertTrue(Foo.__call__.called)

    def test_no_error_handler_main(self):
        class Foo(Command, error_handler=None):
            bar = Flag()
            main = Mock()

        Foo.parse_and_run([])
        self.assertTrue(Foo.main.called)

    def test_no_run_after_parse_error(self):
        class Foo(Command, error_handler=no_exit_handler):
            bar = Flag()
            __call__ = Mock()

        mock = Mock(close=Mock())
        with redirect_stdout(mock), redirect_stderr(mock):
            Foo.parse_and_run(['-B'])
        self.assertFalse(Foo.__call__.called)

    def test_no_warn_on_parent_without_choice(self):
        class Foo(Command):
            pass

        class Bar(Foo):
            pass

        self.assertEqual(get_params(Bar).command_parent, Foo)

    # --- CommandConfig sources and inheritance ---

    def test_double_config_rejected(self):
        with self.assertRaisesRegex(CommandDefinitionError, 'Cannot combine .* with keyword config'):
            class Foo(Command, config=CommandConfig(), multiple_action_flags=True):
                pass

    def test_config_defaults(self):
        class Foo(Command):
            pass

        config = Foo.config()
        self.assertDictEqual(config.as_dict(), CommandConfig().as_dict())

    def test_config_from_kwarg(self):
        default = CommandConfig().multiple_action_flags

        class Foo(Command, multiple_action_flags=not default):
            pass

        self.assertEqual(Foo.config().multiple_action_flags, not default)

    def test_config_from_dict(self):
        default = CommandConfig().multiple_action_flags

        class Foo(Command, config={'multiple_action_flags': not default}):
            pass

        self.assertEqual(Foo.config().multiple_action_flags, not default)

    def test_config_explicit(self):
        default = CommandConfig().multiple_action_flags

        class Foo(Command, config=CommandConfig(multiple_action_flags=not default)):
            pass

        self.assertEqual(Foo.config().multiple_action_flags, not default)

    def test_config_inherited(self):
        default_config = CommandConfig()

        class Foo(Command, multiple_action_flags=not default_config.multiple_action_flags):
            pass

        self.assertEqual(Foo.config().action_after_action_flags, default_config.action_after_action_flags)
        self.assertNotEqual(Foo.config().multiple_action_flags, default_config.multiple_action_flags)

        class Bar(Foo, action_after_action_flags=not default_config.action_after_action_flags):
            pass

        self.assertNotEqual(Bar.config().action_after_action_flags, default_config.action_after_action_flags)
        self.assertNotEqual(Bar.config().multiple_action_flags, default_config.multiple_action_flags)
        # Ensure Foo config has not changed:
        self.assertEqual(Foo.config().action_after_action_flags, default_config.action_after_action_flags)
        self.assertNotEqual(Foo.config().multiple_action_flags, default_config.multiple_action_flags)

    def test_default_error_handler_returned(self):
        self.assertIs(extended_error_handler, Context().get_error_handler())

    def test_no_help(self):
        class Foo(Command, add_help=False, error_handler=None):
            pass

        with self.assertRaises(NoSuchOption):
            Foo.parse_and_run(['-h'])

    def test_sub_command_adds_help(self):
        class Foo(Command, ABC):
            pass

        class Bar(Foo):
            pass

        with redirect_stdout(Mock()), self.assertRaises(SystemExit):
            Bar.parse_and_run(['-h'])

    # --- Context propagation between parse calls ---

    def test_argv_results_in_sub_context(self):
        class Foo(Command):
            pass

        for context in (Context(['a'], Foo, ignore_unknown=True), Context(['a'], ignore_unknown=True)):
            with context as ctx:
                foo = Foo.parse_and_run(['b'])
                self.assertIs(ctx, foo.ctx.parent)
                self.assertListEqual(['a'], ctx.argv)
                self.assertListEqual(['b'], foo.ctx.argv)

    def test_no_argv_results_in_keeping_context(self):
        class Foo(Command):
            pass

        with Context(['a'], Foo, ignore_unknown=True) as ctx:
            foo = Foo.parse_and_run()
            self.assertIs(ctx, foo.ctx)
            self.assertListEqual(['a'], ctx.argv)
            self.assertListEqual(['a'], foo.ctx.argv)

    def test_no_argv_no_cmd_resuls_in_sub_context(self):
        class Foo(Command):
            pass

        with Context(['a'], ignore_unknown=True) as ctx:
            foo = Foo.parse_and_run()
            self.assertIs(ctx, foo.ctx.parent)
            self.assertListEqual(['a'], ctx.argv)
            self.assertListEqual(['a'], foo.ctx.argv)

    def test_get_config(self):
        cfg = CommandConfig()

        class Foo(Command, config=cfg):
            pass

        self.assertIs(cfg, get_config(Foo))
        self.assertIs(cfg, get_config(Foo()))

    def test_get_parent(self):
        class Foo(Command):
            pass

        self.assertIs(Command, get_parent(Foo))
        self.assertIs(Command, get_parent(Foo()))

    def test_multiple_non_required_positionals_rejected(self):
        for a, b in (('?', '?'), ('?', '*'), ('*', '?'), ('*', '*')):
            with self.subTest(a=a, b=b):
                class Foo(Command):
                    foo = Positional(nargs=a)
                    bar = Positional(nargs=b)

                with self.assertRaises(CommandDefinitionError):
                    Foo.params()

    # --- _after_main_ hook semantics ---

    def test_after_main_not_called_after_exc(self):
        class Foo(Command):
            _after_main_ = Mock()

            def main(self):
                raise RuntimeError('test')

        with self.assertRaisesRegex(RuntimeError, 'test'):
            Foo.parse_and_run([])
        self.assertFalse(Foo._after_main_.called)

    def test_after_main_called_after_exc(self):
        class Foo(Command, always_run_after_main=True):
            _after_main_ = Mock()

            def main(self):
                raise RuntimeError('test')

        with self.assertRaisesRegex(RuntimeError, 'test'):
            Foo.parse_and_run([])
        self.assertTrue(Foo._after_main_.called)

    def test_action_after_action_flags_exc(self):
        act_flag_mock = Mock()
        action_mock = Mock(__name__='b')

        class Foo(Command, action_after_action_flags=False, error_handler=None):
            a = ActionFlag('-a')(act_flag_mock)
            c = Action()
            c(action_mock)

        with self.assertRaisesRegex(ParamConflict, 'combining an action with action flags is disabled'):
            Foo.parse_and_run(['b', '-a'])
        self.assertFalse(act_flag_mock.called)
        self.assertFalse(action_mock.called)

    def test_action_after_action_flags_ok(self):
        act_flag_mock = Mock()
        action_mock = Mock(__name__='b')

        class Foo(Command):
            a = ActionFlag('-a')(act_flag_mock)
            c = Action()
            c(action_mock)

        Foo.parse_and_run(['b', '-a'])
        self.assertTrue(act_flag_mock.called)
        self.assertTrue(action_mock.called)
if __name__ == '__main__':
    # Run the suite directly; exit=False keeps the interpreter alive so the
    # KeyboardInterrupt handler can print a newline for a clean ^C.
    try:
        main(warnings='ignore', verbosity=2, exit=False)
    except KeyboardInterrupt:
        print()
|
# NAME : Batter Up
# URL : https://open.kattis.com/problems/batterup
# =============================================================================
# Use a list comprehension to map strings to integers then do simple math on
# the list.
# =============================================================================
def main():
    """Read the at-bat results from stdin and print the batting average,
    skipping sentinel "-1" entries."""
    input()  # the first line (the count) is not needed
    outcomes = [int(token) for token in input().split() if token != "-1"]
    print(sum(outcomes) / len(outcomes))


if __name__ == "__main__":
    main()
|
#
# Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import os
import stat
import argparse
import os
import shutil
import subprocess
import platform
def ParseArguments():
    """Parse --buildDir/--configuration, defaulting to ./build and Debug."""
    parser = argparse.ArgumentParser(description="AWSNativeSDK Run all Integration Tests")
    parser.add_argument("--buildDir", action="store")
    parser.add_argument("--configuration", action="store")
    parsed = vars(parser.parse_args())
    return {
        "buildDir": parsed["buildDir"] or "./build",
        "configuration": parsed["configuration"] or "Debug",
    }
def AddExecutableBit(file):
st = os.stat(file)
os.chmod(file, st.st_mode | stat.S_IEXEC)
def _run_integration_test(build_dir, sub_dir, config_dir, exe_name, exe_extension):
    """Mark one integration-test binary executable and run it; a non-zero exit
    status raises subprocess.CalledProcessError, aborting the whole run."""
    test_path = build_dir + "/" + sub_dir + "/" + config_dir + "/" + exe_name + exe_extension
    AddExecutableBit(test_path)
    subprocess.check_call(test_path)


def Main():
    """Run all inexpensive AWS C++ SDK integration-test binaries in sequence."""
    arguments = ParseArguments()
    configDir = ""
    exeExtension = ""
    # Visual Studio puts executables into a configuration sub-dir, so append that.
    if platform.system() == "Windows":
        configDir = arguments["configuration"]
        exeExtension = ".exe"
    # The original repeated the chmod/run stanza six times; drive it from a
    # table instead (same order, same paths).
    tests = [
        ("aws-cpp-sdk-dynamodb-integration-tests", "runDynamoDBIntegrationTests"),
        ("aws-cpp-sdk-sqs-integration-tests", "runSqsIntegrationTests"),
        ("aws-cpp-sdk-s3-integration-tests", "runS3IntegrationTests"),
        ("aws-cpp-sdk-lambda-integration-tests", "runLambdaIntegrationTests"),
        ("aws-cpp-sdk-cognitoidentity-integration-tests", "runCognitoIntegrationTests"),
        ("aws-cpp-sdk-transfer-tests", "runTransferIntegrationTests"),
    ]
    for sub_dir, exe_name in tests:
        _run_integration_test(arguments["buildDir"], sub_dir, configDir, exe_name, exeExtension)
    # These will cost you lots of money, don't run them unless you decide you want to test this functionality
    # ("aws-cpp-sdk-cloudfront-integration-tests", "runCloudfrontIntegrationTests")
    # ("aws-cpp-sdk-redshift-integration-tests", "runRedshiftIntegrationTests")


# Run from powershell; make sure msbuild is in PATH environment variable
Main()
|
#!/usr/bin/env python
# encoding: utf-8
"""
delete-node-in-a-linked-list.py
Created by Shuailong on 2016-02-04.
https://leetcode.com/problems/delete-node-in-a-linked-list/.
"""
# Definition for singly-linked list.
class ListNode(object):
    """A singly-linked list node: a payload value and a pointer to the next node."""

    def __init__(self, x):
        self.val = x      # payload
        self.next = None  # successor node, or None at the tail
class Solution(object):
    def deleteNode(self, node):
        """
        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.

        Copy the successor's value into *node* and splice the successor out,
        removing *node*'s value without access to the list head.
        """
        successor = node.next
        node.val = successor.val
        node.next = successor.next
def main():
    """Placeholder entry point; the Solution class is exercised by the judge."""
    return None


if __name__ == '__main__':
    main()
|
import argparse
import json
import logging
from pathlib import Path, PurePath
from tqdm import tqdm
from utils.log import setup_logging
def main():
    """Rewrite every image 'file_name' in a COCO-style json to a new extension."""
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input-json', required=True)
    parser.add_argument('--output-json', required=True)
    parser.add_argument('--new-extension', required=True)
    args = parser.parse_args()

    input_path = Path(args.input_json)
    output_path = Path(args.output_json)
    # PurePath.with_suffix requires the leading dot.
    if not args.new_extension.startswith('.'):
        args.new_extension = '.' + args.new_extension

    log_path = args.output_json + '.log'
    setup_logging(log_path)
    logging.info('Args:\n%s', vars(args))  # lazy %-args instead of eager formatting

    # Validate inputs with explicit exceptions: the original used `assert`,
    # which is silently stripped under `python -O`.
    if not input_path.exists():
        raise FileNotFoundError('Input json does not exist: %s' % input_path)
    if output_path.exists():
        raise FileExistsError('Output json already exists: %s' % output_path)

    with open(input_path, 'r') as f:
        data = json.load(f)
    for image in tqdm(data['images']):
        image['file_name'] = str(
            PurePath(image['file_name']).with_suffix(args.new_extension))
    with open(output_path, 'w') as f:
        json.dump(data, f)


if __name__ == "__main__":
    main()
|
import sys
import runner
if __name__ == '__main__':
    args = sys.argv
    # Expect exactly two user arguments: the output csv and the config json.
    if len(args) != 3:
        print("""
    Usage:
        python main.py <output_file.csv> <config_file.json>
    """)
        # sys.exit is the supported way to terminate a script: the bare
        # `exit()` builtin is injected by the `site` module and may be absent.
        sys.exit()
    runner.run(
        args[2],
        "data/skills.json",
        "data/armour_sets.json",
        "data/armour.json",
        "data/charms.json",
        args[1]
    )
|
# -*- coding: utf-8 -*-
from model.person import Person
import pytest
import random
import string
def test_add_contact(app, json_contacts, check_ui, db):
    """Add a contact through the UI form and verify the db count grows by one.

    When --check_ui is enabled, additionally compare the (whitespace-normalized)
    db contact list against the list scraped from the UI.
    """
    contact = json_contacts
    old_contacts = db.get_contact_list()
    app.contact.add_contact_fill_form(contact)
    assert len(old_contacts) + 1 == app.contact.count()
    if check_ui:
        def clean(contact):
            # Strip whitespace padding the UI adds around names before comparing.
            return Person(id=contact.id, firstname=contact.firstname.strip(), lastname=contact.lastname.strip())
        # NOTE(review): old_contacts is appended to but never used by the
        # assertion below (the UI list is compared against the fresh db list) —
        # confirm whether old_contacts + [contact] was meant to be compared.
        old_contacts.append(contact)
        new_contacts = map(clean, db.get_contact_list())
        assert sorted(new_contacts, key=Person.id_or_max) == sorted(app.contact.get_contact_list(), key=Person.id_or_max)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import os
import numpy as np
import shutil
import argparse
from DataHolder import DataHolder
from Config import Config
from Trainer import Trainer
from DFN import DFN
from CNN import CNN
from util import reconstruct_from_record, accuracy_per_category
from util import int2command
def lr_search(name_tfrecords,
              records,
              height,
              width,
              channels,
              architecture,
              activations,
              conv_architecture,
              kernel_sizes,
              pool_kernel,
              batch_size,
              epochs,
              num_steps,
              save_step,
              optimizer,
              experiments,
              conv,
              divisor):
    """
    Script to run different experiments
    to search a learning rate value,
    the result is saved on the file learning_rate_results.txt

    :param name_tfrecords: name of the used tfrecords
    :type name_tfrecords: str
    :param records: list of paths to train, test, and valid tfrecords
    :type records: list of str
    :param height: image height
    :type height: int
    :param width: image width
    :type width: int
    :param channels: image channels
    :type channels: int
    :param architecture: network architecture
    :type architecture: list of int
    :param activations: list of different tf functions
    :type activations: list of tf.nn.sigmoid, tf.nn.relu, tf.nn.tanh
    :param conv_architecture: convolutional architecture
    :type conv_architecture: list of int
    :param kernel_sizes: filter sizes
    :type kernel_sizes: list of int
    :param pool_kernel: pooling filter sizes
    :type pool_kernel: list of int
    :param batch_size: batch size for training
    :type batch_size: int
    :param epochs: number of epochs
    :type epochs: int
    :param num_steps: number of iterations for each epoch
    :type num_steps: int
    :param save_step: when step % save_step == 0, the model
                      parameters are saved.
    :type save_step: int
    :param optimizer: a optimizer from tensorflow.
    :type optimizer: tf.train.GradientDescentOptimizer,
                     tf.train.AdadeltaOptimizer,
                     tf.train.AdagradOptimizer,
                     tf.train.AdagradDAOptimizer,
                     tf.train.AdamOptimizer,
                     tf.train.FtrlOptimizer,
                     tf.train.ProximalGradientDescentOptimizer,
                     tf.train.ProximalAdagradOptimizer,
                     tf.train.RMSPropOptimizer
    :param experiments: number of experiments to be made
    :type experiments: int
    :param conv: param to control if the model will be a CNN
                 or DFN
    :type conv: bool
    :param divisor: param to resize the learning rate
    :type divisor: float
    """
    # Sample `experiments` candidate learning rates in [0, 1/divisor), ascending.
    LR = np.random.random_sample([experiments]) / divisor
    LR.sort()
    numeric_result = []
    results = []
    info = []
    LR = list(LR)
    if conv:
        net_name = "CNN"
    else:
        net_name = "DFN"
    header = "\nSearching learning rate for the model {} in the {} data\n".format(net_name,  # noqa
                                                                                  name_tfrecords)  # noqa
    print(header)
    for lr in LR:
        config = Config(height=height,
                        width=width,
                        channels=channels,
                        learning_rate=lr,
                        architecture=architecture,
                        activations=activations,
                        conv_architecture=conv_architecture,
                        kernel_sizes=kernel_sizes,
                        pool_kernel=pool_kernel,
                        batch_size=batch_size,
                        epochs=epochs,
                        num_steps=num_steps,
                        save_step=save_step,
                        optimizer=optimizer)
        data = DataHolder(config,
                          records=records)
        name = "lr = {0:.6f}".format(lr)
        print(name + ":\n")
        # Fresh graph per experiment so runs do not share variables.
        graph = tf.Graph()
        if conv:
            network = CNN(graph, config)
        else:
            network = DFN(graph, config)
        trainer = Trainer(graph, config, network, data)
        trainer.fit(verbose=True)
        valid_acc = trainer.get_valid_accuracy()
        numeric_result.append(valid_acc)
        name += ': valid_acc = {0:.6f} | '.format(valid_acc)
        test_images, test_labels, _ = reconstruct_from_record(data.get_test_tfrecord())  # noqa
        test_images = test_images.astype(np.float32) / 255
        test_pred = trainer.predict(test_images)
        acc_cat = accuracy_per_category(test_pred, test_labels, categories=3)
        for i, cat_result in enumerate(acc_cat):
            name += int2command[i] + ": = {0:.6f}, ".format(cat_result)
        results.append(name)
        # Discard checkpoints between experiments so disk usage stays bounded.
        if os.path.exists("checkpoints"):
            shutil.rmtree("checkpoints")
        info.append(str(config))
    # Ties on accuracy break toward the larger learning rate.
    best_result = max(zip(numeric_result, LR, info))
    result_string = """In an experiment with {0} learning rate values
    the best one is {1} with valid accuracy of {2}.
    \nThe training uses the following params:
    \n{3}\n""".format(experiments,
                      best_result[1],
                      best_result[0],
                      best_result[2])
    # BUGFIX: use a context manager so the report file is closed even if a
    # write fails (the original open()/close() pair leaked on exceptions).
    with open("learning_rate_results.txt", "w") as file:
        file.write(header)
        file.write("Results with different values for learning rate\n")
        for result in results:
            file.write(result + "\n")
        file.write("\n")
        file.write(result_string)
def main():
    """Parse CLI arguments and run the learning rate search.

    Expands the tfrecord name prefix into concrete file names, resolves
    optimizer and activation names to their TensorFlow objects, and
    delegates to ``lr_search``.
    """
    # NOTE: "learnig" typos in the original user-facing strings are fixed here.
    parser = argparse.ArgumentParser(description='Perform learning rate search')
    parser.add_argument("-n",
                        "--name_tfrecords",
                        type=str,
                        default="data",
                        help="name for tfrecords (default=data)")  # noqa
    parser.add_argument("-ex",
                        "--experiments",
                        type=int,
                        default=10,
                        help="number of experiments")
    parser.add_argument('-a',
                        '--architecture',
                        type=int,
                        nargs='+',
                        help='sizes for hidden layers and output layer, should end with at least "3" !, (default=[3])',  # noqa
                        default=[3])
    parser.add_argument('-ac',
                        '--activations',
                        type=str,
                        nargs='+',
                        help='activations: relu, sigmoid, tanh (default=None)',
                        default=None)
    parser.add_argument("-he",
                        "--height",
                        type=int,
                        default=90,
                        help="image height (default=90)")
    parser.add_argument("-w",
                        "--width",
                        type=int,
                        default=160,
                        help="image width (default=160)")
    parser.add_argument("-c",
                        "--channels",
                        type=int,
                        default=3,
                        help="number of channels (default=3)")
    parser.add_argument('-conva',
                        '--conv_architecture',
                        type=int,
                        nargs='+',
                        help='filters for conv layers (default=[32, 64])',  # noqa
                        default=[32, 64])
    parser.add_argument('-k',
                        '--kernel_sizes',
                        type=int,
                        nargs='+',
                        help='kernel sizes for conv layers (default=None - 5 for every layer)',  # noqa
                        default=None)
    parser.add_argument('-p',
                        '--pool_kernel',
                        type=int,
                        nargs='+',
                        help='kernel sizes for pooling layers (default=None - 2 for every layer)',  # noqa
                        default=None)
    parser.add_argument("-b",
                        "--batch_size",
                        type=int,
                        default=32,
                        help="batch size (default=32)")
    parser.add_argument("-e",
                        "--epochs",
                        type=int,
                        default=5,
                        help="epochs for training (default=5)")
    parser.add_argument("-ns",
                        "--num_steps",
                        type=int,
                        default=1000,
                        help="number of steps for each epoch (default=1000)")
    parser.add_argument("-ss",
                        "--save_step",
                        type=int,
                        default=100,
                        help="number of steps to save variables (default=100)")
    opt_list = """optimizers: GradientDescent,
                  Adadelta,
                  Adagrad,
                  Adam,
                  Ftrl,
                  ProximalGradientDescent,
                  ProximalAdagrad,
                  RMSProp"""
    parser.add_argument("-o",
                        "--optimizer",
                        type=str,
                        default="GradientDescent",
                        help=opt_list + "(default=GradientDescent)")
    parser.add_argument("-conv",
                        "--conv",
                        action="store_true",
                        default=False,
                        help="Use convolutional network (default=False)")
    parser.add_argument("-di",
                        "--divisor",
                        type=float,
                        default=100.0,
                        help="value to divide the learning rate array (default=100.0)")  # noqa
    args = parser.parse_args()
    # Expand the tfrecord prefix into the three concrete split file names.
    suffixes = ["_train.tfrecords", "_valid.tfrecords", "_test.tfrecords"]
    new_records = [args.name_tfrecords + suffix for suffix in suffixes]
    # Map CLI names to TensorFlow optimizer classes.
    optimizer_dict = {"GradientDescent": tf.train.GradientDescentOptimizer,  # noqa
                      "Adadelta": tf.train.AdadeltaOptimizer,
                      "Adagrad": tf.train.AdagradOptimizer,
                      "Adam": tf.train.AdamOptimizer,
                      "Ftrl": tf.train.FtrlOptimizer,
                      "ProximalGradientDescent": tf.train.ProximalGradientDescentOptimizer,  # noqa
                      "ProximalAdagrad": tf.train.ProximalAdagradOptimizer,  # noqa
                      "RMSProp": tf.train.RMSPropOptimizer}  # noqa
    # Map CLI names to TensorFlow activation functions.
    activations_dict = {"relu": tf.nn.relu,
                        "sigmoid": tf.nn.sigmoid,
                        "tanh": tf.nn.tanh}
    if args.activations is not None:
        activations = [activations_dict[act] for act in args.activations]
    else:
        activations = None
    optimizer = optimizer_dict[args.optimizer]
    lr_search(name_tfrecords=args.name_tfrecords,
              records=new_records,
              height=args.height,
              width=args.width,
              channels=args.channels,
              experiments=args.experiments,
              architecture=args.architecture,
              activations=activations,
              conv_architecture=args.conv_architecture,
              kernel_sizes=args.kernel_sizes,
              pool_kernel=args.pool_kernel,
              batch_size=args.batch_size,
              epochs=args.epochs,
              num_steps=args.num_steps,
              save_step=args.save_step,
              optimizer=optimizer,
              conv=args.conv,
              divisor=args.divisor)
# Run the learning rate search CLI only when executed as a script.
if __name__ == "__main__":
    main()
|
from typing import Deque, Any
from collections import deque

# FIFO queue demo: enqueue three items, show the queue, then drain it
# from the left, printing each removed element.
queue: Deque[Any] = deque()
for item in ('A', 'B', 'C'):
    queue.append(item)
print(queue)
while queue:
    print('Removido', queue.popleft())
|
import komand
from .schema import GetProjectInput, GetProjectOutput, Input, Output, Component
from ...util import project
class GetProject(komand.Action):
    """Action that fetches a single project by name."""

    def __init__(self):
        # Name the class explicitly in super(): the original
        # super(self.__class__, self) pattern recurses infinitely if this
        # class is ever subclassed.
        super(GetProject, self).__init__(
            name="get_project",
            description=Component.DESCRIPTION,
            input=GetProjectInput(),
            output=GetProjectOutput(),
        )

    def run(self, params={}):
        """Look up the project named by params[Input.NAME].

        :param params: action input dict; only Input.NAME is read.
        :return: dict mapping Output.PROJECT to the fetched project.
        """
        # The mutable default is only read (never mutated) here and is kept
        # for compatibility with the komand Action interface.
        return {Output.PROJECT: project.Project.get(self.connection.config, params.get(Input.NAME))}
|
# coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hessian-free optimization algorithm."""
from functools import partial # pylint: disable=g-importing-member
import math
from typing import NamedTuple
import jax
from jax import jit
from jax import jvp
from jax import lax
from jax import vjp
from jax.flatten_util import ravel_pytree
import jax.numpy as jnp
from optax import apply_updates
from optax._src import base
@jit
def residual_norm_test(step, rs_norm, obj_val, obj_arr, tol):
  """Residual norm test, terminates CG if sqrt(rs_norm) < tol.

  Args:
    step: An integer value of the iteration step counter.
    rs_norm: A residual norm.
    obj_val: A current objective value.
    obj_arr: A jax.numpy array of objective values in recent steps.
    tol: The convergence tolerance.

  Returns:
    A bool value indicating if the test is satisfied.
  """
  # Only the residual norm matters for this criterion; the remaining
  # arguments exist to match the shared criterion-function signature.
  del step, obj_val, obj_arr
  residual_norm = jnp.sqrt(rs_norm)
  return residual_norm < tol
@jit
def relative_per_iteration_progress_test(step, rs_norm, obj_val, obj_arr, tol):
  """Relative per-iteration progress test proposed by Martens (2010).

  Terminate CG if:
    step > k, f_value(step) < 0, and
    (f_value(step) - f_value(step-k)) / f_value(step) < k * eps.

  For more information, see Section 4.4 of
  https://www.cs.toronto.edu/~jmartens/docs/Deep_HessianFree.pdf.

  Args:
    step: An integer value of the iteration step counter.
    rs_norm: A residual norm.
    obj_val: A current objective value.
    obj_arr: A jax.numpy array of objective values in recent steps.
    tol: The convergence tolerance.

  Returns:
    A bool value indicating if the test is satisfied.
  """
  # This criterion only looks at objective progress, not the residual.
  del rs_norm
  # k = max(10, ceil(0.1 * step))
  k = lax.max(10, jnp.int32(lax.ceil(0.1 * step)))
  arr_len = len(obj_arr)
  # Only start testing once more than k steps have been taken.
  step_condition = jnp.less(k, step)
  negativity_condition = jnp.less(obj_val, 0.)
  # obj_arr is used as a ring buffer by the caller; the index below reads
  # the objective value recorded k steps ago.
  progress_condition = jnp.less(
      k * obj_val * tol,
      obj_val - obj_arr[(step + arr_len - k) % arr_len])
  return step_condition & negativity_condition & progress_condition
# Maps criterion name -> (requires_obj_arr, criterion_fn).  The bool tells
# the CG solver whether it must maintain the rolling array of objective
# values that the criterion function reads.
_TERMINATION_CRITERIA = {
    'residual_norm_test': (False, residual_norm_test),
    'relative_per_iteration_progress_test':
        (True, relative_per_iteration_progress_test),
}
def require_obj_arr(criterion_name):
  """Indicates if the criterion function requires an objective array.

  Args:
    criterion_name: (str) e.g. residual_norm_test.

  Returns:
    A bool indicating if the criterion functions requires an objective array.

  Raises:
    ValueError if criterion_name is unrecognized.
  """
  try:
    return _TERMINATION_CRITERIA[criterion_name][0]
  except KeyError as e:
    # Chain the original KeyError so tracebacks show the root cause.
    raise ValueError(
        'Unrecognized criterion name: {}'.format(criterion_name)) from e
def get_termination_criterion_fn(criterion_name):
  """Get the termination criterion function based on the criterion_name.

  Args:
    criterion_name: (str) e.g. residual_norm_test.

  Returns:
    The termination criterion function.

  Raises:
    ValueError if criterion_name is unrecognized.
  """
  try:
    return _TERMINATION_CRITERIA[criterion_name][1]
  except KeyError as e:
    # Chain the original KeyError so tracebacks show the root cause.
    raise ValueError(
        'Unrecognized criterion name: {}'.format(criterion_name)) from e
@partial(jit, static_argnums=(0, 3, 6, 7))
def mf_conjgrad_solver(matmul_fn,
                       b,
                       x0,
                       max_iter,
                       tol=1e-6,
                       residual_refresh_frequency=10,
                       precond_fn=None,
                       termination_criterion='residual_norm_test'):
  """Solves Ax = b using 'matrix-free' preconditioned conjugate gradient method.

  This implements the preconditioned conjugate gradient algorithm in page 32 of
  http://www.cs.toronto.edu/~jmartens/docs/thesis_phd_martens.pdf in a 'matrix-
  free' manner. 'Matrix-free' means that this method does not require explicit
  knowledge of matrix A and preconditioner P. Instead, it iteratively calls
  linear operators matmul_fn and precond_fn, which return Ax and the solution of
  Py=r for any given x and r, respectively. A termination criterion function
  name can be passed as an argument.

  Args:
    matmul_fn: A linear operator that returns Ax given x.
    b: A numpy vector of shape (n,).
    x0: An initial guess numpy vector of shape (n,).
    max_iter: The number of iterations to run.
    tol: The convergence tolerance.
    residual_refresh_frequency: A frequency to refresh the residual.
    precond_fn: A linear operator that returns the solution of Py=r for any r.
    termination_criterion: A termination criterion function name.

  Returns:
    An array of saved CG iterates (for backtracking) and the index of the
    last saved iterate within it.
  """
  if precond_fn is None:
    # Identity preconditioner: plain (unpreconditioned) CG.
    precond_fn = lambda x: x
  x = x0
  r = matmul_fn(x) - b
  y = precond_fn(r)
  p = -y
  alpha = 0
  beta = 1
  # With preconditioning, the tracked quantity is r^T P^{-1} r.
  rs_norm = jnp.dot(r, y)
  def cg_objective(x, r):
    """Returns the CG objective function value."""
    return jnp.dot(x, r - b) / 2
  use_obj_arr = require_obj_arr(termination_criterion)
  termination_criterion_fn = get_termination_criterion_fn(termination_criterion)
  obj_val = cg_objective(x, r)
  obj_arr = jnp.array([])
  if use_obj_arr:
    # Ring buffer of recent objective values; sized to match the look-back
    # window k = max(10, ceil(0.1 * step)) used by the progress criterion.
    obj_arr = jnp.zeros(max(10, math.ceil(0.1 * max_iter)))
  arr_len = len(obj_arr)
  # define an array to save iterates for CG backtracking
  # an iterate at every ceil(initial_save_step * gamma^j)-th step where j >= 0,
  # and the last iterate will be saved.
  # if the last iteration is ceil(initial_save_step * gamma^j*) for some j*,
  # only one copy will be saved.
  # the max number of copies is ceil(log(max_iter / initial_save_step, gamma))
  # this amounts to 10/13/16/19/28 copies for 50/100/200/500/5000 max_iter
  # when gamma = 1.3 and initial_save_step = 5
  gamma = 1.3
  initial_save_step = 5.0
  x_arr = jnp.zeros(
      (math.ceil(math.log(max_iter / initial_save_step, gamma)) + 1, len(x0)))
  # index to track the last saved element in the array
  x_arr_idx = -1
  next_save_step = initial_save_step
  def termination_condition(state):
    # Loop continues while step < max_iter AND the criterion is NOT met.
    *_, step, rs_norm, obj_val, obj_arr = state
    return jnp.logical_and(
        jnp.less(step, max_iter),
        jnp.equal(
            termination_criterion_fn(
                rs_norm=rs_norm, tol=tol, step=step,
                obj_val=obj_val, obj_arr=obj_arr), False))
  def update_obj_arr(step, obj_val, obj_arr):
    # Write into the ring buffer only when the criterion needs it.
    if use_obj_arr:
      return obj_arr.at[step % arr_len].set(obj_val)
    return obj_arr
  def update_x_arr(x, x_arr, x_arr_idx):
    return x_arr.at[x_arr_idx, :].set(x)
  @jit
  def one_step_conjgrad(state):
    """One step of conjugate gradient iteration."""
    x, x_arr, x_arr_idx, save_step, next_save_step, r, y, p, alpha, beta, step, rs_norm, obj_val, obj_arr = state
    obj_arr = update_obj_arr(step, obj_val, obj_arr)
    step += 1
    # Compute Ap
    matmul_product = matmul_fn(p)
    # Update x
    alpha = rs_norm / jnp.dot(p, matmul_product)
    x += alpha * p
    # Update r, y and the square of residual norm.  The residual is
    # recomputed from scratch periodically to limit round-off drift.
    refresh_residual = jnp.equal(
        jnp.remainder(step, residual_refresh_frequency), 0)
    r = jnp.where(refresh_residual,
                  matmul_fn(x) - b,
                  r + alpha * matmul_product)
    y = precond_fn(r)
    rs_norm_new = jnp.dot(r, y)
    # Compute the objective value
    obj_val = cg_objective(x, r)
    # Update p
    beta = rs_norm_new / rs_norm
    p = beta * p - y
    # Save iterates for CG backtracking
    save_step = jnp.equal(step, jnp.int32(lax.ceil(next_save_step)))
    x_arr_idx = jnp.where(save_step, x_arr_idx + 1, x_arr_idx)
    x_arr = jnp.where(save_step, update_x_arr(x, x_arr, x_arr_idx), x_arr)
    next_save_step *= jnp.where(save_step, gamma, 1)
    return (x, x_arr, x_arr_idx, save_step, next_save_step, r, y, p, alpha,
            beta, step, rs_norm_new, obj_val, obj_arr)
  init_state = x, x_arr, x_arr_idx, False, next_save_step, r, y, p, alpha, beta, 0, rs_norm, obj_val, obj_arr
  x, x_arr, x_arr_idx, save_step, *_ = lax.while_loop(termination_condition,
                                                      one_step_conjgrad,
                                                      init_state)
  # Save the last iterate if not saved yet.
  x_arr_idx = jnp.where(save_step, x_arr_idx, x_arr_idx + 1)
  x_arr = jnp.where(save_step, x_arr, update_x_arr(x, x_arr, x_arr_idx))
  return x_arr, x_arr_idx
def hvp(f, x, v):
  """Returns the product of Hessian matrix and a vector.

  Computed as the directional derivative of grad(f) along v, so the Hessian
  itself is never materialized.

  Args:
    f: A callable function that takes a numpy vector of shape (n,).
    x: A numpy vector of shape (n,) where the Hessian is evaluated.
    v: A numpy vector of shape (n,).

  Returns:
    The product of Hessian matrix and a vector
  """
  grad_fn = jax.grad(f)
  _, hessian_vector_product = jax.jvp(grad_fn, [x], [v])
  return hessian_vector_product
def gvp(variables, outputs, damping, forward_fn, loss_fn, v):
  """Returns the product of generalized Gauss-Newton matrix and a vector.

  Computes (J^T H_L J + damping * I) v without forming any matrix, where J
  is the Jacobian of forward_fn and H_L is the Hessian of loss_fn at outputs.

  Args:
    variables: A dict of variables is passed directly into flax_module.apply,
      required to have a key 'params' that is a pytree of model parameters.
    outputs: A numpy vector of network outputs computed by forward_fn(params).
    damping: A damping parameter.
    forward_fn: A function that maps params to outputs.
    loss_fn: A loss function.
    v: A numpy vector of shape (n,).

  Returns:
    The product of Generalized Gauss-Newton matrix and a vector
  """
  _, unravel_fn = ravel_pytree(variables)
  # J v: forward-mode derivative of the outputs along direction v.
  jv = jvp(forward_fn, [variables], [unravel_fn(v)])[1]
  # H_L (J v): curvature of the loss with respect to the outputs.
  hjv = hvp(loss_fn, outputs, jv)
  # J^T (H_L J v): pull the vector back to parameter space (reverse mode),
  # then re-flatten and add the damping term.
  gvp_fn = vjp(forward_fn, variables)[1]
  return ravel_pytree(gvp_fn(hjv)[0])[0] + damping * v
def cg_backtracking(
    p_arr, p_arr_idx, forward_fn, loss_fn, variables, unravel_fn):
  """Backtracks CG iterates (Section 4.6, Martens (2010)).

  This function iteratively compares the function values of two consecutive
  iterates. If the function value of the iterate at idx is smaller than the
  function value of the iterate at idx - 1, then the iterate at idx is returned
  as a search direction. Otherwise, we decrease idx by 1 and repeat the
  comparison. If no iterate satisfies the condition, the first element in p_arr
  will be returned.

  Args:
    p_arr: An array of CG iterates of shape (m, n).
    p_arr_idx: The index of the last element in p_arr.
    forward_fn: A function that maps params to outputs.
    loss_fn: A function that maps outputs to a loss value.
    variables: A dict of variables is passed directly into flax_module.apply,
      required to have a key 'params' that is a pytree of model parameters.
    unravel_fn: A function that maps a numpy vector of shape (n,) to a pytree.

  Returns:
    The backtracked iterate as a pytree and a vector with its function value.
  """
  # Initialize the search direction and compute the objective value along it.
  flattened_p = p_arr[p_arr_idx]
  # We need to make a new dict in order to avoid possible unintended
  # side-effects in the calling function that could happen if we reassigned the
  # keys of "variables", but to avoid copying all the (possibly large) values in
  # the original "variables" we reassign them in the new dict instead of using
  # copy.deepcopy. We should only be calling with train=False in the forward_fn
  # so there should not be any updates to possible "batch_stats" in variables.
  updated_variables = {
      'params': apply_updates(variables['params'], unravel_fn(flattened_p))
  }
  for k, v in variables.items():
    if k != 'params':
      updated_variables[k] = v
  obj_val = loss_fn(forward_fn(updated_variables))
  def termination_condition_cg_backtracking(state):
    # Continue while the previous comparison said to keep going and there
    # are still earlier iterates to examine.
    *_, idx, keep_backtracking = state
    return jnp.logical_and(keep_backtracking, jnp.greater_equal(idx, 0))
  def one_step_cg_backtracking(state):
    """One step of cg backtracking iteration."""
    flattened_p, obj_val, idx, keep_backtracking = state
    # Compute the objective value for the iterate to be compared with.
    flattened_p_prev = p_arr[idx]
    updated_variables = {
        'params': apply_updates(
            variables['params'], unravel_fn(flattened_p_prev))
    }
    for k, v in variables.items():
      if k != 'params':
        updated_variables[k] = v
    obj_val_prev = loss_fn(forward_fn(updated_variables))
    # Compare the objective values.
    keep_backtracking = jnp.greater_equal(obj_val, obj_val_prev)
    # Update flattened_p and obj_val if obj_val >= obj_val_prev.
    flattened_p = jnp.where(keep_backtracking, flattened_p_prev, flattened_p)
    obj_val = jnp.where(keep_backtracking, obj_val_prev, obj_val)
    return flattened_p, obj_val, idx - 1, keep_backtracking
  init_state = flattened_p, obj_val, p_arr_idx - 1, True
  flattened_p, obj_val, *_ = lax.while_loop(
      termination_condition_cg_backtracking, one_step_cg_backtracking,
      init_state)
  return flattened_p, obj_val
class HessianFreeState(NamedTuple):
  """State for Hessian-free updates.

  p0: An initial guess to the search direction generated by Hessian-free
    updates.
  damping: A damping parameter.
  """
  # NOTE(review): jnp.DeviceArray is deprecated in newer JAX (jax.Array);
  # kept as-is to match the JAX version this file was written against.
  p0: jnp.DeviceArray
  damping: float
def hessian_free(flax_module,
                 loss_fn,
                 learning_rate=1.0,
                 max_iter=100,
                 tol=0.0005,
                 residual_refresh_frequency=10,
                 termination_criterion='relative_per_iteration_progress_test'):
  """Hessian-free optimizer.

  Args:
    flax_module: A flax linen.nn.module.
    loss_fn: A loss function.
    learning_rate: A learning rate.
    max_iter: The number of CG iterations.
    tol: The convergence tolerance.
    residual_refresh_frequency: A frequency to refresh the residual.
    termination_criterion: A function checking a termination criterion.

  Returns:
    A base.GradientTransformation object of (init_fn, update_fn) tuple.
  """
  def init_fn(params):
    """Initializes the HessianFreeState object for Hessian-free updates."""
    return HessianFreeState(
        p0=ravel_pytree(params)[0],
        damping=1)
  @jit
  def update_fn(grads, state, variables_batch_tuple):
    """Transforms the grads and updates the HessianFreeState object.

    Args:
      grads: pytree of model parameter gradients.
      state: optimizer state (damping and p0 are the used attributes).
      variables_batch_tuple: a tuple of (Dict[str, Any], batch) where the dict
        of variables is passed directly into flax_module.apply, and batch is the
        current minibatch. It is required to have a key 'params'. We need to put
        these into a tuple here so that we can be compatible with the optax API.

    Returns:
      A tuple of (pytree of the model updates, new HessianFreeState).
    """
    variables, batch = variables_batch_tuple
    def forward_fn(variables, inputs):
      return flax_module.apply(variables, inputs, train=False)
    outputs = forward_fn(variables, batch['inputs'])
    flattened_grads, unravel_fn = ravel_pytree(grads)
    partial_forward_fn = partial(forward_fn, inputs=batch['inputs'])
    partial_loss_fn = partial(loss_fn, targets=batch['targets'])
    # Damped generalized Gauss-Newton matrix-vector product for CG.
    matmul_fn = partial(gvp, variables, outputs, state.damping,
                        partial_forward_fn, partial_loss_fn)
    p_arr, p_arr_idx = mf_conjgrad_solver(matmul_fn, -flattened_grads, state.p0,
                                          max_iter, tol,
                                          residual_refresh_frequency, None,
                                          termination_criterion)
    ## CG backtracking
    # CG solution to be used to initialize the next CG run.
    p_sol = p_arr[p_arr_idx]
    # CG backtracking uses a logarithmic amount of memory to save CG iterates.
    # If this causes OOM, we can consider computing the objective value at
    # each save step in the CG loop and keeping the best one.
    flattened_p, obj_val = cg_backtracking(p_arr,
                                           p_arr_idx,
                                           partial_forward_fn,
                                           partial_loss_fn,
                                           variables,
                                           unravel_fn)
    # update the damping parameter: grow damping when the actual loss
    # reduction falls short of the quadratic model's prediction (the
    # 0.25 / (3/2, 2/3) constants follow Martens (2010) — see Section 4.1).
    reduction_f = obj_val - partial_loss_fn(outputs)
    reduction_q = jnp.dot(flattened_p,
                          flattened_grads + 0.5 * matmul_fn(flattened_p))
    damping_new = state.damping * jnp.where(reduction_f / reduction_q < 0.25,
                                            3.0 / 2.0, 2.0 / 3.0)
    return unravel_fn(flattened_p * learning_rate), HessianFreeState(
        p_sol, damping_new)
  return base.GradientTransformation(init_fn, update_fn)
|
from django.urls import path, re_path
from app import views, mobileViews
urlpatterns = [
    re_path(r'^.*\.html', views.gentella_html, name='gentella'),
    path('', views.index, name='index'),
    # -------------------------------------------------------------------------------
    # cordova
    # Mobile endpoints, but kept here as a backup as well.
    path('signup', mobileViews.signup, name='signup'),
    path('login', mobileViews.login, name='login'),
    path('updateUserinfo', mobileViews.updateUserinfo, name='updateUserinfo'),
    path('checkUser', mobileViews.checkUser, name='checkUser'),
    path('checkEmail', mobileViews.checkEmail, name='checkEmail'),
    path('walletAddrAdd', mobileViews.walletAddrAdd, name='walletAddrAdd'),
    path('userLoginDateUp', mobileViews.userLoginDateUp, name='userLoginDateUp'),
    path('checkOTPCode', mobileViews.checkOTPCode, name='checkOTPCode'),
    path('otpCodeSave', mobileViews.otpCodeSave, name='otpCodeSave'),
    # -------------------------------------------------------------------------------
    # -------------------------------------------------------------------------------
    # admin
    path('userManageHtml', views.userManageHtml, name='userManageHtml'),
    path('userDetailHtml', views.userDetailHtml, name='userDetailHtml'),
    # 2021.12.28 written by HSW - Admin -> member management -> author management -----
    path('authorManageHtml', views.authorManageHtml, name='authorManageHtml'),
    path('authorDetailHtml', views.authorDetailHtml, name='authorDetailHtml'),
    # End of HSW's changes ------------------------------------
    # -------------------------------------------------------------------------------
]
|
"""Author model for Zinnia"""
from django.apps import apps
from django.conf import settings
from django.db import models
from django.urls import reverse
from zinnia.managers import EntryRelatedPublishedManager
from zinnia.managers import entries_published
def safe_get_user_model():
    """
    Safe loading of the User model, customized or not.
    """
    app_label, model_name = settings.AUTH_USER_MODEL.split('.')
    return apps.get_registered_model(app_label, model_name)
class AuthorPublishedManager(models.Model):
    """
    Proxy model manager to avoid overriding of
    the default User's manager and issue #307.
    """
    # Manager exposing only authors related to published entries.
    published = EntryRelatedPublishedManager()

    class Meta:
        # Abstract so this only contributes the manager to subclasses.
        abstract = True
class Author(safe_get_user_model(),
             AuthorPublishedManager):
    """
    Proxy model around :class:`django.contrib.auth.models.get_user_model`.
    """

    def entries_published(self):
        """
        Return the published entries written by this author.
        """
        return entries_published(self.entries)

    def get_absolute_url(self):
        """
        Build and return the author's URL based on his username.
        """
        try:
            return super().get_absolute_url()
        except AttributeError:
            url_args = [self.get_username()]
            return reverse('zinnia:author_detail', args=url_args)

    def __str__(self):
        """
        Return the username for display.

        NOTE(review): the original docstring mentioned preferring the full
        name, but the implementation only returns the username.
        """
        return self.get_username()

    class Meta:
        """
        Author's meta informations.
        """
        proxy = True
|
import csv
import json
import copy
# Script configuration: input/output file names and the service period
# tag written into the generated JSON.
INPUT_CSV = '20210712.csv'  # input: timetable csv
OUTPUT_JSON = '20210712.json'  # output: timetable json
PERIOD = 'summer'  # choices: 'semester', 'summer', 'winter'
def timeList(dic, week, fro, to):
    """Merge outbound and return departure times into aligned rows.

    Performs a two-pointer merge of the times from `fro` to `to` ('left')
    and from `to` to `fro` ('right'), ordered by the HH:MM prefix (first
    five characters).  Times sharing a prefix occupy one row; otherwise
    the earlier time gets a row with the other side left blank.
    """
    left = dic[week][fro][to]
    right = dic[week][to][fro]
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        row = {'left': '', 'right': ''}
        if left[i][0:5] == right[j][0:5]:
            row['left'], row['right'] = left[i], right[j]
            i += 1
            j += 1
        elif left[i][0:5] < right[j][0:5]:
            row['left'] = left[i]
            i += 1
        else:
            row['right'] = right[j]
            j += 1
        merged.append(row)
    # One of the two lists is exhausted; flush the remainder of the other.
    merged.extend({'left': t, 'right': ''} for t in left[i:])
    merged.extend({'left': '', 'right': t} for t in right[j:])
    return merged
def tojson(filename):
    """Load the timetable CSV and build the nested timetable dict.

    Returns a dict keyed by week type ('weekday'/'weekend'), then origin,
    then destination, whose leaves are the merged left/right rows produced
    by timeList.  A top-level 'period' key records the service period.
    """
    # Directed campus connections: origin -> reachable destinations.
    connections = {
        'handan': ['jiangwan', 'fenglin', 'zhangjiang'],
        'jiangwan': ['handan', 'zhangjiang'],
        'fenglin': ['handan', 'zhangjiang'],
        'zhangjiang': ['handan', 'jiangwan', 'fenglin'],
    }
    routes = {src: {dst: [] for dst in dsts}
              for src, dsts in connections.items()}
    # Raw departure times grouped by week type / origin / destination.
    dic = {week: copy.deepcopy(routes) for week in ('weekday', 'weekend')}
    # Final structure: same nesting, with merged rows at the leaves.
    data = copy.deepcopy(dic)
    # Parse the CSV into the raw grouping.
    with open(filename, newline='') as file:
        for row in csv.DictReader(file):
            dic[row['week']][row['from']][row['to']].append(row['time'])
    # Merge each route's two directions into aligned rows.
    for week in data:
        for fro in data[week]:
            for to in data[week][fro]:
                data[week][fro][to] = timeList(dic, week, fro, to)
    data['period'] = PERIOD
    return data
if __name__ == '__main__':
    # Build the timetable and write it as pretty-printed JSON.
    timeTable = tojson(INPUT_CSV)
    with open(OUTPUT_JSON, 'w') as file:
        json.dump(timeTable, file, indent=4)
|
from __future__ import division, print_function
import numpy as np
from sqlalchemy import String, Integer, Float, Column
from sqlalchemy.schema import ForeignKey
from sqlalchemy.orm import relationship
from .connect import Base
from astropy.coordinates import SkyCoord
__all__ = ['Run', 'Tract', 'Patch', 'Source']
class Run(Base):
    """A processing run; owns a collection of tracts."""
    __tablename__ = 'run'
    # Table columns
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    # Relationships
    # Deleting a run deletes its tracts (delete-orphan cascade).
    # NOTE(review): attribute is capitalized, unlike `patches`/`sources`
    # on sibling models; renaming would break existing callers.
    Tracts = relationship('Tract', cascade='all, delete-orphan')
class Tract(Base):
    """A sky tract belonging to a run; owns a collection of patches."""
    __tablename__ = 'tract'
    # Table columns
    id = Column(Integer, primary_key=True)
    # Tract identifier as used by the external (HSC) catalog.
    hsc_id = Column(Integer, nullable=False)
    # Relationships
    run_id = Column(Integer, ForeignKey('run.id'), nullable=False)
    run = relationship('Run')
    # Deleting a tract deletes its patches (delete-orphan cascade).
    patches = relationship('Patch', cascade='all, delete-orphan')
class Patch(Base):
    """A patch within a tract; owns the sources detected in it."""
    __tablename__ = 'patch'
    # Table columns
    id = Column(Integer, primary_key=True)
    # Patch identifier string as used by the external (HSC) catalog.
    hsc_id = Column(String, nullable=False)
    # Patch origin coordinates.
    x0 = Column(Float, nullable=False)
    y0 = Column(Float, nullable=False)
    # Fractions of the patch area in various mask categories.
    good_data_frac = Column(Float, nullable=True)
    small_frac = Column(Float, nullable=True)
    cleaned_frac = Column(Float, nullable=True)
    bright_obj_frac = Column(Float, nullable=True)
    # TODO
    # add number of detected sources column
    # Relationships
    tract_id = Column(Integer, ForeignKey('tract.id'), nullable=False)
    tract = relationship('Tract')
    # Deleting a patch deletes its sources (delete-orphan cascade).
    sources = relationship('Source', cascade='all, delete-orphan')
class Source(Base):
    """A detected source with photometric and morphological measurements.

    Column names (mag_auto, mag_petro, kron_radius, flux_radius, ...)
    follow SExtractor-style parameter naming — presumably these rows are
    ingested from a SExtractor catalog; confirm against the ingest code.
    """
    __tablename__ = 'source'
    # Table columns
    id = Column(Integer, primary_key=True)
    # Pixel and sky positions (image frame, HSC frame, and RA/Dec in deg).
    x_image = Column(Float, nullable=False)
    y_image = Column(Float, nullable=False)
    x_hsc = Column(Float, nullable=False)
    y_hsc = Column(Float, nullable=False)
    ra = Column(Float, nullable=False)
    dec = Column(Float, nullable=False)
    # Shape parameters.
    a_image = Column(Float, nullable=True)
    b_image = Column(Float, nullable=True)
    theta_image = Column(Float, nullable=True)
    ellipticity = Column(Float, nullable=True)
    kron_radius = Column(Float, nullable=True)
    petro_radius = Column(Float, nullable=True)
    flags = Column(Integer, nullable=False)
    # Auto (Kron) magnitudes and errors per band (g/r/i).
    mag_auto_g = Column(Float, nullable=True)
    mag_auto_r = Column(Float, nullable=True)
    mag_auto_i = Column(Float, nullable=True)
    magerr_auto_g = Column(Float, nullable=True)
    magerr_auto_r = Column(Float, nullable=True)
    magerr_auto_i = Column(Float, nullable=True)
    # Petrosian magnitudes and errors per band.
    mag_petro_g = Column(Float, nullable=True)
    mag_petro_r = Column(Float, nullable=True)
    mag_petro_i = Column(Float, nullable=True)
    magerr_petro_g = Column(Float, nullable=True)
    magerr_petro_r = Column(Float, nullable=True)
    magerr_petro_i = Column(Float, nullable=True)
    # Fixed-aperture magnitudes (10 apertures, index 0-9) and errors, g band.
    mag_ap0_g = Column(Float, nullable=True)
    mag_ap1_g = Column(Float, nullable=True)
    mag_ap2_g = Column(Float, nullable=True)
    mag_ap3_g = Column(Float, nullable=True)
    mag_ap4_g = Column(Float, nullable=True)
    mag_ap5_g = Column(Float, nullable=True)
    mag_ap6_g = Column(Float, nullable=True)
    mag_ap7_g = Column(Float, nullable=True)
    mag_ap8_g = Column(Float, nullable=True)
    mag_ap9_g = Column(Float, nullable=True)
    magerr_ap0_g = Column(Float, nullable=True)
    magerr_ap1_g = Column(Float, nullable=True)
    magerr_ap2_g = Column(Float, nullable=True)
    magerr_ap3_g = Column(Float, nullable=True)
    magerr_ap4_g = Column(Float, nullable=True)
    magerr_ap5_g = Column(Float, nullable=True)
    magerr_ap6_g = Column(Float, nullable=True)
    magerr_ap7_g = Column(Float, nullable=True)
    magerr_ap8_g = Column(Float, nullable=True)
    magerr_ap9_g = Column(Float, nullable=True)
    # Fixed-aperture magnitudes and errors, r band.
    mag_ap0_r = Column(Float, nullable=True)
    mag_ap1_r = Column(Float, nullable=True)
    mag_ap2_r = Column(Float, nullable=True)
    mag_ap3_r = Column(Float, nullable=True)
    mag_ap4_r = Column(Float, nullable=True)
    mag_ap5_r = Column(Float, nullable=True)
    mag_ap6_r = Column(Float, nullable=True)
    mag_ap7_r = Column(Float, nullable=True)
    mag_ap8_r = Column(Float, nullable=True)
    mag_ap9_r = Column(Float, nullable=True)
    magerr_ap0_r = Column(Float, nullable=True)
    magerr_ap1_r = Column(Float, nullable=True)
    magerr_ap2_r = Column(Float, nullable=True)
    magerr_ap3_r = Column(Float, nullable=True)
    magerr_ap4_r = Column(Float, nullable=True)
    magerr_ap5_r = Column(Float, nullable=True)
    magerr_ap6_r = Column(Float, nullable=True)
    magerr_ap7_r = Column(Float, nullable=True)
    magerr_ap8_r = Column(Float, nullable=True)
    magerr_ap9_r = Column(Float, nullable=True)
    # Fixed-aperture magnitudes and errors, i band.
    mag_ap0_i = Column(Float, nullable=True)
    mag_ap1_i = Column(Float, nullable=True)
    mag_ap2_i = Column(Float, nullable=True)
    mag_ap3_i = Column(Float, nullable=True)
    mag_ap4_i = Column(Float, nullable=True)
    mag_ap5_i = Column(Float, nullable=True)
    mag_ap6_i = Column(Float, nullable=True)
    mag_ap7_i = Column(Float, nullable=True)
    mag_ap8_i = Column(Float, nullable=True)
    mag_ap9_i = Column(Float, nullable=True)
    magerr_ap0_i = Column(Float, nullable=True)
    magerr_ap1_i = Column(Float, nullable=True)
    magerr_ap2_i = Column(Float, nullable=True)
    magerr_ap3_i = Column(Float, nullable=True)
    magerr_ap4_i = Column(Float, nullable=True)
    magerr_ap5_i = Column(Float, nullable=True)
    magerr_ap6_i = Column(Float, nullable=True)
    magerr_ap7_i = Column(Float, nullable=True)
    magerr_ap8_i = Column(Float, nullable=True)
    magerr_ap9_i = Column(Float, nullable=True)
    # Full width at half maximum per band.
    fwhm_g = Column(Float, nullable=True)
    fwhm_r = Column(Float, nullable=True)
    fwhm_i = Column(Float, nullable=True)
    # Radii enclosing the given percentage of the flux, per band.
    flux_radius_10_g = Column(Float, nullable=True)
    flux_radius_20_g = Column(Float, nullable=True)
    flux_radius_30_g = Column(Float, nullable=True)
    flux_radius_40_g = Column(Float, nullable=True)
    flux_radius_50_g = Column(Float, nullable=True)
    flux_radius_60_g = Column(Float, nullable=True)
    flux_radius_65_g = Column(Float, nullable=True)
    flux_radius_70_g = Column(Float, nullable=True)
    flux_radius_80_g = Column(Float, nullable=True)
    flux_radius_90_g = Column(Float, nullable=True)
    flux_radius_10_r = Column(Float, nullable=True)
    flux_radius_20_r = Column(Float, nullable=True)
    flux_radius_30_r = Column(Float, nullable=True)
    flux_radius_40_r = Column(Float, nullable=True)
    flux_radius_50_r = Column(Float, nullable=True)
    flux_radius_60_r = Column(Float, nullable=True)
    flux_radius_65_r = Column(Float, nullable=True)
    flux_radius_70_r = Column(Float, nullable=True)
    flux_radius_80_r = Column(Float, nullable=True)
    flux_radius_90_r = Column(Float, nullable=True)
    flux_radius_10_i = Column(Float, nullable=True)
    flux_radius_20_i = Column(Float, nullable=True)
    flux_radius_30_i = Column(Float, nullable=True)
    flux_radius_40_i = Column(Float, nullable=True)
    flux_radius_50_i = Column(Float, nullable=True)
    flux_radius_60_i = Column(Float, nullable=True)
    flux_radius_65_i = Column(Float, nullable=True)
    flux_radius_70_i = Column(Float, nullable=True)
    flux_radius_80_i = Column(Float, nullable=True)
    flux_radius_90_i = Column(Float, nullable=True)
    # Gini coefficient measurements at several apertures.
    gini_full = Column(Float, nullable=True)
    gini_1 = Column(Float, nullable=True)
    gini_1p5 = Column(Float, nullable=True)
    gini_2 = Column(Float, nullable=True)
    gini_1p5_circ = Column(Float, nullable=True)
    gini_2_circ = Column(Float, nullable=True)
    # Autocorrelation statistics.
    acorr_peak = Column(Float, nullable=True)
    acorr_bkgd = Column(Float, nullable=True)
    acorr_ratio = Column(Float, nullable=True)
    # Galactic extinction: E(B-V) and per-band extinction coefficients.
    ebv = Column(Float, nullable=True)
    A_g = Column(Float, nullable=True)
    A_r = Column(Float, nullable=True)
    A_i = Column(Float, nullable=True)
    # Relationships
    patch_id = Column(Integer, ForeignKey('patch.id'), nullable=False)
    patch = relationship('Patch')

    @property
    def skycoord(self):
        # Astropy SkyCoord built from the stored RA/Dec in degrees.
        return SkyCoord(ra=self.ra, dec=self.dec, unit='deg')

    @property
    def hr_angle_string(self):
        # Position formatted as sexagesimal "HhMmSs +DdMmSs".
        return self.skycoord.to_string('hmsdms')
class Synth(Base):
    """A synthetic (injected) source and the mask planes it landed on."""
    __tablename__ = 'synth'
    # Table columns
    id = Column(Integer, primary_key=True)
    # Identifier of the injected synthetic source.
    synth_id = Column(Integer, nullable=False)
    # Mask-plane membership flags for the synthetic source's position.
    mask_bright_object = Column(Integer, nullable=True)
    mask_cleaned = Column(Integer, nullable=True)
    mask_small = Column(Integer, nullable=True)
    mask_no_data = Column(Integer, nullable=True)
    mask_sat = Column(Integer, nullable=True)
    mask_suspect = Column(Integer, nullable=True)
    # Relationships
    patch_id = Column(Integer, ForeignKey('patch.id'), nullable=False)
    patch = relationship('Patch')
|
from core import Printer, Run

# Simple REPL: show the banner, then execute commands until one exits
# with a non-zero status.
Printer.main()
while True:
    command = Run(input("=> "))
    command.run()
    if command.exitStatus != 0:
        print("\nGood bye :D\n")
        break
|
#!/usr/bin/env false
"""TODO: Write
"""
# Internal packages (absolute references, distributed with Python)
# External packages (absolute references, NOT distributed with Python)
# Library modules (absolute references, NOT packaged, in project)
from src_gen.script.bash.complete import generate_activation as activation
# Project modules (relative references, NOT packaged, in project)
def generate(directory):
    """Generate the bash completion activation script "declare.bash".

    Delegates to generate_activation (imported as `activation`) with the
    target *directory* and the fixed script name.
    """
    activation(directory, "declare.bash")
"""DisabledContent
"""
|
"""
RTC typing class
his class includes support for using ESP32 RTC peripherals and memory
The content of the RTC memory is preserved during the deep sleep.
Up to 64 32-bit integers can be saved in RTC memory.
One string of up to 2048 characters can be saved in RTC memory.
The string can be, for example, json string containing the parameters which has to be restored after deep sleep wake-up.
Integers and string saved in RTC memory are protected by 16-bit CRC.
"""
class RTC:
    """Typing stub of the ESP32 machine.RTC API.

    Covers the real-time clock, NTP synchronization, deep-sleep wake
    sources, and RTC slow memory. All methods are documentation-only stubs.

    Fix: the original defined both ``ntp_sync`` and ``synced`` twice; only
    the second definition of each survived at runtime. The duplicates are
    merged here, keeping the surviving signatures (``update_period`` is
    re-added as a trailing keyword with a backward-compatible default).
    """

    def init(self, date):
        """
        Set the system time and date.

        date is a tuple containing the time and date information:
        (year, month, day [,hour [,minute [, second ]]])
        """
        pass

    def now(self):
        """
        Return the current time as a tuple:
        (year, month, day, hour, minute, second)
        """
        pass

    def ntp_sync(self, server="hr.pool.ntp.org", tz="CET-1CEST", update_period=0):
        """
        Start time synchronization from an NTP server.

        server          the NTP server domain name or IP, e.g. "pool.ntp.org"
        tz              optional time zone string; default: the one set in menuconfig
        update_period   optional time update interval in seconds; default: 0
        Note: for update_period < 300, the time will be synced only once.

        Example:
            rtc = machine.RTC()
            rtc.ntp_sync(server="hr.pool.ntp.org", tz="CET-1CEST")
            rtc.synced()
            True
            utime.gmtime()
            (2018, 1, 29, 16, 3, 18, 2, 29)
            utime.localtime()
            (2018, 1, 29, 17, 3, 30, 2, 29)
        """
        pass

    def synced(self):
        """
        Return True if the system time was synced from an NTP server, False if not.
        """
        pass

    def wake_on_ext0(self, pin, level):
        """
        Enable external interrupt #0 on gpio level.

        pin is a Pin object to be used for wake up; level is the pin state on
        which the interrupt will be activated: 0 | 1
        Valid pins are: 0, 2, 4, 12-15, 25-27, 32-39
        To disable external interrupt #0, execute rtc.wake_on_ext0(None)
        """
        pass

    def wake_on_ext1(self, pins, level):
        """
        Enable external interrupt #1 on multiple pins.

        pins is a tuple of Pin objects to be used as wakeup source:
        (Pin(x), Pin(y), ..., Pin(z))
        level is the pin state on which the interrupt will be activated: 0 | 1
        Valid pins are: 0, 2, 4, 12-15, 25-27, 32-39
        If level is set to 0, all pins must be at low level to wake up.
        If level is set to 1, any pin at high level will wake up.
        To disable external interrupt #1, execute rtc.wake_on_ext1(None)
        """
        pass

    def write(self, pos, value):
        """
        Write integer (32-bit) value to the position pos in RTC memory.
        Return True on success, False if failed.
        """
        pass

    def read(self, pos):
        """
        Read integer (32-bit) from the position pos in RTC memory.
        Returns None if no value has been written to the RTC integer memory
        yet or the RTC memory was corrupted (bad CRC); otherwise returns the
        integer written to the position or 0 (default value).
        """
        pass

    def write_string(self, text):
        """
        Write the string text to RTC memory.
        Return True on success, False if failed.
        """
        pass
|
'''
LeetCode 119 — Pascal's Triangle II.
Speed: 77.11%
Memory: 82.57%
Time Complexity: O(k^2), Space: O(k)
'''


class Solution:
    def getRow(self, k: int) -> List[int]:
        """Return row *k* (0-indexed) of Pascal's triangle."""
        # Start from all ones and apply the binomial recurrence in place,
        # sweeping right-to-left so each cell is read before it is updated.
        row = [1] * (k + 1)
        for size in range(1, k):
            for idx in range(size, 0, -1):
                row[idx] += row[idx - 1]
        return row
import re
import pytest
from vidispine.errors import InvalidInput, NotFound
from vidispine.utils import generate_metadata
def test_update_collection(
    vidispine, cassette, create_metadata_field, collection
):
    """Collection metadata updates succeed when every field exists."""
    for name in ('field_one', 'field_two'):
        create_metadata_field(name)

    payload = generate_metadata({
        'title': 'Foo bar',
        'field_one': 'eggs',
        'field_two': 123
    })
    vidispine.collection.update_metadata(collection, payload)

    assert cassette.all_played
def test_update_collection_field_does_not_exist(
    vidispine, cassette, create_metadata_field, collection
):
    """Referencing an undefined metadata field raises NotFound."""
    create_metadata_field('field_one')
    create_metadata_field('field_two')
    fields = {
        'title': 'Foo bar',
        'field_one': 'eggs',
        'field_two': 123,
        'field_three': 'stuff'
    }
    with pytest.raises(NotFound) as err:
        vidispine.collection.update_metadata(
            collection, generate_metadata(fields)
        )
    # Fix: the original called re.match(pattern, err) — passing an
    # ExceptionInfo object instead of a string and discarding the result —
    # so the message was never actually checked. Use ExceptionInfo.match(),
    # which raises on mismatch (as the sibling tests do). The '$' anchor
    # also sat mid-pattern (after VX-\d+); it belongs at the end.
    err.match(
        r'Endpoint not found: PUT'
        r' - http://localhost:8080/API/collection/VX-\d+/metadata$'
    )

    assert cassette.all_played
def test_update_collection_collection_not_found(vidispine, cassette):
    """Updating a missing collection raises NotFound."""
    payload = generate_metadata({
        'title': 'Foo bar',
        'field_one': 'eggs',
        'field_two': 123
    })

    with pytest.raises(NotFound) as excinfo:
        vidispine.collection.update_metadata('VX-1000000', payload)
    excinfo.match(r'Not Found: PUT')

    assert cassette.all_played
def test_update_collection_invalid_input(vidispine):
    """An empty metadata payload is rejected before any API call."""
    with pytest.raises(InvalidInput) as excinfo:
        vidispine.collection.update_metadata('VX-1000000', {})
    excinfo.match('Please supply metadata.')
def test_update_item(
    vidispine, cassette, create_metadata_field, item
):
    """Item metadata updates succeed when every field exists."""
    for name in ('field_one', 'field_two'):
        create_metadata_field(name)

    payload = generate_metadata({
        'title': 'Foo bar',
        'field_one': 'eggs',
        'field_two': 123
    })
    vidispine.item.update_metadata(item, payload)

    assert cassette.all_played
def test_update_item_field_does_not_exist(
    vidispine, cassette, create_metadata_field, item
):
    """Referencing an undefined metadata field raises NotFound."""
    create_metadata_field('field_one')
    create_metadata_field('field_two')
    fields = {
        'title': 'Foo bar',
        'field_one': 'eggs',
        'field_two': 123,
        'field_three': 'stuff'
    }
    with pytest.raises(NotFound) as err:
        vidispine.item.update_metadata(
            item, generate_metadata(fields)
        )
    # Fix: the original called re.match(pattern, err) — passing an
    # ExceptionInfo object instead of a string and discarding the result —
    # so the message was never actually checked. Use ExceptionInfo.match(),
    # which raises on mismatch. The '$' anchor also sat mid-pattern; it
    # belongs at the end.
    err.match(
        r'Endpoint not found: PUT'
        r' - http://localhost:8080/API/item/VX-\d+/metadata$'
    )

    assert cassette.all_played
def test_update_item_item_not_found(vidispine, cassette):
    """Updating a missing item raises NotFound."""
    payload = generate_metadata({
        'title': 'Foo bar',
        'field_one': 'eggs',
        'field_two': 123
    })

    with pytest.raises(NotFound) as excinfo:
        vidispine.item.update_metadata('VX-1000000', payload)
    excinfo.match(r'Not Found: PUT')

    assert cassette.all_played
def test_update_item_invalid_input(vidispine):
    """An empty metadata payload is rejected before any API call."""
    with pytest.raises(InvalidInput) as excinfo:
        vidispine.item.update_metadata('VX-1000000', {})
    excinfo.match('Please supply metadata.')
|
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/1066/A
def f(ll):
    """Count the multiples of v in [1, n] that lie outside [l, r].

    ll: sequence (n, v, l, r); values up to ~1e9.
    """
    n, v, l, r = ll
    below_range = (l - 1) // v   # multiples of v in [1, l-1]
    up_to_r = r // v             # multiples of v in [1, r]
    up_to_n = n // v             # multiples of v in [1, n]
    # All multiples, minus the ones that fall inside [l, r].
    return below_range + up_to_n - up_to_r
# Read the number of queries (up to ~1e4), then answer each one.
q = int(input())
for _ in range(q):
    params = list(map(int, input().split()))
    print(f(params))
|
import os
import unittest
from unittest import mock
from src.common.markdown_parser import MarkdownParser
class TestMarkdownParser(unittest.TestCase):
    """Unit tests for MarkdownParser's wiki-syntax extensions."""

    @classmethod
    def setUpClass(cls):
        # Shared parser for tests that only exercise pre/post-processing;
        # init_md=False skips building the full markdown pipeline.
        cls.md = MarkdownParser(init_md=False)
        cls.md.namespace = "dnd"

    def test_base_dir(self):
        # BASE_DIR must resolve to the repository root directory "my_wiki".
        from src.common.markdown_parser import BASE_DIR
        self.assertEqual("my_wiki", os.path.basename(BASE_DIR))

    @mock.patch("os.path.isfile")
    def test_convert_wiki_links(self, isfile_mock):
        # Fake the on-disk wiki layout: a page "exists" if its .md or .toml
        # file exists; any unexpected lookup fails loudly.
        def mock_func(path):
            path = path.replace("\\", "/")  # Normalize OS paths
            if path.endswith(r"my_wiki/data/dnd/class/cleric.md"):
                return True
            if path.endswith(r"my_wiki/data/dnd/mutants.md"):
                return False
            if path.endswith(r"my_wiki/data/dnd/mutants.toml"):
                return False
            if path.endswith(r"my_wiki/data/dnd/spell/enlarge-reduce.md"):
                return False
            if path.endswith(r"my_wiki/data/dnd/spell/enlarge-reduce.toml"):
                return True
            raise ValueError(path)
        isfile_mock.side_effect = mock_func

        # [[[...]]] wiki links: anchored, labeled, broken, and slash-mangled.
        pre_markdown = """
[[[class:cleric#toc|Table of Contents]]]
[[[class:cleric#domains]]]
[[[class:cleric]]]
[[[Mutants]]]
[[[spell:enlarge/reduce]]]
"""
        expected = """
<a class="wiki-link" href="/dnd/class/cleric#toc">Table of Contents</a>
<a class="wiki-link" href="/dnd/class/cleric#domains">domains</a>
<a class="wiki-link" href="/dnd/class/cleric">cleric</a>
<a class="wiki-link-broken" href="/dnd/Mutants">Mutants</a>
<a class="wiki-link" href="/dnd/spell/enlarge-reduce">enlarge/reduce</a>
"""
        actual = self.md.convert_wiki_links(pre_markdown)
        self.assertEqual(expected, actual)

    def test_add_header_links(self):
        # Headers h1-h3 get slugified ids plus a pilcrow self-link.
        pre_markdown = """
Test header 1
# Test 1
## This Is A Test!
### Also a test
"""
        expected = """
<p>Test header 1</p>
<h1 id="test-1">Test 1<a href="#test-1" class="header-link">¶</a></h1>
<h2 id="this-is-a-test">This Is A Test!<a href="#this-is-a-test" class="header-link">¶</a></h2>
<h3 id="also-a-test">Also a test<a href="#also-a-test" class="header-link">¶</a></h3>
"""
        md = MarkdownParser()
        actual = md.parse_md(pre_markdown)
        self.assertEqual(expected.strip(" ").lstrip("\n"), actual)

    def test_parse_accordion(self):
        # [[accordion]] blocks become a button plus a collapsible panel whose
        # inner markdown is fully rendered.
        pre_markdown = """
[[accordion Test Title]]
# Header
* Item 1
* *Item 2*
* **Item 3**
Text block
[[/accordion]]
"""
        expected = """<button class="accordion-button">Test Title</button>
<div class="accordion-panel">
<h1 id="header">Header<a href="#header" class="header-link">¶</a></h1>
<ul>
<li>Item 1</li>
<li><em>Item 2</em></li>
<li><strong>Item 3</strong></li>
</ul>
<p>Text block</p>
</div>
"""
        md = MarkdownParser()
        actual = md.parse_md(pre_markdown)
        self.assertEqual(expected, actual)

    def test_convert_popup_links(self):
        # (^file.jpg) links become hover-preview spans; ($cmd|...) links become
        # soundboard-command spans; ordinary links are left untouched.
        text = """
* [Ulkoria Stronemarrow](^ulkoria_stronemarrow.jpg), representative for the Watchful Order of Magists and Protectors
## [Faction NPCs](Faction NPCs)
## Enemy NPCs
* [Kenku](^kenku.jpg)
* [Gazer](^gazer.jpg)
* [Sword]($load|effect|WARFARE WEAPON SWORD SCRAPE PIRATE CUTLASS CIVIL WAR 01.mp3)
[Pause All]($pause|all)
[Some visual aids](^some_visual_aids.jpg) all on the [same line](^same-line.jpg).
[Wiki link](/dnd/wiki-link) before a [Visual aid](^visual_aid.jpg).
"""
        expected = """
* <span class="visual-aid-link" title="visual_aid|ulkoria_stronemarrow.jpg|Ulkoria Stronemarrow">Ulkoria Stronemarrow<span class="visual-aid-hover"><img class="visual-aid-hover-img" src="/static/img/visual_aids/ulkoria_stronemarrow.jpg"></span></span>, representative for the Watchful Order of Magists and Protectors
## [Faction NPCs](Faction NPCs)
## Enemy NPCs
* <span class="visual-aid-link" title="visual_aid|kenku.jpg|Kenku">Kenku<span class="visual-aid-hover"><img class="visual-aid-hover-img" src="/static/img/visual_aids/kenku.jpg"></span></span>
* <span class="visual-aid-link" title="visual_aid|gazer.jpg|Gazer">Gazer<span class="visual-aid-hover"><img class="visual-aid-hover-img" src="/static/img/visual_aids/gazer.jpg"></span></span>
* <span class="visual-aid-link" title="load|effect|WARFARE WEAPON SWORD SCRAPE PIRATE CUTLASS CIVIL WAR 01.mp3">Sword</span>
<span class="visual-aid-link" title="pause|all">Pause All</span>
<span class="visual-aid-link" title="visual_aid|some_visual_aids.jpg|Some visual aids">Some visual aids<span class="visual-aid-hover"><img class="visual-aid-hover-img" src="/static/img/visual_aids/some_visual_aids.jpg"></span></span> all on the <span class="visual-aid-link" title="visual_aid|same-line.jpg|same line">same line<span class="visual-aid-hover"><img class="visual-aid-hover-img" src="/static/img/visual_aids/same-line.jpg"></span></span>.
[Wiki link](/dnd/wiki-link) before a <span class="visual-aid-link" title="visual_aid|visual_aid.jpg|Visual aid">Visual aid<span class="visual-aid-hover"><img class="visual-aid-hover-img" src="/static/img/visual_aids/visual_aid.jpg"></span></span>.
"""
        md = MarkdownParser()
        actual = md.convert_popup_links(text)
        self.assertEqual(expected, actual)

    def test_build_bibliography(self):
        # [((bibcite key))] markers are numbered by the alphabetical order of
        # the bibliography entries and linked to the rendered list.
        text = """
<p><strong>Garrote.</strong>[((bibcite homebrew))] Can only be used on ...</p>
<p><strong>Lance.</strong>[((bibcite errata))] You have disadvantage ...</p>
[[bibliography]]
: errata : <a href="https://media.wizards.com/2018/dnd/downloads/PH-Errata.pdf">2018 PHB Errata</a>
: homebrew : Homebrew
[[/bibliography]]
"""
        expected = """
<p><strong>Garrote.</strong>[<a href="#homebrew">2</a>] Can only be used on ...</p>
<p><strong>Lance.</strong>[<a href="#errata">1</a>] You have disadvantage ...</p>
<p><strong>Bibliography</strong></p>
<ol>
<li><a id="errata" /><a href="https://media.wizards.com/2018/dnd/downloads/PH-Errata.pdf">2018 PHB Errata</a></li>
<li><a id="homebrew" />Homebrew</li>
</ol>
"""
        md = MarkdownParser()
        actual = md.build_bibliography(text)
        self.assertEqual(expected, actual)

    def test_convert_wiki_divs(self):
        # [[div]]/[[span]] wrappers survive markdown rendering and are then
        # converted to real HTML elements with their attributes intact.
        text = """
[[div class="test"]]
**Test** text
[[/div]]
**Before text** [[span class="test"]]_middle text_[[/span]] *after text*
"""
        expected = """<div class="test">
<p><strong>Test</strong> text</p>
</div>
<p><strong>Before text</strong> <span class="test"><em>middle text</em></span> <em>after text</em></p>
"""
        md = MarkdownParser()
        text = md.parse_md(text)
        actual = md.convert_wiki_divs(text)
        self.assertEqual(expected, actual)

    def test_add_breadcrumbs(self):
        # [[breadcrumb url|label]] becomes a back-arrow markdown link.
        text = """[[breadcrumb /dnd/class/Druid|Druid]]
Fake text"""
        expected = """⟵ [Druid](/dnd/class/Druid)
Fake text"""
        md = MarkdownParser()
        self.assertEqual(expected, md.add_breadcrumbs(text))
|
import json
import pprint
import core.utility as util
def visualize_dict_results(title: str, results: dict, outfile: str):
    """Pretty-print *results* under *title* and dump them as JSON to *outfile*.

    The JSON is written non-ASCII-safe (ensure_ascii=False) with 3-space
    indentation; a trailing blank line is printed as a visual separator.
    """
    print(title)
    pprint.pprint(results)
    serialized = json.dumps(results, ensure_ascii=False, indent=3)
    with open(outfile, "w") as out:
        out.write(serialized)
    print()
|
import numpy as np
def make_full_window(audio_data: np.ndarray, feature_window: int, feature_step: int):
    """Pad a 1-d array with trailing zeros so it tiles into full feature windows.

    Appends the minimum number of float32 zeros so that
    (len(audio_data) - feature_window) is evenly divisible by feature_step.

    Returns a new concatenated array; the input is not modified.
    """
    assert audio_data.shape[0] == audio_data.size, "input data is not 1-d"
    remainder = (audio_data.shape[0] - feature_window) % feature_step
    # Bug fix: when the length already lined up (remainder == 0) the original
    # computed num_zeros = feature_step and appended a full, unnecessary step
    # of zeros. Pad only the actual shortfall.
    num_zeros = (feature_step - remainder) % feature_step
    zero_steps = np.zeros((num_zeros,), dtype=np.float32)
    return np.concatenate((audio_data, zero_steps), axis=0)
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-27 09:40
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ReleaseModel table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ReleaseModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateField(unique=True)),
                ('Name', models.CharField(error_messages={'unique': 'Invalid release name - This name already exists in the DB.'}, max_length=20, unique=True)),
                ('ChangeList', models.CharField(max_length=200)),
                # NOTE(review): the absolute Windows path baked into
                # FileSystemStorage is machine-specific and will break on any
                # other host — consider a settings-driven storage location.
                ('DjangoImage', models.FileField(storage=django.core.files.storage.FileSystemStorage(location='C:\\Users\\mzabaleta\\workspace\\diy4dot0\\src\\releases\\files'), upload_to='')),
                ('RaspImage', models.FileField(storage=django.core.files.storage.FileSystemStorage(location='C:\\Users\\mzabaleta\\workspace\\diy4dot0\\src\\releases\\files'), upload_to='')),
            ],
        ),
    ]
|
#!/usr/bin/env python
import json
import logging
import subprocess
import sys
import textwrap
import xmlrpclib
# CLI usage string printed when no year argument is supplied.
USAGE = 'Usage: year_in_review.py [--json] <YEAR>'

# Note: Most of the bugzilla API code comes from Scrumbugz.

# Plain-dict stand-in for a cache backend; holds the Bugzilla session cookies.
cache = {}
log = logging.getLogger(__name__)
BZ_URL = 'http://bugzilla.mozilla.org/xmlrpc.cgi'
SESSION_COOKIES_CACHE_KEY = 'bugzilla-session-cookies'

# Bugzilla products whose bugs are counted in the stats.
PRODUCTS = [
    'support.mozilla.org'
]
# All Bugzilla resolution values ('' = unresolved).
# NOTE(review): not referenced elsewhere in this file.
BZ_RESOLUTIONS = [
    '',
    'FIXED',
    'INVALID',
    'WONTFIX',
    'DUPLICATE',
    'WORKSFORME',
    'INCOMPLETE',
    'SUPPORT',
    'EXPIRED',
    'MOVED'
]
# Default bug fields requested by BugzillaAPI.get_bugs().
BZ_FIELDS = [
    'id',
    'status',
    'resolution',
    'summary',
    'whiteboard',
    'assigned_to',
    'priority',
    'severity',
    'product',
    'component',
    'blocks',
    'depends_on',
    'creator',
    'creation_time',
    'last_change_time',
    'target_milestone',
]
# Component fields considered noise in API responses.
# NOTE(review): not referenced elsewhere in this file.
UNWANTED_COMPONENT_FIELDS = [
    'sort_key',
    'is_active',
    'default_qa_contact',
    'default_assigned_to',
    'description'
]
class SessionTransport(xmlrpclib.SafeTransport):
    """
    XML-RPC HTTPS transport that stores auth cookies in the cache.
    """
    _session_cookies = None

    @property
    def session_cookies(self):
        # Lazily restore cookies from the module-level cache on first use.
        if self._session_cookies is None:
            cookie = cache.get(SESSION_COOKIES_CACHE_KEY)
            if cookie:
                self._session_cookies = cookie
        return self._session_cookies

    def parse_response(self, response):
        """Capture Set-Cookie headers so later requests reuse the session."""
        cookies = self.get_cookies(response)
        if cookies:
            self._session_cookies = cookies
            # Fix: `cache` is a plain dict (see module top) and has no
            # .set() method — the original cache.set(key, value, 0) call
            # (Django-cache style, copied from Scrumbugz) raised
            # AttributeError as soon as a cookie arrived.
            cache[SESSION_COOKIES_CACHE_KEY] = self._session_cookies
            log.debug('Got cookie: %s', self._session_cookies)
        return xmlrpclib.Transport.parse_response(self, response)

    def send_host(self, connection, host):
        # Attach any cached session cookies to the outgoing request.
        cookies = self.session_cookies
        if cookies:
            for cookie in cookies:
                connection.putheader('Cookie', cookie)
                log.debug('Sent cookie: %s', cookie)
        return xmlrpclib.Transport.send_host(self, connection, host)

    def get_cookies(self, response):
        """Return the list of 'name=value' parts of any Set-Cookie headers."""
        cookie_headers = None
        if hasattr(response, 'msg'):
            cookies = response.msg.getheaders('set-cookie')
            if cookies:
                log.debug('Full cookies: %s', cookies)
                cookie_headers = [c.split(';', 1)[0] for c in cookies]
        return cookie_headers
class BugzillaAPI(xmlrpclib.ServerProxy):
    """Thin Bugzilla XML-RPC proxy with search/history/comments helpers."""

    def get_bug_ids(self, **kwargs):
        """Return list of ids of bugs from a search."""
        kwargs.update({
            'include_fields': ['id'],
        })
        log.debug('Searching bugs with kwargs: %s', kwargs)
        bugs = self.Bug.search(kwargs)
        return [bug['id'] for bug in bugs.get('bugs', [])]

    def get_bugs(self, **kwargs):
        """Fetch bugs, optionally mixing each bug's history and comments in.

        kwargs are passed to Bug.get (when 'ids' is given) or Bug.search.
        The special keys 'history' and 'comments' (both default True)
        control the extra per-bug fetches and are popped before the call.
        """
        defaults = {
            'include_fields': BZ_FIELDS,
        }
        get_history = kwargs.pop('history', True)
        get_comments = kwargs.pop('comments', True)
        defaults.update(kwargs)
        if 'ids' in defaults:
            # Permissive mode: missing/inaccessible ids are reported, not fatal.
            defaults['permissive'] = True
            log.debug('Getting bugs with kwargs: %s', defaults)
            bugs = self.Bug.get(defaults)
        else:
            log.debug('Searching bugs with kwargs: %s', defaults)
            bugs = self.Bug.search(defaults)

        bug_ids = [bug['id'] for bug in bugs.get('bugs', [])]

        if not bug_ids:
            return bugs

        # mix in history and comments
        history = comments = {}
        if get_history:
            history = self.get_history(bug_ids)
        if get_comments:
            comments = self.get_comments(bug_ids)
        for bug in bugs['bugs']:
            bug['history'] = history.get(bug['id'], [])
            bug['comments'] = comments.get(bug['id'], {}).get('comments', [])
            bug['comments_count'] = len(comments.get(bug['id'], {})
                                        .get('comments', []))
        return bugs

    def get_history(self, bug_ids):
        """Return {bug_id: history_list}; empty dict on XML-RPC failure."""
        log.debug('Getting history for bugs: %s', bug_ids)
        try:
            history = self.Bug.history({'ids': bug_ids}).get('bugs')
        except xmlrpclib.Fault:
            log.exception('Problem getting history for bug ids: %s', bug_ids)
            return {}
        return dict((h['id'], h['history']) for h in history)

    def get_comments(self, bug_ids):
        """Return {bug_id: comments_payload}; empty dict on XML-RPC failure."""
        log.debug('Getting comments for bugs: %s', bug_ids)
        try:
            comments = self.Bug.comments({
                'ids': bug_ids,
                'include_fields': ['id', 'creator', 'time', 'text'],
            }).get('bugs')
        except xmlrpclib.Fault:
            log.exception('Problem getting comments for bug ids: %s', bug_ids)
            return {}
        # Python 2 idiom (.iteritems); keys arrive as strings from XML-RPC.
        return dict((int(bid), cids) for bid, cids in comments.iteritems())
def wrap(text, indent='    '):
    """Fill each blank-line-separated paragraph of *text*, indenting every line."""
    paragraphs = text.split('\n\n')
    filled = [
        textwrap.fill(paragraph, expand_tabs=True, initial_indent=indent,
                      subsequent_indent=indent)
        for paragraph in paragraphs
    ]
    return '\n\n'.join(filled)
def parse_whiteboard(whiteboard):
    """Extract u=/c=/p=/s= tags from a whiteboard string.

    Unknown keys and malformed tokens (no '=' or more than one '=') are
    ignored; missing keys stay as empty strings.
    """
    bits = {'u': '', 'c': '', 'p': '', 's': ''}
    for token in whiteboard.split(' '):
        pieces = token.split('=')
        if len(pieces) == 2 and pieces[0] in bits:
            bits[pieces[0]] = pieces[1]
    return bits
def bugzilla_stats(year):
    """Collect bug creation/resolution statistics for *year* from Bugzilla.

    Returns a list of (section_title, data) tuples ready for printing or
    JSON-dumping by main().
    """
    stats = []

    bugzilla = BugzillaAPI(
        BZ_URL,
        transport=SessionTransport(use_datetime=True),
        allow_none=True)

    # -------------------------------------------
    # Bugs created this year
    # -------------------------------------------
    bugs = bugzilla.get_bugs(
        product=PRODUCTS,
        creation_time='%s-01-01' % year,
        include_fields=['id', 'creator', 'creation_time'],
        history=False,
        comments=False)
    bugs = bugs['bugs']

    total = 0
    creators = {}
    for bug in bugs:
        # We can only get creation_time >= somedate, so we need to nix
        # the bugs that are after the year we're looking for.
        if bug['creation_time'].year != int(year):
            continue
        total += 1
        creators[bug['creator']] = creators.get(bug['creator'], 0) + 1

    # Top ten creators by number of bugs filed (name is the email local part).
    creators = sorted(creators.items(), key=lambda item: item[1], reverse=True)
    stats.append(('Bugs created', {
        'total': total,
        'breakdown': [
            {'name': mem[0].split('@')[0], 'count': mem[1]}
            for mem in creators[:10]]
    }))

    # -------------------------------------------
    # Bugs resolved this year
    # -------------------------------------------
    bugs = bugzilla.get_bugs(
        product=PRODUCTS,
        last_change_time='%s-01-01' % year,
        include_fields=['id', 'summary', 'assigned_to', 'last_change_time', 'resolution'],
        status=['RESOLVED', 'VERIFIED', 'CLOSED'],
        history=True,
        comments=False)
    bugs = bugs['bugs']

    total = 0
    peeps = {}
    resolutions = {}
    traceback_bugs = []
    research_bugs = []
    tracker_bugs = []
    for bug in bugs:
        # We can only get last_change_time >= somedate, so we need to
        # nix the bugs that are after the year we're looking for.
        if bug['last_change_time'].year != int(year):
            continue

        # Bucket special bug categories by their summary prefix.
        # NOTE(review): traceback_bugs is collected but never reported below.
        if bug['summary'].lower().startswith('[traceback]'):
            traceback_bugs.append(bug)
        if bug['summary'].lower().startswith('[research]'):
            research_bugs.append(bug)
        if bug['summary'].lower().startswith('[tracker]'):
            tracker_bugs.append(bug)

        for hist in bug['history']:
            for change in hist['changes']:
                if not change['field_name'] == 'resolution':
                    continue

                # I think this history item comes from clearing the
                # resolution. i.e. reopening.
                if change['added'] == '':
                    continue

                total += 1

                # If the bug is marked FIXED, we assume that whoever
                # it was assigned to should get the "credit". If it
                # wasn't marked FIXED, then it's probably someone
                # doing triage and so whoever changed the resolution
                # should get "credit".
                if (change['added'] == 'FIXED'
                        and not 'nobody' in bug['assigned_to']):
                    person = bug['assigned_to']
                else:
                    person = hist['who']

                peeps_dict = peeps.setdefault(person, {})
                key = change['added']
                peeps_dict[key] = peeps_dict.get(key, 0) + 1
                resolutions[change['added']] = resolutions.get(
                    change['added'], 0) + 1

    # Top ten resolvers, ranked by total resolutions across all kinds.
    peeps = sorted(peeps.items(), key=lambda item: sum(item[1].values()), reverse=True)
    stats.append(('Bugs resolved', {
        'total': total,
        'breakdown': [
            {'name': mem[0].split('@')[0],
             'total': sum(mem[1].values()),
             'breakdown': mem[1].items()}
            for mem in peeps[:10]
        ]
    }))

    # -------------------------------------------
    # Resolution stats
    # -------------------------------------------
    resolutions = sorted(resolutions.items(), key=lambda item: item[1])
    stats.append(('Bugs resolved breakdown', resolutions))

    # -------------------------------------------
    # Research bugs
    # -------------------------------------------
    stats.append(('Research bugs', [
        {'id': bug['id'], 'summary': bug['summary']}
        for bug in research_bugs
    ]))

    # -------------------------------------------
    # Trackers
    # -------------------------------------------
    stats.append(('Tracker bugs', [
        {'id': bug['id'], 'summary': bug['summary']}
        for bug in tracker_bugs
    ]))

    return stats
def git(*args):
    """Run *args* as a subprocess and return its captured stdout.

    Despite the name this runs any command: callers pass the full argv,
    including the leading 'git'.
    """
    return subprocess.check_output(args)
def git_stats(year):
    """Collect per-committer commit and line-change stats for *year*.

    Must be run from inside the repository's working tree. Returns a list
    of (section_title, data) tuples like bugzilla_stats(). (Python 2 only:
    uses print statements.)
    """
    stats = []

    # Get the shas for all the commits we're going to look at.
    all_commits = subprocess.check_output([
        'git', 'log',
        '--after=%s-01-01' % year,
        '--before=%s-01-01' % (int(year) + 1),
        '--format=%H'
    ])
    all_commits = all_commits.splitlines()

    # Person -> # commits
    committers = {}
    # Person -> (# lines added, # lines deleted, # files changed)
    changes = {}

    for commit in all_commits:
        author = git('git', 'log', '--format=%an',
                     '{0}~..{1}'.format(commit, commit))
        author = author.strip()
        # FIXME - this is lame. what's going on is that there are
        # merge commits which have multiple authors, so we just grab
        # the second one.
        if '\n' in author:
            author = author.splitlines()[1]

        committers[author] = committers.get(author, 0) + 1

        diff_data = git('git', 'diff', '--numstat', '--find-copies-harder',
                        '{0}~..{1}'.format(commit, commit))

        total_added = 0
        total_deleted = 0
        total_files = 0
        for line in diff_data.splitlines():
            added, deleted, fn = line.split('\t')
            # Vendored third-party code does not count toward the totals.
            if fn.startswith('vendor/'):
                continue
            # git prints '-' for binary files; skip those line counts.
            if added != '-':
                total_added += int(added)
            if deleted != '-':
                total_deleted += int(deleted)
            total_files += 1

        old_changes = changes.get(author, (0, 0, 0))
        changes[author] = (
            old_changes[0] + total_added,
            old_changes[1] + total_deleted,
            old_changes[2] + total_files
        )

    print 'Total commits:', len(all_commits)
    print ''

    committers = sorted(
        committers.items(), key=lambda item: item[1], reverse=True)

    committers_data = []
    for person, count in committers:
        committers_data.append({
            'name': person,
            'data': {
                'commits': count,
                'added': changes[person][0],
                'deleted': changes[person][1],
                'files': changes[person][2]
            }
        })

    stats.append(('Git commit data', {
        'total commits': len(all_commits),
        'total lines added': sum([item[0] for item in changes.values()]),
        'total lines deleted': sum([item[1] for item in changes.values()]),
        'total files changed': sum([item[2] for item in changes.values()])
    }))
    stats.append(('Git committer data', committers_data))

    return stats
def main(argv):
    """CLI entry point: print year-in-review stats for the requested year.

    argv: command-line args after the program name, e.g. ['--json', '2012'].
    Returns a process exit code (1 when no year is given).
    """
    # XXX: This helps debug bugzilla xmlrpc bits.
    # logging.basicConfig(level=logging.DEBUG)
    do_json = False

    if not argv:
        print USAGE
        print 'Error: Must specify the year. e.g. 2012'
        return 1

    if '--json' in argv:
        print '>>> OMGWTFBBQ! You want it in JSON!'
        do_json = True
        argv.remove('--json')

    year = argv[0]

    output = []
    output.append(('Year', year))

    print '>>> Generating bugzilla stats....'
    output.extend(bugzilla_stats(year))

    print '>>> Generating git stats....'
    output.extend(git_stats(year))

    print ''
    if do_json:
        print json.dumps(output, indent=2)
    else:
        # Plain-text mode: section title, underline, then the data as JSON.
        for mem in output:
            print ''
            print mem[0]
            print '=' * len(mem[0])
            print ''
            # FIXME - this is gross
            print json.dumps(mem[1], indent=2)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
from flask import g, render_template, request, redirect, url_for, current_app
from datetime import date
from maintain_frontend.add_land_charge.validation.expiry_validator import ExpiryValidator
from maintain_frontend.decorators import requires_permission
from maintain_frontend.constants.permissions import Permissions
from maintain_frontend.add_land_charge.routing.review_router import ReviewRouter
def register_routes(bp):
    """Attach the does-charge-expire GET/POST handlers to blueprint *bp*."""
    url = '/add-local-land-charge/does-charge-expire'
    bp.add_url_rule(url, view_func=get_expiry, methods=['GET'])
    bp.add_url_rule(url, view_func=post_expiry, methods=['POST'])
@requires_permission([Permissions.add_llc])
def get_expiry():
    """Render the 'does this charge expire?' form, pre-filled from the session."""
    current_app.logger.info('Endpoint called')

    state = g.session.add_charge_state
    if state is None:
        # No charge in progress: send the user back to the start of the flow.
        current_app.logger.info('Redirecting to: {}'.format(url_for('add_land_charge.new')))
        return redirect(url_for('add_land_charge.new'))

    form_values = {}
    if state.expiry_date is not None:
        form_values['does_charge_expire'] = 'yes'
        form_values['charge_expiry_day'] = state.expiry_date.day
        form_values['charge_expiry_month'] = state.expiry_date.month
        form_values['charge_expiry_year'] = state.expiry_date.year

    current_app.logger.info("Displaying page 'expiry.html'")
    return render_template('expiry.html',
                           request_body=form_values,
                           submit_url=url_for('add_land_charge.post_expiry'))
@requires_permission([Permissions.add_llc])
def post_expiry():
    """Validate the expiry form and store the expiry date in the session.

    On validation failure the form is re-rendered with errors (HTTP 400);
    on success the user is routed onward by ReviewRouter.
    """
    current_app.logger.info("Endpoint called with does_charge_expire = '{}', charge_expiry_day = '{}', "
                            "charge_expiry_month = '{}', charge_expiry_year = '{}'".format(
                                request.form.get('does_charge_expire', ''),
                                request.form.get('charge_expiry_day', ''),
                                request.form.get('charge_expiry_month', ''),
                                request.form.get('charge_expiry_year', '')
                            ))

    # No charge in progress: send the user back to the start of the flow.
    if g.session.add_charge_state is None:
        current_app.logger.info('Redirecting to: {}'.format(url_for('add_land_charge.new')))
        return redirect(url_for('add_land_charge.new'))

    does_charge_expire = request.form.get('does_charge_expire', '')
    charge_expiry_day = request.form.get('charge_expiry_day', '')
    charge_expiry_month = request.form.get('charge_expiry_month', '')
    charge_expiry_year = request.form.get('charge_expiry_year', '')

    validation_errors = ExpiryValidator.validate(
        does_charge_expire,
        charge_expiry_day,
        charge_expiry_month,
        charge_expiry_year
    )
    current_app.logger.info('Running validation')

    if validation_errors.errors:
        # Re-render the form with the submitted values and error summary.
        current_app.logger.warning('Validation errors occurred')
        return render_template(
            'expiry.html',
            validation_errors=validation_errors.errors,
            validation_summary_heading=validation_errors.summary_heading_text,
            submit_url=url_for('add_land_charge.post_expiry'),
            request_body=request.form
        ), 400

    if does_charge_expire == 'yes':
        charge_expiry_date = None
        # Validator guarantees day/month/year form a valid date when present.
        if (
            charge_expiry_day and
            charge_expiry_month and
            charge_expiry_year
        ):
            charge_expiry_date = date(
                int(charge_expiry_year),
                int(charge_expiry_month),
                int(charge_expiry_day)
            )

        current_app.logger.info('Update expiry_date in session object')
        ReviewRouter.update_edited_field('expiry_date', charge_expiry_date)
        g.session.add_charge_state.expiry_date = charge_expiry_date
        g.session.commit()
    else:
        # Charge does not expire: clear any previously stored date.
        g.session.add_charge_state.expiry_date = None
        g.session.commit()

    return redirect(ReviewRouter.get_redirect_url('add_land_charge.get_additional_info'))
|
#!/usr/bin/env python
import multiprocessing
def worker():
    """Announce that a worker process has started."""
    message = 'new worker'
    print(message)
# Fix: guard the spawning code with __main__ so it is not re-executed when
# multiprocessing re-imports this module under the 'spawn' start method
# (default on Windows and macOS), which would otherwise recursively spawn
# processes forever.
if __name__ == '__main__':
    for _ in range(8):
        multiprocessing.Process(target=worker).start()
|
import numpy as np
import sounddevice as sd
from pynkTrombone.voc import Voc
import hashlib
sd.default.samplerate = 44100
#TODO: Add Lips, Add Tongue Shapes, Add Glottal Closing.
def remap(value, max_out, min_out, max_in=1.0, min_in=-1.0):
    """Linearly map *value* from [-1, 1] onto [min_out, max_out].

    NOTE: max_in/min_in are accepted for API symmetry but are not used —
    the input range is effectively hard-coded to [-1, 1].
    """
    span = max_out - min_out
    return span * (value + 1.0) / 2.0 + min_out
def main():
    """Drive the Voc vocal-tract synth with slowly varying control signals,
    hash the generated audio, and play it back through sounddevice.
    """
    vocal = Voc(sd.default.samplerate)

    outdata = np.array([], dtype=np.float32)

    # minimum = [0.0, 200.0, 0.6, 12.0, 2.0, 0.01]
    # maximum = [1.0, 800.0, 0.9, 30.0, 3.5, 0.04]
    # [touch, frequency, tenseness, tongue_index, tongue_diameter, velum] = activations
    for i in range(200):
        # Each control follows a sine of a different period, remapped into
        # its own value range. Most assignments are currently disabled.
        # Tenseness
        t = remap(np.sin(i / 30.0 - np.pi/2), 0.9, 0.6)
        # vocal.tenseness = t

        # Velum
        v = remap(np.sin(i / 20.0 - np.pi/2), 5.0, 0.00)
        # vocal.velum = v

        # Frequency
        f = remap(np.sin(i / 20.0 - np.pi/2), 1000.0,100.0)
        # vocal.frequency = f

        # Tongue
        td = remap(np.sin(i / 2.0 - np.pi/2), 3.5, 2.0)
        ti = remap(np.sin(i / 50.0 - np.pi/2), 32.0, 10.0)
        # vocal.tongue_shape(ti, td)

        # Lips
        l = remap(np.sin(i / 2.0 - np.pi/2), 3.5, 0)
        # vocal.tract.lips = l

        # Epiglottis
        e = remap(np.sin(i / 5.0 - np.pi/2), 3.5, 0)
        # vocal.tract.epiglottis = e

        # Trachea
        # NOTE(review): 't' is reassigned here, clobbering the tenseness
        # value computed above — the print below shows the trachea value.
        t = remap(np.sin(i / 5.0 - np.pi/2), 3.5, 0)
        vocal.tract.trachea = t

        print(t, v, f, ti, td, l)

        out = np.array(vocal.compute(randomize=True), dtype=np.float32)
        outdata = np.append(outdata, out.reshape(-1, 1))

    # SHA-256 of the raw samples — handy for regression comparisons.
    m = hashlib.sha256()
    m.update(outdata)
    print(m.digest())
    # assert m.digest() == b'8i\xa9\t\x0e\xc2f\xc4\x03na\xeb\xcb\xe8\x89\x92\xde\xf2\x8a\xdb\xbcl\xb8,(\x93\xf5\x16\xd9\xb0S\xec'

    # Duplicate the mono signal into two columns for stereo playback.
    outdata = np.repeat(outdata.reshape(-1, 1), 2, axis=1)
    print(outdata, outdata.shape)
    sd.play(outdata, samplerate=sd.default.samplerate, blocking=True)
    print("Done")


if __name__ == '__main__':
    main()
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Active-Space Reduction interface."""
from typing import List, Optional, Tuple, Union
from qiskit_nature.deprecation import DeprecatedType, warn_deprecated_same_type_name
from .second_quantization import ActiveSpaceTransformer as NewActiveSpaceTransformer
class ActiveSpaceTransformer(NewActiveSpaceTransformer):
    """**DEPRECATED**: Please use the `second_quantization` module instead!
    Please use :class:`~qiskit_nature.transformers.second_quantization.ActiveSpaceTransformer`
    instead.
    """
    def __init__(
        self,
        num_electrons: Optional[Union[int, Tuple[int, int]]] = None,
        num_molecular_orbitals: Optional[int] = None,
        active_orbitals: Optional[List[int]] = None,
    ) -> None:
        """Emit the deprecation warning, then delegate to the replacement class.

        Args:
            num_electrons: number of active electrons, or an (alpha, beta)
                tuple; forwarded unchanged to the new implementation.
            num_molecular_orbitals: number of active molecular orbitals;
                forwarded unchanged.
            active_orbitals: explicit list of active orbital indices;
                forwarded unchanged.
        """
        # Standard Qiskit Nature deprecation notice (deprecated as of 0.2.0),
        # pointing callers at the second_quantization module.
        warn_deprecated_same_type_name(
            "0.2.0",
            DeprecatedType.CLASS,
            "ActiveSpaceTransformer",
            "from qiskit_nature.transformers.second_quantization as a direct replacement",
        )
        # This shim only adds the warning; all behavior lives in the superclass.
        super().__init__(num_electrons, num_molecular_orbitals, active_orbitals)
|
from contextlib import contextmanager
from thenewboston_node.core.clients.node import NodeClient
@contextmanager
def force_node_client(node_client: NodeClient):
    """Pin the ``NodeClient`` instance cache to *node_client* for the duration
    of the ``with`` block.

    On exit the cache is cleared rather than restored -- NOTE(review): any
    instance that was cached before entering is discarded; confirm that is
    the intended behavior for nested/sequential uses.
    """
    try:
        # Install the forced instance before handing control to the caller.
        NodeClient.set_instance_cache(node_client)
        yield
    finally:
        # Always drop the override, even if the body raised.
        NodeClient.clear_instance_cache()
|
# Copyright 2017 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import HCBase
from .buttons import ButtonOptions
from .types import CSSObject
from traitlets import Instance
class Navigation(HCBase):
    """Wrapper for the Highcharts ``navigation`` configuration section.

    The trait names mirror the Highcharts API for styling the exporting
    menu and its buttons -- presumably; confirm against the Highcharts docs.
    """

    # Options for the exporting-related chart buttons (nested ButtonOptions).
    buttonOptions = Instance(ButtonOptions, allow_none=True)
    # CSS applied to menu items while hovered.
    menuItemHoverStyle = CSSObject(None, allow_none=True)
    # CSS applied to individual menu items.
    menuItemStyle = CSSObject(None, allow_none=True)
    # CSS applied to the menu container itself.
    menuStyle = CSSObject(None, allow_none=True)
|
"""Video demo utils for showing live object detection from a camera
python3 video_demo.py --restore-weights=weights/<weights.h5>
"""
import ssd
import numpy as np
import cv2
import argparse
import datetime
import skimage
import label_utils
import config
from ssd import SSD
from boxes import show_boxes
from skimage.io import imread
from model_utils import ssd_parser
class VideoDemo():
    """Live object-detection demo.

    Grabs frames from a camera, runs the SSD detector on each frame, draws
    labelled detection boxes plus a running price "receipt" overlay, and
    optionally records the annotated video.

    Arguments:
        detector: object exposing ``evaluate(image=...)`` returning
            ``(class_names, rects)`` -- assumed to be an ``SSD`` instance.
        camera (int): OpenCV camera index.
        width (int): requested capture frame width in pixels.
        height (int): requested capture frame height in pixels.
        record (bool): when True, write annotated frames to ``filename``.
        filename (str): output video path used when ``record`` is True.
    """
    def __init__(self,
                 detector,
                 camera=0,
                 width=640,
                 height=480,
                 record=False,
                 filename="demo.mp4"):
        self.camera = camera
        self.detector = detector
        self.width = width
        self.height = height
        self.record = record
        self.filename = filename
        self.videowriter = None
        self.initialize()

    def initialize(self):
        """Open the camera, set the frame size and (optionally) the recorder."""
        self.capture = cv2.VideoCapture(self.camera)
        if not self.capture.isOpened():
            print("Error opening video camera")
            return
        # cap.set(cv2.CAP_PROP_FPS, 5)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        if self.record:
            self.videowriter = cv2.VideoWriter(self.filename,
                                               cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
                                               10,
                                               (self.width, self.height),
                                               isColor=True)

    def _draw_detections(self, image, class_names, rects, font, line_type):
        """Draw one labelled box per detection; return {class name: count}."""
        items = {}
        for name_str, rect in zip(class_names, rects):
            x1 = int(rect[0])
            y1 = int(rect[1])
            x2 = int(rect[0] + rect[2])
            y2 = int(rect[1] + rect[3])
            # Detector labels look like "name:score"; keep only the name.
            name = name_str.split(":")[0]
            items[name] = items.get(name, 0) + 1
            index = label_utils.class2index(name)
            color = label_utils.get_box_rgbcolor(index)
            cv2.rectangle(image, (x1, y1), (x2, y2), color, 3)
            cv2.putText(image,
                        name,
                        (x1, y1 - 15),
                        font,
                        0.5,
                        color,
                        line_type)
        return items

    def _draw_receipt(self, image, items, font):
        """Overlay a white 'receipt' listing per-item cost and the total."""
        count = len(items)
        if count == 0:
            return
        xmin = 10
        ymin = 10
        xmax = 220
        ymax = 40 + count * 30
        # Filled white background for the receipt.
        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 255, 255), thickness=-1)
        prices = config.params['prices']
        total = 0.0
        for key, qty in items.items():
            cost = qty * prices[label_utils.class2index(key)]
            total += cost
            display = "%0.2f :%dx %s" % (cost, qty, key)
            cv2.putText(image,
                        display,
                        (xmin + 10, ymin + 25),
                        font,
                        0.55,
                        (0, 0, 0),
                        1)
            ymin += 30
            cv2.line(image, (xmin + 10, ymin), (xmax - 10, ymin), (0, 0, 0), 1)
        display = "P%0.2f Total" % (total)
        cv2.putText(image,
                    display,
                    (xmin + 5, ymin + 25),
                    font,
                    0.75,
                    (0, 0, 0),
                    1)

    def loop(self):
        """Main capture/detect/display loop; press 'q' to quit."""
        font = cv2.FONT_HERSHEY_DUPLEX
        line_type = 1
        while True:
            ret, image = self.capture.read()
            # BUGFIX: bail out when the camera stops delivering frames instead
            # of crashing inside cvtColor on a None image.
            if not ret:
                break
            # Detector expects RGB floats in [0, 1]; OpenCV delivers BGR bytes.
            img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
            class_names, rects = self.detector.evaluate(image=img)
            items = self._draw_detections(image, class_names, rects, font, line_type)
            # BUGFIX: the receipt-overlay code used to sit after an
            # unconditional `continue` and was never executed; it is now drawn
            # before the frame is shown/recorded so it actually appears.
            self._draw_receipt(image, items, font)
            cv2.imshow('image', image)
            if self.videowriter is not None:
                if self.videowriter.isOpened():
                    self.videowriter.write(image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # When everything done, release the capture
        self.capture.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Extend the shared SSD argument parser with demo-specific options.
    parser = ssd_parser()
    parser.add_argument("--camera",
                        default=0,
                        type=int,
                        help="Camera index")
    parser.add_argument("--record",
                        default=False,
                        action='store_true',
                        help="Record video")
    parser.add_argument("--filename",
                        default="demo.mp4",
                        help="Video filename")
    args = parser.parse_args()

    # Build the detector, optionally restoring pretrained weights.
    ssd = SSD(args)
    if args.restore_weights:
        ssd.restore_weights()

    # Run the live demo until the user quits.
    videodemo = VideoDemo(detector=ssd,
                          camera=args.camera,
                          record=args.record,
                          filename=args.filename)
    videodemo.loop()
|
# -*- coding: utf-8 -*-
from newspaper import Article
import nltk
""" COMUM """
def HASH_TEXTO(NOTICIA):
    """Return the SHA-512 hex digest of *NOTICIA* (UTF-8 encoded)."""
    import hashlib
    encoded = NOTICIA.encode('utf-8')
    return hashlib.sha512(encoded).hexdigest()
def TIRAR_ESPACOS(TITULO):
    """Replace spaces with underscores and forward slashes with 'BARRA'.

    e.g. "era do cão" -> "era_do_cão"; slashes are removed because the
    result is later used as a filename.
    """
    # Single-pass character mapping instead of chained str.replace calls.
    return TITULO.translate(str.maketrans({" ": "_", "/": "BARRA"}))
def REMOVER_ACENTOS(TITULO):
    """Strip diacritics: decompose to NFKD, then drop combining marks.

    e.g. "cão" -> "cao".
    """
    import unicodedata
    decomposed = unicodedata.normalize('NFKD', TITULO)
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))
def TIRAR_ACENTOS_E_OBTER_NOME_FICHEIRO(TITULO):
    """Turn an article title into a safe filename.

    Spaces become underscores, slashes become 'BARRA', accents are stripped,
    and the result is wrapped as 'Noticia-<title>.htm'
    (e.g. "era do cão" -> "Noticia-era_do_cao.htm").
    """
    nome = REMOVER_ACENTOS(TIRAR_ESPACOS(TITULO))
    nome = "Noticia-%s.htm" % nome
    print("Ficheiro guardado como: %s" % nome)
    return nome
""" // COMUM """
# --- Demo: download one article, dump its HTML to disk, then crawl the site's feeds ---
from newspaper import Article

# Successive test URLs kept for reference; only the LAST assignment is used.
URL = 'http://www.cnn.com/2014/01/12/world/asia/north-korea-charles-smith/index.html'
URL = "http://thehackernews.com/2017/05/browser-camera-microphone.html"
URL = "http://thehackernews.com/2017/05/shadow-brokers-exploits.html"
URL = "https://pplware.sapo.pt/gadgets/voa-na-boa-onde-pilotar-drone/"
URL = "https://thehackernews.com/2017/12/ctb-locker-cerber-ransomware.html"

# Fetch and parse the article (keep_article_html preserves the cleaned HTML).
a = Article(URL, keep_article_html=True)
a.download()
a.parse()

TITULO = a.title
HTML = a.article_html
AUTORES = a.authors      # e.g. ['Leigh Ann Caldwell', 'John Honway']
DATA = a.publish_date    # e.g. datetime.datetime(2013, 12, 30, 0, 0)
NOTICIA = a.text         # plain-text body of the article
IMAGEM = a.top_image     # URL of the article's lead image
print ("------------------------\n%s\n" % TITULO)
print( "%s<br>\n<br/>FIM" % a.article_html)

# Write the article HTML to a sanitized filename.
# BUGFIX: the file handle was previously opened without ever being closed and
# with the locale-default encoding; a context manager plus explicit UTF-8
# fixes both the handle leak and potential UnicodeEncodeError on accented
# Portuguese content.
FICHEIRO = TIRAR_ACENTOS_E_OBTER_NOME_FICHEIRO(TITULO)
TUDO = ( "%s<br>FIM" % a.article_html)
with open(FICHEIRO, 'w', encoding='utf-8') as saida:
    saida.write(TUDO)

# Crawl the whole site and report its size and RSS/Atom feed URLs.
import newspaper
cbs_paper = newspaper.build(URL, memoize_articles=False)
print ("\n--------------------\nTamanho : %s\n----------------------" % cbs_paper.size())
print ("\n--------------------\nFeeds\n----------------------")
for feed_url in cbs_paper.feed_urls():
    print (feed_url)
print ("\n--------------------\n//Feeds\n----------------------")
# reference: https://media.readthedocs.org/pdf/newspaper/latest/newspaper.pdf
"""
>>> import newspaper
>>> cnn1 = newspaper.build('http://cnn.com')
>>> urls1 = set([article.url for article in cnn1.articles])
>>> cnn2 = newspaper.build('http://cnn.com')
>>> urls2 = set([article.url for article in cnn2.articles])
>>> urls1.intersection(urls2)
set() # no urls are shared between calls when caching is on
>>> cnn1_fresh = newspaper.build('http://cnn.com', memoize_articles=False)
>>> urls1_fresh = set([article.url for article in cnn1_fresh.articles])
>>> cnn2_fresh = newspaper.build('http://cnn.com', memoize_articles=False)
>>> urls2_fresh = set([article.url for article in cnn2_fresh.articles])
>>> len(urls1_fresh.intersection(urls2_fresh))
1078 # the same urls are returned because caching is off (memoize_articles=False)
""" |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-08-06 02:47
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.gis.db.models.fields
import django.contrib.postgres.fields.jsonb
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0023_communicationslogentry_log_type'),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_type', models.IntegerField(blank=True, choices=[(1, 'Permit'), (2, 'Licence/permit'), (3, 'Part 5'), (4, 'Emergency works'), (5, 'Part 5 - Amendment Request'), (6, 'Part 5 - Amendment Application'), (7, 'Test - Application'), (8, 'Amend Permit'), (9, 'Amend Licence'), (10, 'Renew Permit'), (11, 'Renew Licence')], null=True)),
('apply_on_behalf_of', models.IntegerField(blank=True, choices=[(1, 'On Behalf of yourself'), (2, 'On Behalf of your company / government agency'), (3, 'On Behalf of indivdual as somebody else (as an authorised agent)'), (4, 'On Behalf of a company as somebody else (as an authorised agent)'), (5, 'Internal')], null=True)),
('state', models.IntegerField(choices=[(0, 'Unknown'), (1, 'Draft'), (2, 'With Admin Officer'), (3, 'With Referrals'), (4, 'With Assessor'), (5, 'With Manager'), (6, 'Issued'), (7, 'Issued (with admin)'), (8, 'Declined'), (9, 'New'), (10, 'Approved'), (11, 'Expired'), (12, 'With Director'), (13, 'With Executive'), (14, 'Completed'), (15, 'Form Creator'), (16, 'Current'), (17, 'Deleted')], default=1, editable=False)),
('title', models.CharField(max_length=256)),
('description', models.TextField(blank=True, null=True)),
('submit_date', models.DateField()),
('expire_date', models.DateField(blank=True, null=True)),
('proposed_commence', models.DateField(blank=True, null=True)),
('proposed_end', models.DateField(blank=True, null=True)),
('issue_date', models.DateField(blank=True, null=True)),
('cost', models.CharField(blank=True, max_length=256, null=True)),
('project_no', models.CharField(blank=True, max_length=256, null=True)),
('related_permits', models.TextField(blank=True, null=True)),
('over_water', models.BooleanField(default=False)),
('vessel_or_craft_details', models.IntegerField(blank=True, null=True)),
('max_participants', models.IntegerField(blank=True, null=True)),
('proposed_location', models.SmallIntegerField(blank=True, choices=[(0, 'On Land'), (1, 'On Water'), (2, 'Both')], null=True)),
('address', models.TextField(blank=True, null=True)),
('jetties', models.TextField(blank=True, null=True)),
('jetty_dot_approval', models.NullBooleanField(default=None)),
('jetty_dot_approval_expiry', models.DateField(blank=True, null=True)),
('drop_off_pick_up', models.TextField(blank=True, null=True)),
('food', models.NullBooleanField(default=None)),
('beverage', models.NullBooleanField(default=None)),
('liquor_licence', models.NullBooleanField(default=None)),
('byo_alcohol', models.NullBooleanField(default=None)),
('sullage_disposal', models.TextField(blank=True, null=True)),
('waste_disposal', models.TextField(blank=True, null=True)),
('refuel_location_method', models.TextField(blank=True, null=True)),
('berth_location', models.TextField(blank=True, null=True)),
('anchorage', models.TextField(blank=True, null=True)),
('operating_details', models.TextField(blank=True, null=True)),
('river_lease_require_river_lease', models.NullBooleanField(default=None)),
('river_lease_reserve_licence', models.NullBooleanField(default=None)),
('river_lease_application_number', models.CharField(blank=True, max_length=30, null=True)),
('proposed_development_current_use_of_land', models.TextField(blank=True, null=True)),
('proposed_development_description', models.TextField(blank=True, null=True)),
('publish_documents', models.DateField(blank=True, null=True)),
('publish_draft_report', models.DateField(blank=True, null=True)),
('publish_final_report', models.DateField(blank=True, null=True)),
('publish_determination_report', models.DateField(blank=True, null=True)),
('routeid', models.CharField(blank=True, default=1, max_length=4, null=True)),
('assessment_start_date', models.DateField(blank=True, null=True)),
('approval_id', models.IntegerField(blank=True, null=True)),
('number_of_crafts', models.IntegerField(blank=True, null=True)),
('route_status', models.CharField(blank=True, default='Draft', max_length=256, null=True)),
('submitter_comment', models.TextField(blank=True, default='', max_length=256, null=True)),
('applicant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='applicant', to=settings.AUTH_USER_MODEL)),
('assessed_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='assessed_by', to=settings.AUTH_USER_MODEL)),
('assignee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='assignee', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ApplicationInvoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invoice_reference', models.CharField(max_length=64)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.Application')),
],
),
migrations.CreateModel(
name='ApplicationPurpose',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('purpose', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Communication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comms_to', models.CharField(blank=True, max_length=256, null=True)),
('comms_from', models.CharField(blank=True, max_length=256, null=True)),
('subject', models.CharField(blank=True, max_length=256, null=True)),
('comms_type', models.IntegerField(choices=[(0, 'None'), (1, 'Phone'), (2, 'Email'), (3, 'Mail'), (4, 'System')], default=0)),
('details', models.TextField(blank=True, null=True)),
('state', models.IntegerField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='applications.Application')),
],
),
migrations.CreateModel(
name='CommunicationAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comms_to', models.CharField(blank=True, max_length=256, null=True)),
('comms_from', models.CharField(blank=True, max_length=256, null=True)),
('subject', models.CharField(blank=True, max_length=256, null=True)),
('comms_type', models.IntegerField(choices=[(0, 'None'), (1, 'Phone'), (2, 'Email'), (3, 'Mail')], default=0)),
('details', models.TextField(blank=True, null=True)),
('state', models.IntegerField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='CommunicationCompliance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comms_to', models.CharField(blank=True, max_length=256, null=True)),
('comms_from', models.CharField(blank=True, max_length=256, null=True)),
('subject', models.CharField(blank=True, max_length=256, null=True)),
('comms_type', models.IntegerField(choices=[(0, 'None'), (1, 'Phone'), (2, 'Email'), (3, 'Mail')], default=0)),
('details', models.TextField(blank=True, null=True)),
('state', models.IntegerField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='CommunicationOrganisation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comms_to', models.CharField(blank=True, max_length=256, null=True)),
('comms_from', models.CharField(blank=True, max_length=256, null=True)),
('subject', models.CharField(blank=True, max_length=256, null=True)),
('comms_type', models.IntegerField(choices=[(0, 'None'), (1, 'Phone'), (2, 'Email'), (3, 'Mail')], default=0)),
('details', models.TextField(blank=True, null=True)),
('state', models.IntegerField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('org', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation')),
],
),
migrations.CreateModel(
name='Compliance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('approval_id', models.IntegerField(blank=True, null=True)),
('title', models.CharField(blank=True, max_length=256, null=True)),
('app_type', models.IntegerField(blank=True, choices=[(1, 'Permit'), (2, 'Licence/permit'), (3, 'Part 5'), (4, 'Emergency works'), (5, 'Part 5 - Amendment Request'), (6, 'Part 5 - Amendment Application'), (7, 'Test - Application'), (8, 'Amend Permit'), (9, 'Amend Licence'), (10, 'Renew Permit'), (11, 'Renew Licence')], null=True)),
('assessed_date', models.DateField(blank=True, null=True)),
('status', models.IntegerField(choices=[(1, 'Current'), (2, 'Due'), (3, 'Future'), (4, 'Approved'), (5, 'With Assessor'), (6, 'With Manager'), (7, 'With Licence Holder'), (8, 'Overdue'), (9, 'Submitted')], default=3)),
('submit_date', models.DateTimeField(auto_now_add=True)),
('due_date', models.DateField(blank=True, null=True)),
('compliance', models.TextField(blank=True, help_text='Information to fulfil requirement of condition.', null=True)),
('comments', models.TextField(blank=True, null=True)),
('approve_date', models.DateField(blank=True, null=True)),
('applicant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='compliance_applicant', to=settings.AUTH_USER_MODEL)),
('assessed_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='compliance_assigned_by', to=settings.AUTH_USER_MODEL)),
('assignee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='compliance_assignee', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ComplianceGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('approval_id', models.IntegerField(blank=True, null=True)),
('title', models.CharField(blank=True, max_length=256, null=True)),
('app_type', models.IntegerField(blank=True, choices=[(1, 'Permit'), (2, 'Licence/permit'), (3, 'Part 5'), (4, 'Emergency works'), (5, 'Part 5 - Amendment Request'), (6, 'Part 5 - Amendment Application'), (7, 'Test - Application'), (8, 'Amend Permit'), (9, 'Amend Licence'), (10, 'Renew Permit'), (11, 'Renew Licence')], null=True)),
('status', models.IntegerField(choices=[(1, 'Current'), (2, 'Due'), (3, 'Future'), (4, 'Approved'), (5, 'With Assessor'), (6, 'With Manager'), (7, 'With Licence Holder'), (8, 'Overdue'), (9, 'Submitted')], default=3)),
('due_date', models.DateField(blank=True, null=True)),
('applicant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='compliance_group_applicant', to=settings.AUTH_USER_MODEL)),
('assignee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='compliance_group_assignee', to=settings.AUTH_USER_MODEL)),
('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation')),
],
),
migrations.CreateModel(
name='Condition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('condition', models.TextField(blank=True, null=True)),
('status', models.IntegerField(choices=[(1, 'Proposed'), (2, 'Applied'), (3, 'Rejected'), (4, 'Cancelled')], default=1)),
('due_date', models.DateField(blank=True, null=True)),
('recur_pattern', models.IntegerField(blank=True, choices=[(1, 'Weekly'), (2, 'Monthly'), (3, 'Annually')], null=True)),
('recur_freq', models.PositiveIntegerField(blank=True, help_text='How frequently is the recurrence pattern applied (e.g. every 2 months)', null=True, verbose_name='recurrence frequency')),
('suspend', models.BooleanField(default=False)),
('advise', models.TextField(blank=True, null=True)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='applications.Application')),
],
),
migrations.CreateModel(
name='ConditionPredefined',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=256, null=True)),
('condition', models.TextField(blank=True, null=True)),
('status', models.IntegerField(choices=[(0, 'Inactive'), (1, 'Active')], default=1)),
],
),
migrations.CreateModel(
name='Craft',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Delegate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email_user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('organisation', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lot', models.CharField(blank=True, max_length=256, null=True)),
('reserve', models.CharField(blank=True, max_length=256, null=True)),
('suburb', models.CharField(blank=True, max_length=256, null=True)),
('intersection', models.CharField(blank=True, max_length=256, null=True)),
('lga', models.CharField(blank=True, max_length=256, null=True)),
('poly', django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4326)),
('title_volume', models.CharField(blank=True, max_length=256, null=True)),
('folio', models.CharField(blank=True, max_length=30, null=True)),
('dpd_number', models.CharField(blank=True, max_length=30, null=True)),
('location', models.CharField(blank=True, max_length=256, null=True)),
('street_number_name', models.CharField(blank=True, max_length=256, null=True)),
('local_government_authority', models.CharField(blank=True, max_length=256, null=True)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.Application')),
],
),
migrations.CreateModel(
name='OrganisationContact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('first_name', models.CharField(blank=True, max_length=128, verbose_name='Given name(s)')),
('last_name', models.CharField(blank=True, max_length=128)),
('phone_number', models.CharField(blank=True, max_length=50, null=True)),
('mobile_number', models.CharField(blank=True, max_length=50, null=True)),
('fax_number', models.CharField(blank=True, max_length=50, null=True)),
('organisation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation')),
],
),
migrations.CreateModel(
name='OrganisationExtras',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pin1', models.CharField(blank=True, max_length=50, null=True)),
('pin2', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.CreateModel(
name='OrganisationPending',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=128, null=True)),
('abn', models.CharField(blank=True, max_length=50, null=True, verbose_name='ABN')),
('status', models.IntegerField(choices=[(1, 'Pending'), (2, 'Approved'), (3, 'Declined')], default=1)),
('company_exists', models.BooleanField(default=False)),
('submit_date', models.DateField(auto_now_add=True, null=True)),
('pin1', models.CharField(blank=True, max_length=50, null=True)),
('pin2', models.CharField(blank=True, max_length=50, null=True)),
('assignee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='org_pending_assignee', to=settings.AUTH_USER_MODEL)),
('billing_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='org_pending_billing_address', to='accounts.Address')),
('email_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PublicationFeedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('address', models.CharField(max_length=256)),
('suburb', models.CharField(max_length=100)),
('state', models.IntegerField(choices=[(1, 'Western Australia'), (2, 'New South Wales'), (3, 'Victoria'), (4, 'South Australia'), (5, 'Northern Territory'), (6, 'Queensland'), (7, 'Australian Capital Territory'), (8, 'Tasmania')])),
('postcode', models.CharField(max_length=4)),
('phone', models.CharField(max_length=20)),
('email', models.EmailField(max_length=254)),
('comments', models.TextField(blank=True, null=True)),
('status', models.CharField(max_length=20)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.Application')),
],
),
migrations.CreateModel(
name='PublicationNewspaper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(blank=True, null=True)),
('newspaper', models.CharField(max_length=150)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.Application')),
],
),
migrations.CreateModel(
name='PublicationWebsite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.Application')),
],
),
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('upload', models.FileField(max_length=512, storage=django.core.files.storage.FileSystemStorage(location=b'/home/jason/projects/statdev-ledger/statdev/private-media'), upload_to='uploads/%Y/%m/%d')),
('name', models.CharField(max_length=256)),
('category', models.IntegerField(blank=True, choices=[(1, 'Landowner consent'), (2, 'Deed'), (3, 'Assessment report'), (4, 'Referee response'), (5, 'Lodgement document'), (6, 'Draft document'), (7, 'Final document'), (8, 'Determination document'), (9, 'Completed document')], null=True)),
('metadata', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('text_content', models.TextField(blank=True, editable=False, null=True)),
],
),
migrations.CreateModel(
name='Referral',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('details', models.TextField(blank=True, null=True)),
('sent_date', models.DateField()),
('period', models.PositiveIntegerField(verbose_name='period (days)')),
('expire_date', models.DateField(blank=True, editable=False, null=True)),
('response_date', models.DateField(blank=True, null=True)),
('feedback', models.TextField(blank=True, null=True)),
('proposed_conditions', models.TextField(blank=True, null=True)),
('status', models.IntegerField(choices=[(1, 'Referred'), (2, 'Responded'), (3, 'Recalled'), (4, 'Expired'), (5, 'With Admin')], default=5)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.Application')),
('records', models.ManyToManyField(blank=True, to='applications.Record')),
('referee', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='StakeholderComms',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('name', models.CharField(max_length=255)),
('sent_date', models.DateTimeField(auto_now_add=True)),
('role', models.IntegerField(choices=[(0, 'None'), (1, 'Applicant'), (2, 'Submitter'), (3, 'Referral'), (4, 'Feedback')], default=0)),
('comm_type', models.IntegerField(choices=[(0, 'None'), (1, 'Email'), (2, 'Posted')], default=0)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.Application')),
],
),
migrations.CreateModel(
name='Vessel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vessel_type', models.SmallIntegerField(blank=True, choices=[(0, 'Vessel'), (1, 'Craft')], null=True)),
('name', models.CharField(max_length=256)),
('vessel_id', models.CharField(blank=True, max_length=256, null=True, verbose_name='Vessel identification')),
('size', models.PositiveIntegerField(blank=True, null=True, verbose_name='size (m)')),
('engine', models.PositiveIntegerField(blank=True, null=True, verbose_name='engine (kW)')),
('passenger_capacity', models.PositiveIntegerField(blank=True, null=True)),
('registration', models.ManyToManyField(blank=True, to='applications.Record')),
],
),
migrations.AddField(
model_name='publicationwebsite',
name='original_document',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='original_document', to='applications.Record'),
),
migrations.AddField(
model_name='publicationwebsite',
name='published_document',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='published_document', to='applications.Record'),
),
migrations.AddField(
model_name='publicationnewspaper',
name='records',
field=models.ManyToManyField(blank=True, related_name='newspaper', to='applications.Record'),
),
migrations.AddField(
model_name='publicationfeedback',
name='records',
field=models.ManyToManyField(blank=True, related_name='feedback', to='applications.Record'),
),
migrations.AddField(
model_name='organisationpending',
name='identification',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='applications.Record'),
),
migrations.AddField(
model_name='organisationpending',
name='postal_address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='org_pending_postal_address', to='accounts.Address'),
),
migrations.AddField(
model_name='organisationextras',
name='identification',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='organisation_extras_org_identification', to='applications.Record'),
),
migrations.AddField(
model_name='organisationextras',
name='organisation',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='organisation_extras_org_id', to='accounts.Organisation'),
),
migrations.AddField(
model_name='location',
name='records',
field=models.ManyToManyField(blank=True, to='applications.Record'),
),
migrations.AddField(
model_name='condition',
name='records',
field=models.ManyToManyField(blank=True, to='applications.Record'),
),
migrations.AddField(
model_name='condition',
name='referral',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='applications.Referral'),
),
migrations.AddField(
model_name='compliance',
name='compliance_group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='applications.ComplianceGroup'),
),
migrations.AddField(
model_name='compliance',
name='condition',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='applications.Condition'),
),
migrations.AddField(
model_name='compliance',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='compliance_group_assignment', to='auth.Group'),
),
migrations.AddField(
model_name='compliance',
name='organisation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation'),
),
migrations.AddField(
model_name='compliance',
name='records',
field=models.ManyToManyField(blank=True, to='applications.Record'),
),
migrations.AddField(
model_name='compliance',
name='submitted_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='compliance_submitted_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='communicationorganisation',
name='records',
field=models.ManyToManyField(blank=True, related_name='org_communication_docs', to='applications.Record'),
),
migrations.AddField(
model_name='communicationcompliance',
name='compliance',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='applications.Compliance'),
),
migrations.AddField(
model_name='communicationcompliance',
name='records',
field=models.ManyToManyField(blank=True, related_name='compliance_communication_docs', to='applications.Record'),
),
migrations.AddField(
model_name='communicationaccount',
name='records',
field=models.ManyToManyField(blank=True, related_name='account_communication_docs', to='applications.Record'),
),
migrations.AddField(
model_name='communicationaccount',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='communication',
name='records',
field=models.ManyToManyField(blank=True, related_name='communication_docs', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='brochures_itineries_adverts',
field=models.ManyToManyField(blank=True, related_name='brochures_itineries_adverts', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='cert_public_liability_insurance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='cert_public_liability_insurace', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='cert_survey',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='cert_survey', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='deed',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='deed', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_briefing_note',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_briefing_note', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_completion',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_completion', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_determination',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_determination', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_determination_approved',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_determination_approved', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_draft',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_draft', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_draft_signed',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_draft_signed', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_final',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_final', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_final_signed',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_final_signed', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_memo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_memo', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_new_draft',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_newdraft', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='document_new_draft_v3',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_newdraftv3', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='application_group_assignment', to='auth.Group'),
),
migrations.AddField(
model_name='application',
name='land_owner_consent',
field=models.ManyToManyField(blank=True, related_name='land_owner_consent', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='location_route_access',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='location_route_access', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='organisation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation'),
),
migrations.AddField(
model_name='application',
name='other_relevant_documents',
field=models.ManyToManyField(blank=True, related_name='other_relevant_documents', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='proposed_development_plans',
field=models.ManyToManyField(blank=True, related_name='proposed_development_plans', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='purpose',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='applications.ApplicationPurpose'),
),
migrations.AddField(
model_name='application',
name='records',
field=models.ManyToManyField(blank=True, related_name='records', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='risk_mgmt_plan',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='risk_mgmt_plan', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='river_lease_scan_of_application',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='river_lease_scan_of_application', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='safety_mgmt_procedures',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='safety_mgmt_plan', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='submitted_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='Submitted_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='application',
name='supporting_info_demonstrate_compliance_trust_policies',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='supporting_info_demonstrate_compliance_trust_policies', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='swan_river_trust_board_feedback',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_swan_river_board_feedback', to='applications.Record'),
),
migrations.AddField(
model_name='application',
name='type_of_crafts',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='craft', to='applications.Craft'),
),
migrations.AddField(
model_name='application',
name='vessels',
field=models.ManyToManyField(blank=True, to='applications.Vessel'),
),
migrations.AlterUniqueTogether(
name='referral',
unique_together=set([('application', 'referee')]),
),
migrations.AlterUniqueTogether(
name='delegate',
unique_together=set([('email_user', 'organisation')]),
),
]
|
import numpy as np
from rpy2 import robjects as ro
import rpy2.rlike.container as rlc
def train(x_data, y_values, weights):
    """Fit a weighted logistic regression (binomial GLM with logit link) in R via rpy2.

    Args:
        x_data: 2-D array-like of shape (n_samples, n_features).
        y_values: 1-D array-like of binary targets.
        weights: 1-D array-like of per-sample weights.

    Returns:
        The R fit object produced by glm().
    """
    # One R FloatVector per feature column.
    x_float_vector = [ro.FloatVector(x) for x in np.array(x_data).transpose()]
    y_float_vector = ro.FloatVector(y_values)
    weights_float_vector = ro.FloatVector(weights)
    # Name the feature columns v0, v1, ... so the formula can reference them.
    # (was `xrange` / print-statement: Python 2 only; now Python 3 compatible)
    names = ['v' + str(i) for i in range(len(x_float_vector))]
    d = rlc.TaggedList(x_float_vector + [y_float_vector], names + ['y'])
    data = ro.DataFrame(d)
    # Build the R model formula "y ~ v0+v1+...".
    formula = 'y ~ ' + '+'.join(names)
    fit_res = ro.r.glm(formula=ro.r(formula), data=data, weights=weights_float_vector, family=ro.r('binomial(link="logit")'))
    print("formula:", formula)
    return fit_res
# Smoke test: a single 7-feature sample, target 0, unit weight.
x_data = np.array([(0,1,2,3,4,5,6)])
y_values = np.array([0])
weights = np.array([1])
train(x_data, y_values, weights)
|
import time
from config import cfg
from threading import Thread, Event
from queue import Queue
from typing import Dict, List
import os
import json
from multiprocessing import Queue as mpQueue
from src.base import EdgiseBase
# Voltage rails queried via `vcgencmd measure_volts <id>` on a Raspberry Pi.
VOLTAGE_ID_LIST: List[str] = ['core', 'sdram_c', 'sdram_i', 'sdram_p']
class StateData:
    """Snapshot of device health metrics.

    Fields are set as *instance* attributes so that (a) the voltage list is
    not a single mutable object shared between all instances, and (b)
    ``instance.__dict__`` contains every field from the start — the original
    class-attribute version left ``__dict__`` empty until a field was
    reassigned, so JSON serialization of ``__dict__`` silently dropped
    defaulted fields (e.g. ``fps``).
    """

    def __init__(self) -> None:
        self.temperature: float = 15.0
        self.voltage: List[str] = ['volt=0.000V', 'volt=0.000V', 'volt=0.000V', 'volt=0.000V']
        self.ip: str = "unknown"
        self.memory_usage: List[str] = ['0', '0']
        self.fps: float = 0.0
class DeviceState(Thread, EdgiseBase):
    """Background thread that periodically samples device health metrics
    (temperature, voltage, IP address, memory usage) and pushes them as a
    JSON payload onto the outgoing queue."""

    def __init__(self, stop_event: Event, send_q: Queue, logging_q: mpQueue, **kwargs):
        self._stop_event = stop_event  # signals the thread to shut down
        self._send_q = send_q          # queue consumed by the sender/uplink
        self._state = StateData()
        Thread.__init__(self)
        EdgiseBase.__init__(self, name="STATE", logging_q=logging_q)

    def get_ip(self):
        """Return the first address from `hostname -I`, or a placeholder
        ('no connection' / 'unknown') when the lookup fails."""
        try:
            ip = os.popen('hostname -I').read().split(' ')[0]  # damn that fugly...
            if len(ip) < 5:  # too short to be a dotted-quad address
                ip = 'no connection'
        except Exception as e:
            self.error(f"[get_ip] Exception : {e}")
            ip = 'unknown'
        return ip

    def get_temperature(self):
        """Return the SoC temperature in degrees Celsius.

        Uses `vcgencmd` on a Raspberry Pi and the sysfs thermal zone
        elsewhere; falls back to 15.0 when neither source is readable.
        """
        platform = os.uname()
        if platform[1] == "raspberrypi":
            try:
                tmp: str = os.popen("/opt/vc/bin/vcgencmd measure_temp").readline()
                tmp = tmp.split("=")[-1]         # "temp=48.3'C" -> "48.3'C"
                return float(tmp.split("'")[0])  # "48.3'C" -> 48.3
            except Exception as e:
                self.error(f"[get_temperature] {e}")
                # BUG FIX: the original fell through and returned None here;
                # return the same default as the other failure path.
                return 15.0
        else:
            try:
                t: str = os.popen("cat /sys/class/thermal/thermal_zone0/temp ").readline()
                return float(t)/1000.  # sysfs reports millidegrees
            except Exception:          # was a bare except; same fallback behavior
                return 15.0

    def get_voltage(self):
        """Return 'id:volt=x.xxxV' strings for each rail in VOLTAGE_ID_LIST,
        or the placeholder list ['0', '0', '0', '0'] when unavailable."""
        platform = os.uname()
        if platform[1] == "raspberrypi":
            voltage_list = []
            try:
                for _id in VOLTAGE_ID_LIST:
                    voltage_list.append(_id + ":" + str(os.popen(f"vcgencmd measure_volts {_id}").read())[5:-1])
                return voltage_list
            except Exception as e:
                self.error(f"[get_voltage] Error : {e}")
                return ['0', '0', '0', '0']
        # BUG FIX: the original returned None on non-Raspberry-Pi platforms,
        # which would then be serialized into the state message.
        return ['0', '0', '0', '0']

    def get_memory_usage(self):
        """Return two fields from the totals line of `free -t -m` (as strings),
        or ['0', '0'] when the command fails."""
        try:
            memory_usage = os.popen('free -t -m').readlines()[-1].split()[1:3]
            return memory_usage
        except Exception as e:
            self.error(f"[get_memory_usage] Error : {e}")
            return ['0', '0']

    def run(self) -> None:
        """Main loop: refresh all metrics, enqueue them as a JSON payload,
        then sleep until the next interval or until the stop event fires."""
        while not self._stop_event.is_set():
            self._state.temperature = self.get_temperature()
            self._state.memory_usage = self.get_memory_usage()
            self._state.ip = self.get_ip()
            self._state.voltage = self.get_voltage()
            message = json.dumps(self._state.__dict__)
            self._send_q.put({'state': message})
            # Event.wait() (unlike time.sleep) returns early on shutdown.
            self._stop_event.wait(timeout=cfg.state_sync_interval)
        self.info("Quitting.")

    @property
    def state(self):
        """Most recently sampled StateData instance."""
        return self._state
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayTradeOrderPayResponse(AlipayResponse):
    """Response wrapper for the alipay.trade.order.pay gateway call.

    Exposes the payload fields as properties and fills them from the parsed
    response in parse_response_content().
    """

    # All simple payload fields, shared by __init__ and the parser below.
    _FIELDS = ('async_payment_mode', 'gmt_payment', 'out_request_no',
               'out_trade_no', 'total_amount', 'trade_no')

    def __init__(self):
        super(AlipayTradeOrderPayResponse, self).__init__()
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    @property
    def async_payment_mode(self):
        """The async_payment_mode value from the payload."""
        return self._async_payment_mode

    @async_payment_mode.setter
    def async_payment_mode(self, value):
        self._async_payment_mode = value

    @property
    def gmt_payment(self):
        """The gmt_payment value from the payload."""
        return self._gmt_payment

    @gmt_payment.setter
    def gmt_payment(self, value):
        self._gmt_payment = value

    @property
    def out_request_no(self):
        """The merchant-side request number echoed back by the gateway."""
        return self._out_request_no

    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value

    @property
    def out_trade_no(self):
        """The merchant-side trade number echoed back by the gateway."""
        return self._out_trade_no

    @out_trade_no.setter
    def out_trade_no(self, value):
        self._out_trade_no = value

    @property
    def total_amount(self):
        """The total_amount value from the payload."""
        return self._total_amount

    @total_amount.setter
    def total_amount(self, value):
        self._total_amount = value

    @property
    def trade_no(self):
        """The Alipay-side trade number."""
        return self._trade_no

    @trade_no.setter
    def trade_no(self, value):
        self._trade_no = value

    def parse_response_content(self, response_content):
        """Populate every known field that appears in the parsed payload."""
        response = super(AlipayTradeOrderPayResponse, self).parse_response_content(response_content)
        for field in self._FIELDS:
            if field in response:
                setattr(self, field, response[field])
|
# Base component constants
NAME = "FXLuminaire Luxor"
DOMAIN = "luxor"
DOMAIN_DATA = f"{DOMAIN}_data"  # key used to stash this component's shared data
VERSION = "0.0.1"
ISSUE_URL = "https://github.com/dcramer/hass-luxor/issues"
# Platforms provided by this component
LIGHT = "light"
SCENE = "scene"
PLATFORMS = [LIGHT, SCENE]
# Configuration and options
CONF_HOST = "host"  # config key for the Luxor controller's host/IP
# Defaults
DEFAULT_NAME = DOMAIN
# How long to wait to actually do the refresh after requesting it.
# We wait some time so if we control multiple lights, we batch requests.
REQUEST_REFRESH_DELAY = 0.3  # seconds
|
from lxml import html
import argparse
import requests
import re
import shutil
import os
class Manga:
    """A single manga release entry scraped from readms.net."""

    def __init__(self, url, date, name, chapter, title):
        self.url = url
        self.name = name
        self.chapter = chapter
        self.title = title
        self.date = date

    def __str__(self):
        # e.g. "One Piece-Chapter 999 Some Title"
        return '{}-Chapter {} {}'.format(self.name, self.chapter, self.title)
HOST = 'https://readms.net'
# XPath selectors for scraping readms.net pages:
# the "new releases" sidebar links, the page-number dropdown of a chapter
# reader, and the <img> element holding the current page.
NEW_RELEASES_XPATH = '//div[@class="side-nav hidden-xs"]/ul[@class="new-list"]/li[@class="active"]/a'
MANGA_PAGES_XPATH = '//div[@class="btn-group btn-reader-page"]/ul[@class="dropdown-menu"]/li'
IMG_PAGE_XPATH = '//*[@id="manga-page"]'
def get_new_releases():
    """Scrape the readms.net front page and return the sidebar's new
    releases as a list of Manga objects."""
    tree = html.fromstring(requests.get(HOST).content)
    releases = []
    for anchor in tree.xpath(NEW_RELEASES_XPATH):
        # itertext yields date, name, chapter, title (stripped of whitespace).
        texts = [t.strip() for t in anchor.itertext()]
        releases.append(Manga(anchor.get('href'), texts[0], texts[1], texts[2], texts[3]))
    return releases
def get_manga_image(url):
    """Download the page image referenced by a reader-page *url* and save it
    as <name>/<chapter>/<name>_<page>.png, creating directories as needed."""
    tree = html.fromstring(requests.get(url).content)
    img_url = tree.xpath(IMG_PAGE_XPATH)[0].get("src")
    print("Downloading image " + img_url)
    response = requests.get('https:' + img_url, stream=True)
    # URL layout: /manga/<name>/<chapter>/.../<page>
    parts = url.split('/')
    name, chapter, page = parts[4], parts[5], parts[-1]
    # Create the directory for this chapter's images if it does not exist.
    directory = name + '/' + chapter
    if not os.path.exists(directory):
        os.makedirs(directory)
    file_name = name + '_' + page + '.png'
    with open(directory + '/' + file_name, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response
def download_manga(name):
    """Download every page of the latest chapter of *name* listed in the
    new-releases sidebar; print a message when the manga is not listed."""
    tree = html.fromstring(requests.get(HOST).content)
    matches = tree.xpath(NEW_RELEASES_XPATH + '[contains(text(),"' + name + '")]')
    if len(matches) != 1:
        print("No latest chapter for that manga")
        return
    chapter_url = HOST + matches[0].get('href')
    # Load the first page to discover how many pages the chapter has.
    first_page = html.fromstring(requests.get(chapter_url).content)
    page_items = first_page.xpath(MANGA_PAGES_XPATH)
    max_page = int(re.sub('[^0-9]', '', page_items[-1].text_content()))
    for page_no in range(1, max_page + 1):
        # Swap the trailing page number of the URL for each page.
        chapter_url = chapter_url.rsplit('/', 1)[0] + '/' + str(page_no)
        get_manga_image(chapter_url)
def display(mangas):
    """Pretty-print the latest releases, one manga per line."""
    print('**********Latest Release**********')
    for entry in mangas:
        line = '{} Chapter-{} ({})'.format(entry.name.ljust(25), entry.chapter.ljust(3), entry.title)
        print(line)
def main():
    """CLI entry point: dispatch the 'latest' and 'download <name>' commands."""
    parser = argparse.ArgumentParser(description='CLI tool to check and download latest manga on readms.net',
                                     formatter_class=argparse.RawTextHelpFormatter)
    # Help text for the positional command(s).
    # BUG FIX: the original string continuation read '...latest to s' 'see...'
    # and rendered as "to ssee available manga".
    help_desc = "latest".ljust(24)+"- Get latest manga in readms"+"\ndownload ${manga_name}".ljust(24)+" - Will download the latest chapter manga. Run first the python readms.py latest to " \
                "see available manga"
    parser.add_argument('command', help=help_desc, nargs='+')
    args = parser.parse_args()
    command = args.command
    if command[0] == "latest" and len(command) == 1:
        display(get_new_releases())
    elif command[0] == "download" and len(command) == 2:
        download_manga(command[1])
    else:
        parser.print_help()
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
from .cluster import Cluster, MultiSearchError
from .document import Document, DynamicDocument
from .expression import (
Params, Term, Terms, Exists, Missing, Match, MultiMatch, MatchAll, Range,
Bool, Query, DisMax, Filtered, Ids, Prefix, Limit,
And, Or, Not, Sort, Boosting, Common, ConstantScore, FunctionScore,
Field, SpanFirst, SpanMulti, SpanNear, SpanNot, SpanOr, SpanTerm,
Nested, HasParent, HasChild,
QueryRescorer,
)
from .function import (
Weight, FieldValueFactor, Factor, ScriptScore, RandomScore, Script,
Gauss, Exp, Linear,
)
from .index import Index
from .result import DelayedElasticsearchException
from .search import SearchQuery
from .types import ValidationError
from .version import __version__
|
# -*- coding:utf-8 -*-
# &Author AnFany
import pandas as pd
import numpy as np
# Path to the training data file
train_path = 'C:/Users/GWT9\Desktop/Adult_Train.csv'
# Path to the test data file
test_path = 'C:/Users/GWT9\Desktop/Adult_Test.csv'
# The test data's native-country column lacks the value Holand-Netherlands,
# which breaks one-hot-encoding consistency with the training set.
# Therefore a sample with native-country = Holand-Netherlands is added to the
# test file (manually, to keep the program simple) and dropped again later.
def handle_data(filepath, miss='fill'):
    """Load a CSV, handle missing values (cells marked '?'), and one-hot
    encode the categorical (object-dtype) columns.

    Args:
        filepath: path to the CSV file.
        miss: 'del' drops rows containing missing values; any other value
            forward-fills them.

    Returns:
        A new DataFrame with object columns expanded into one-hot indicator
        columns and numeric columns copied through unchanged.
    """
    # was pd.read_csv(r'%s' % filepath): a no-op string round-trip
    data = pd.read_csv(filepath)
    # The source data marks missing values with '?'.
    data = data.replace('?', np.nan)
    if miss == 'del':
        miss_data = data.dropna(how='any')
    else:
        # DataFrame.ffill() is the modern spelling of fillna(method='ffill'),
        # whose `method` argument is deprecated.
        miss_data = data.ffill()
    newdata = pd.DataFrame()
    for column in miss_data:
        if miss_data[column].dtype == 'object':
            # Categorical column: expand into one-hot indicator columns.
            onedata = pd.get_dummies(miss_data[column])
            newdata = pd.concat([newdata, onedata], axis=1)
        else:
            newdata[column] = miss_data[column]
    return newdata
train_data = handle_data(train_path)
test_data = handle_data(test_path)
test_data = test_data.drop([len(test_data) - 1], inplace=False)  # drop the manually added last sample
# 数据标准化
# 所有特征数据标准化, 目标数据0-1化
def norm(trdata, tedata):
    """Standardize feature columns and pass target columns through unchanged.

    Features are z-scored using the *training* mean and standard deviation
    so train and test share one scale. The test file's target columns carry
    a trailing '.' in their names (an artifact of the Adult test data), so
    they are looked up as '<name>.'.

    Returns:
        (normalized_train, normalized_test) as new DataFrames.
    """
    train_out = pd.DataFrame()
    test_out = pd.DataFrame()
    target_columns = ('<=50K', '>50K')
    for column in trdata.columns:
        if column in target_columns:
            # Targets are already 0/1 encoded; copy the raw values.
            train_out[column] = trdata[column].values
            test_out[column] = tedata['%s.' % column].values
        else:
            mu = np.mean(trdata[column])
            sigma = np.std(trdata[column])
            train_out[column] = (trdata[column] - mu) / sigma
            test_out[column] = (tedata[column] - mu) / sigma
    return train_out, test_out
Train_data, Test_data = norm(train_data, test_data)
# Split the training data into n equal parts and use K-fold cross-validation
# to compute the model's final accuracy.
# Split the training data into training and validation sets.
def kfold(trdata, k=10):
    """Build k random (training, validation) splits of *trdata*.

    Each round reshuffles the rows and draws len/k validation indices
    without replacement, so the folds are drawn independently rather than
    forming a strict partition of the data.

    Returns:
        dict mapping fold number -> [training_rows, validation_rows]
        (both numpy arrays of rows).
    """
    values = trdata.values
    n_rows = len(values)
    all_indices = np.arange(n_rows)
    folds = {}
    for fold_no in range(k):
        np.random.shuffle(values)
        val_idx = np.random.choice(all_indices, int(n_rows / k), replace=False)
        validation = values[val_idx]
        training = values[[i for i in all_indices if i not in val_idx]]
        folds[fold_no] = [training, validation]
    return folds
# Dictionary holding the K-fold cross-validation data splits.
kfold_train_datadict = kfold(Train_data)
|
import os
import xml.etree.ElementTree as ET
# Map FlickrLogos class IDs to class names, read from className2ClassID.txt.
CLASS_DICT = {}
HOME_DIR = os.path.expanduser('~/CourseWork/data/FlickrLogos_47/VOC2007')
ANNOTATIONS_PATH = os.path.join(HOME_DIR, 'Annotations')

# Remove xml files created in previous runs.
# BUG FIX: the original ran "cd Annotations; rm *.xml" relative to the
# current working directory; clean the actual annotations directory instead.
os.system('cd "{}"; rm -f *.xml'.format(ANNOTATIONS_PATH))

# File fmt per line: "<class_name> <class_id>" -> CLASS_DICT[class_id] = class_name
with open(HOME_DIR + '/className2ClassID.txt') as file:
    for line in file:
        line = line.split()
        CLASS_DICT[line[1]] = line[0]

# Image sizes, one line per annotation file, fmt: ['name width height', ...]
# (was opened without ever being closed; `with` fixes that)
with open(HOME_DIR + '/ImageSize.txt') as size_fp:
    size_file = size_fp.read().split('\n')

# Source annotation fmt per line:
# <x1> <y1> <x2> <y2> <class_id> <dummy_value> <mask> <difficult> <truncated>
for idx, file in enumerate(os.listdir(ANNOTATIONS_PATH)):  # idx indexes into size_file
    file_path = os.path.join(ANNOTATIONS_PATH, file)
    # Build the VOC-style XML tree for this image.
    annotation = ET.Element('annotation')
    ET.SubElement(annotation, 'folder').text = 'VOC2007'
    ET.SubElement(annotation, 'filename').text = file[:7] + 'jpg'  # digits + '.' from the name, no txt extension
    size = ET.SubElement(annotation, 'size')
    ET.SubElement(size, 'width').text = size_file[idx].split()[1]
    ET.SubElement(size, 'height').text = size_file[idx].split()[2]
    ET.SubElement(size, 'depth').text = '3'  # RGB
    ET.SubElement(annotation, 'segmented').text = '0'
    with open(file_path, 'r') as annotation_file:
        # One <object> element per line of the source txt annotation.
        for line in annotation_file:
            line = line.split()
            obj = ET.SubElement(annotation, 'object')  # renamed: don't shadow builtin `object`
            ET.SubElement(obj, 'name').text = CLASS_DICT[line[4]]  # class name looked up by class_id
            ET.SubElement(obj, 'pose').text = 'Unspecified'
            ET.SubElement(obj, 'truncated').text = line[8]
            ET.SubElement(obj, 'difficult').text = line[7]
            bbox = ET.SubElement(obj, 'bndbox')
            ET.SubElement(bbox, 'xmin').text = line[0]
            ET.SubElement(bbox, 'ymin').text = line[1]
            ET.SubElement(bbox, 'xmax').text = line[2]
            ET.SubElement(bbox, 'ymax').text = line[3]
    # Write the converted VOC-style XML next to the source annotation.
    mydata = ET.tostring(annotation, encoding='unicode', method='xml')
    with open(os.path.join(ANNOTATIONS_PATH, '{}.xml'.format(file[:6])), "w") as myfile:
        myfile.write(mydata)
|
import wx
import os
import six
import sys
import math
import Utils
import Model
from GetResults import GetResults
from ReorderableGrid import ReorderableGrid
class Prizes( wx.Panel ):
	"""Grid panel for entering per-category prizes and showing their recipients.

	Columns come in pairs per published category: an editable "prize" column
	and a read-only "recipient" column computed from the race results
	(the prize in row N goes to the rider finishing in position N).
	"""
	rowsMax = 20  # maximum number of prize rows per category

	def __init__( self, parent, id=wx.ID_ANY, size=wx.DefaultSize ):
		super(Prizes, self).__init__( parent, id, size=size )
		vsOverall = wx.BoxSizer( wx.VERTICAL )
		self.grid = ReorderableGrid( self )
		self.grid.CreateGrid( 0, 10 )
		self.grid.AutoSizeColumns( False )
		self.grid.AutoSizeRows( False )
		self.grid.Bind( wx.grid.EVT_GRID_CELL_CHANGED, self.onCellChange )
		#---------------------------------------------------------------
		vsOverall.Add( self.grid, 1, flag=wx.EXPAND|wx.ALL, border=4 )
		self.SetSizer( vsOverall )

	def onCellChange( self, event ):
		"""Handle an edit in a prize cell: persist prizes to the race model
		and recompute the recipient cell next to the edited one."""
		race = Model.race
		if not race:
			return
		row, col = event.GetRow(), event.GetCol()
		# Odd columns are the read-only recipient columns; ignore edits there.
		if col & 1:
			return
		categories = race.getCategories( startWaveOnly=False, publishOnly=True )
		if self.grid.GetNumberCols() != len(categories)*2:
			Utils.AdjustGridSize( self.grid, self.rowsMax, len(categories)*2 )
		if row >= self.grid.GetNumberRows() or col >= self.grid.GetNumberCols():
			return
		self.copyToRace()
		try:
			category = categories[col//2]
		except IndexError:
			return
		self.grid.SetCellValue( row, col+1, self.getRecepient(self.grid.GetCellValue(row, col), row, category) )
		wx.CallAfter( self.grid.AutoSizeColumns, False )

	def getRecepient( self, prize, row, category ):
		"""Return '<bib>: <name>' for the rider placed at *row* in *category*,
		or '' when there is no prize or no finisher at that position."""
		if not prize:
			return ''
		name = ''
		results = GetResults( category )
		try:
			name = u'{}: {}'.format(results[row].num, results[row].full_name())
		except IndexError:
			pass
		return name

	def setCellPair( self, row, col, category ):
		"""Fill one (prize, recipient) cell pair from the category's stored prizes."""
		try:
			prize = getattr( category, 'prizes', [] )[row]
		except IndexError:
			prize = ''
		self.grid.SetCellValue( row, col, prize )
		self.grid.SetCellValue( row, col+1, self.getRecepient(prize, row, category) )

	def updateGrid( self ):
		"""Rebuild the whole grid (labels, attributes, and cells) from the
		current race model; clear it when there is no race."""
		race = Model.race
		if not race:
			self.grid.ClearGrid()
			return
		categories = race.getCategories( startWaveOnly=False, publishOnly=True )
		Utils.AdjustGridSize( self.grid, self.rowsMax, len(categories)*2 )
		col = 0
		for category in categories:
			# Split "Name (Gender)" into two label lines for the column header.
			fullname = category.fullname
			ib = fullname.rfind( '(' )
			catname, catgender = fullname[:ib].strip(), fullname[ib:]
			colName = '{}\n{}'.format( catname, catgender )
			self.grid.SetColLabelValue( col, colName )
			attr = wx.grid.GridCellAttr()
			attr.SetReadOnly( False )
			attr.SetAlignment( wx.ALIGN_CENTRE, wx.ALIGN_CENTRE )
			self.grid.SetColAttr( col, attr )
			# NOTE(review): `_` is presumably a translation function installed
			# elsewhere (e.g. by Utils); confirm it is in scope at runtime.
			self.grid.SetColLabelValue( col+1, _('Recipient') )
			attr = wx.grid.GridCellAttr()
			attr.SetReadOnly( True )
			attr.SetAlignment( wx.ALIGN_LEFT, wx.ALIGN_CENTRE )
			attr.SetBackgroundColour( wx.Colour(152,251,152) )  # pale green marks computed cells
			self.grid.SetColAttr( col+1, attr )
			for row in range(self.rowsMax):
				self.setCellPair( row, col, category )
			col += 2
		self.grid.AutoSizeColumns( False )	# Resize to fit the column name.
		self.grid.AutoSizeRows( False )

	def refresh( self ):
		"""Public entry point: repaint the grid from the model."""
		self.updateGrid()

	def copyToRace( self ):
		"""Write the prize columns back into each category's `prizes` list.

		Reading stops at the first empty cell in a column, so prizes must be
		contiguous from the top."""
		race = Model.race
		if not race:
			return
		categories = race.getCategories( startWaveOnly=False, publishOnly=True )
		for i, category in enumerate(categories):
			prizes = []
			for row in range(self.rowsMax):
				v = self.grid.GetCellValue( row, i*2 ).strip()
				if not v:
					break
				prizes.append( v )
			category.prizes = prizes

	def commit( self ):
		"""Flush any in-progress cell edit and persist prizes to the race."""
		self.grid.SaveEditControlValue()	# Make sure the current edit is committed.
		self.grid.DisableCellEditControl()
		self.copyToRace()
if __name__ == '__main__':
	# Stand-alone test harness: create a simulated race and show the panel.
	app = wx.App(False)
	app.SetAppName("CrossMgr")
	Utils.disable_stdout_buffering()
	race = Model.newRace()
	race._populate()
	# NOTE(review): fnameRiderInfo is computed but never used here — confirm
	# whether rider data was meant to be loaded into the simulation.
	fnameRiderInfo = os.path.join(Utils.getHomeDir(), 'SimulationRiderData.xlsx')
	mainWin = wx.Frame(None, title="Prizes", size=(800,700) )
	prizes = Prizes( mainWin )
	mainWin.Show()
	prizes.refresh()
	app.MainLoop()
|
import numpy as np
from scipy.special import softmax
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as tdata
import pandas as pd
import time
from tqdm import tqdm
from utils import validate, get_logits_targets, sort_sum
import pdb
# Conformalize a model with a calibration set.
# Save it to a file in .cache/modelname
# The only difference is that the forward method of ConformalModel also outputs a set.
class ConformalModel(nn.Module):
    """Wraps a classifier so that forward() returns (logits, prediction_set).

    Calibration happens in the constructor: temperature (Platt) scaling
    first, then optional auto-tuning of the regularization parameters
    kreg/lamda, then conformal calibration of the quantile threshold Qhat.
    """
    def __init__(self, model, calib_loader, alpha, kreg=None, lamda=None, randomized=True, allow_zero_sets=False, pct_paramtune = 0.3, batch_size=32, lamda_criterion='size'):
        super(ConformalModel, self).__init__()
        self.model = model
        self.alpha = alpha  # target miscoverage rate
        self.T = torch.Tensor([1.3]) #initialize (1.3 is usually a good value)
        # Fit the temperature; also returns cached (logit, target) pairs.
        self.T, calib_logits = platt(self, calib_loader)
        self.randomized=randomized
        self.allow_zero_sets=allow_zero_sets
        self.num_classes = len(calib_loader.dataset.dataset.classes)
        # Auto-tune kreg/lamda when not supplied by the caller.
        if kreg == None or lamda == None:
            kreg, lamda, calib_logits = pick_parameters(model, calib_logits, alpha, kreg, lamda, randomized, allow_zero_sets, pct_paramtune, batch_size, lamda_criterion)
        # Additive per-rank penalties: zero below kreg, lamda at kreg and beyond.
        self.penalties = np.zeros((1, self.num_classes))
        self.penalties[:, kreg:] += lamda
        calib_loader = tdata.DataLoader(calib_logits, batch_size = batch_size, shuffle=False, pin_memory=True)
        self.Qhat = conformal_calibration_logits(self, calib_loader)

    def forward(self, *args, randomized=None, allow_zero_sets=None, **kwargs):
        """Return (logits, S): model logits and per-sample conformal sets.

        randomized / allow_zero_sets default to the values fixed at
        construction when not overridden per call.
        """
        if randomized == None:
            randomized = self.randomized
        if allow_zero_sets == None:
            allow_zero_sets = self.allow_zero_sets
        logits = self.model(*args, **kwargs)
        with torch.no_grad():
            logits_numpy = logits.detach().cpu().numpy()
            scores = softmax(logits_numpy/self.T.item(), axis=1)  # temperature-scaled probabilities
            I, ordered, cumsum = sort_sum(scores)
            S = gcq(scores, self.Qhat, I=I, ordered=ordered, cumsum=cumsum, penalties=self.penalties, randomized=randomized, allow_zero_sets=allow_zero_sets)
        return logits, S
# Computes the conformal calibration
def conformal_calibration(cmodel, calib_loader):
    """Compute the conformal quantile threshold Qhat from raw inputs.

    Runs the wrapped model over the calibration loader (on GPU), converts
    logits to temperature-scaled softmax scores, accumulates the per-sample
    scores from giq(), and returns their (1 - alpha) upper quantile.
    """
    print("Conformal calibration")
    with torch.no_grad():
        E = np.array([])
        for x, targets in tqdm(calib_loader):
            logits = cmodel.model(x.cuda()).detach().cpu().numpy()
            scores = softmax(logits/cmodel.T.item(), axis=1)
            I, ordered, cumsum = sort_sum(scores)
            E = np.concatenate((E,giq(scores,targets,I=I,ordered=ordered,cumsum=cumsum,penalties=cmodel.penalties,randomized=True, allow_zero_sets=True)))
        # 'method' replaces the 'interpolation' keyword, which was deprecated
        # in NumPy 1.22 and removed in NumPy 2.0.
        Qhat = np.quantile(E, 1 - cmodel.alpha, method='higher')
    return Qhat
# Temperature scaling
def platt(cmodel, calib_loader, max_iters=10, lr=0.01, epsilon=0.01):
    """Temperature-scale cmodel on the calibration set.

    Precomputes (logit, target) pairs once so the backbone is not re-run,
    then optimizes the temperature T on those cached logits.

    Returns:
        (T, logits_dataset): the fitted temperature and the cached logits
        dataset, which callers reuse for conformal calibration.
    """
    print("Begin Platt scaling.")
    # Save logits so don't need to double compute them
    logits_dataset = get_logits_targets(cmodel.model, calib_loader)
    logits_loader = torch.utils.data.DataLoader(logits_dataset, batch_size = calib_loader.batch_size, shuffle=False, pin_memory=True)
    T = platt_logits(cmodel, logits_loader, max_iters=max_iters, lr=lr, epsilon=epsilon)
    print(f"Optimal T={T.item()}")
    return T, logits_dataset
"""
INTERNAL FUNCTIONS
"""
### Precomputed-logit versions of the above functions.
class ConformalModelLogits(nn.Module):
    """Conformal wrapper operating on precomputed logits instead of raw inputs.

    Supports three calibration modes: the regularized conformal procedure
    (default), `naive` (Qhat fixed at 1 - alpha, no calibration), and `LAC`
    (threshold on 1 minus the softmax score of the true class).
    """
    def __init__(self, model, calib_loader, alpha, kreg=None, lamda=None, randomized=True, allow_zero_sets=False, naive=False, LAC=False, pct_paramtune = 0.3, batch_size=32, lamda_criterion='size'):
        super(ConformalModelLogits, self).__init__()
        self.model = model
        self.alpha = alpha  # target miscoverage rate
        self.randomized = randomized
        self.LAC = LAC
        self.allow_zero_sets = allow_zero_sets
        # Temperature-scale on the (already computed) calibration logits.
        self.T = platt_logits(self, calib_loader)
        # Auto-tune kreg/lamda unless fixed by the caller or unused by the mode.
        if (kreg == None or lamda == None) and not naive and not LAC:
            kreg, lamda, calib_logits = pick_parameters(model, calib_loader.dataset, alpha, kreg, lamda, randomized, allow_zero_sets, pct_paramtune, batch_size, lamda_criterion)
            calib_loader = tdata.DataLoader(calib_logits, batch_size=batch_size, shuffle=False, pin_memory=True)
        # Additive per-rank penalties: zero below kreg, lamda at kreg and beyond.
        self.penalties = np.zeros((1, calib_loader.dataset[0][0].shape[0]))
        if not (kreg == None) and not naive and not LAC:
            self.penalties[:, kreg:] += lamda
        self.Qhat = 1-alpha  # naive default; overwritten below unless `naive`
        if not naive and not LAC:
            self.Qhat = conformal_calibration_logits(self, calib_loader)
        elif not naive and LAC:
            # Position of the true class in the descending sort of the logits.
            gt_locs_cal = np.array([np.where(np.argsort(x[0]).flip(dims=(0,)) == x[1])[0][0] for x in calib_loader.dataset])
            # 1 - softmax probability assigned to the true class, per sample.
            scores_cal = 1-np.array([np.sort(torch.softmax(calib_loader.dataset[i][0]/self.T.item(), dim=0))[::-1][gt_locs_cal[i]] for i in range(len(calib_loader.dataset))])
            # Finite-sample-corrected quantile of the LAC scores.
            self.Qhat = np.quantile( scores_cal , np.ceil((scores_cal.shape[0]+1) * (1-alpha)) / scores_cal.shape[0] )

    def forward(self, logits, randomized=None, allow_zero_sets=None):
        """Return (logits, S): the input logits unchanged plus the per-sample
        conformal prediction sets."""
        if randomized == None:
            randomized = self.randomized
        if allow_zero_sets == None:
            allow_zero_sets = self.allow_zero_sets
        with torch.no_grad():
            logits_numpy = logits.detach().cpu().numpy()
            scores = softmax(logits_numpy/self.T.item(), axis=1)  # temperature-scaled probabilities
            if not self.LAC:
                I, ordered, cumsum = sort_sum(scores)
                S = gcq(scores, self.Qhat, I=I, ordered=ordered, cumsum=cumsum, penalties=self.penalties, randomized=randomized, allow_zero_sets=allow_zero_sets)
            else:
                # LAC set: every class whose score deficit is below the threshold.
                S = [ np.where( (1-scores[i,:]) < self.Qhat )[0] for i in range(scores.shape[0]) ]
        return logits, S
def conformal_calibration_logits(cmodel, calib_loader):
    """Compute the conformal quantile Qhat from precomputed logits.

    Accumulates the generalized-inverse-quantile conformity scores over the
    calibration loader and returns their 'higher' (1-alpha) quantile.
    """
    with torch.no_grad():
        batches = [np.array([])]
        for logits, targets in calib_loader:
            probs = softmax(logits.detach().cpu().numpy() / cmodel.T.item(), axis=1)
            I, ordered, cumsum = sort_sum(probs)
            batches.append(
                giq(probs, targets,
                    I=I, ordered=ordered, cumsum=cumsum,
                    penalties=cmodel.penalties,
                    randomized=True, allow_zero_sets=True))
        E = np.concatenate(batches)

        # NOTE: `interpolation=` was renamed to `method=` in numpy >= 1.22.
        Qhat = np.quantile(E, 1 - cmodel.alpha, interpolation='higher')

        return Qhat
def platt_logits(cmodel, calib_loader, max_iters=10, lr=0.01, epsilon=0.01):
    """Fit a temperature T by minimizing NLL of (logits / T) with SGD.

    Args:
        cmodel: unused here except for interface symmetry with callers.
        calib_loader: yields (logits, targets) batches of precomputed logits.
        max_iters: maximum number of passes over the calibration data.
        lr: SGD learning rate for T.
        epsilon: stop when |T_before_epoch - T_after_epoch| < epsilon.

    Returns:
        nn.Parameter: the fitted scalar temperature (on the GPU).

    NOTE(review): hard-requires CUDA (`.cuda()` throughout) — confirm this
    is always run on a GPU host.
    """
    nll_criterion = nn.CrossEntropyLoss().cuda()

    T = nn.Parameter(torch.Tensor([1.3]).cuda())  # 1.3 is the warm-start temperature

    optimizer = optim.SGD([T], lr=lr)
    # BUGFIX(idiom): loop variable was named `iter`, shadowing the builtin.
    for epoch in range(max_iters):
        T_old = T.item()
        for x, targets in calib_loader:
            optimizer.zero_grad()
            x = x.cuda()
            x.requires_grad = True
            out = x / T
            loss = nll_criterion(out, targets.long().cuda())
            loss.backward()
            optimizer.step()
        # Converged once a full epoch barely moves T.
        if abs(T_old - T.item()) < epsilon:
            break
    return T
### CORE CONFORMAL INFERENCE FUNCTIONS
# Generalized conditional quantile function.
def gcq(scores, tau, I, ordered, cumsum, penalties, randomized, allow_zero_sets):
    """Invert the (penalized) CDF of the sorted scores at level tau.

    Args:
        scores: (n, num_classes) softmax scores.
        tau: the calibrated quantile Qhat.
        I, ordered, cumsum: sort indices, sorted scores, and their cumsum
            (as produced by sort_sum).
        penalties: (1, num_classes) per-rank penalty vector.
        randomized: randomize set sizes for exact (not conservative) coverage.
        allow_zero_sets: permit empty prediction sets.

    Returns:
        List of per-example prediction sets (arrays of class indices).
    """
    penalties_cumsum = np.cumsum(penalties, axis=1)
    # Smallest k such that the cumulative (score + penalty) mass exceeds tau.
    sizes_base = ((cumsum + penalties_cumsum) <= tau).sum(axis=1) + 1  # 1 - 1001
    sizes_base = np.minimum(sizes_base, scores.shape[1])  # 1-1000

    if randomized:
        V = np.zeros(sizes_base.shape)
        for i in range(sizes_base.shape[0]):
            # Probability of keeping the k-th (last) element in the set.
            V[i] = 1 / ordered[i, sizes_base[i] - 1] * \
                (tau - (cumsum[i, sizes_base[i] - 1] - ordered[i, sizes_base[i] - 1]) - penalties_cumsum[0, sizes_base[i] - 1])  # -1 since sizes_base \in {1,...,1000}.

        sizes = sizes_base - (np.random.random(V.shape) >= V).astype(int)
    else:
        sizes = sizes_base

    if tau == 1.0:
        sizes[:] = cumsum.shape[1]  # always predict max size if alpha==0. (Avoids numerical error.)

    if not allow_zero_sets:
        sizes[sizes == 0] = 1  # allow the user the option to never have empty sets (will lead to incorrect coverage if 1-alpha < model's top-1 accuracy

    S = list()

    # Construct S from equation (5)
    for i in range(I.shape[0]):
        S = S + [I[i, 0:sizes[i]], ]

    return S
# Get the 'p-value'
def get_tau(score, target, I, ordered, cumsum, penalty, randomized, allow_zero_sets):  # For one example
    """Conformity score: the smallest tau at which `target` enters the set.

    All array arguments are single-example (1, num_classes) slices; `penalty`
    is the per-rank penalty vector.
    """
    idx = np.where(I == target)
    tau_nonrandom = cumsum[idx]

    if not randomized:
        return tau_nonrandom + penalty[0]

    U = np.random.random()

    # NOTE(review): `idx` is a tuple of index ARRAYS, so `idx == (0,0)`
    # relies on single-element-array truthiness — only valid for 1-row
    # inputs, which is how giq() calls this.  Confirm before reusing.
    if idx == (0, 0):
        if not allow_zero_sets:
            # The top-1 label can never leave the set: full conformity score.
            return tau_nonrandom + penalty[0]
        else:
            # Randomize between the empty set and the singleton set.
            return U * tau_nonrandom + penalty[0]
    else:
        # Randomize the inclusion of the target's own score on top of the
        # cumulative mass of all strictly-higher-ranked classes.
        return U * ordered[idx] + cumsum[(idx[0], idx[1] - 1)] + (penalty[0:(idx[1][0] + 1)]).sum()
# Gets the histogram of Taus.
def giq(scores, targets, I, ordered, cumsum, penalties, randomized, allow_zero_sets):
    """
    Generalized inverse quantile conformity score function.
    E from equation (7) in Romano, Sesia, Candes. Find the minimum tau in
    [0, 1] such that the correct label enters.
    """
    n_examples = scores.shape[0]
    E = -np.ones((n_examples,))
    for row in range(n_examples):
        # One-row slices keep every argument 2-D, as get_tau expects.
        sl = slice(row, row + 1)
        E[row] = get_tau(scores[sl, :],
                         targets[row].item(),
                         I[sl, :],
                         ordered[sl, :],
                         cumsum[sl, :],
                         penalties[0, :],
                         randomized=randomized,
                         allow_zero_sets=allow_zero_sets)

    return E
### AUTOMATIC PARAMETER TUNING FUNCTIONS
def pick_kreg(paramtune_logits, alpha):
    """Choose kreg as the (1-alpha) quantile of the true label's rank, plus 1.

    `paramtune_logits` yields (logit_tensor, target) pairs; the true label's
    position in the descending score order is computed per example.
    """
    gt_locs_kstar = np.array([np.where(np.argsort(x[0]).flip(dims=(0,)) == x[1])[0][0] for x in paramtune_logits])
    # NOTE: `interpolation=` was renamed to `method=` in numpy >= 1.22.
    kstar = np.quantile(gt_locs_kstar, 1 - alpha, interpolation='higher') + 1
    return kstar
def pick_lamda_size(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets):
    """Grid-search the RAPS penalty `lamda` minimizing mean prediction-set size.

    Evaluates each candidate penalty on the parameter-tuning split and keeps
    the one producing the smallest average set size.

    Returns:
        The best lamda from the predefined grid.
    """
    # Upper bound on any set size: the number of classes, i.e. the length of
    # one logit vector from the tuning loader.
    best_size = next(iter(paramtune_loader))[0][1].shape[0]  # idiom: next(iter(...)) over .__next__()

    # BUGFIX: `lamda_star` was unbound (NameError at return) if no candidate
    # ever produced a smaller-than-maximal set size; default to the smallest
    # grid value instead.
    lamda_star = 0.001

    # Use the paramtune data to pick lamda.  Does not violate exchangeability.
    for temp_lam in [0.001, 0.01, 0.1, 0.2, 0.5]:  # predefined grid, change if more precision desired.
        conformal_model = ConformalModelLogits(model, paramtune_loader, alpha=alpha, kreg=kreg, lamda=temp_lam,
                                               randomized=randomized, allow_zero_sets=allow_zero_sets, naive=False)
        top1_avg, top5_avg, cvg_avg, sz_avg = validate(paramtune_loader, conformal_model, print_bool=False)
        if sz_avg < best_size:
            best_size = sz_avg
            lamda_star = temp_lam
    return lamda_star
def pick_lamda_adaptiveness(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets, strata=None):
    """Grid-search the RAPS penalty `lamda` minimizing size-stratified coverage violation.

    Args:
        strata: list of [lo, hi] prediction-set-size bins over which coverage
            is checked.  Defaults to the standard ImageNet-scale binning.
            (BUGFIX: this was a mutable default argument — a shared list —
            now rebuilt fresh on every call.)

    Returns:
        The lamda from the predefined grid with the smallest worst-stratum
        coverage violation.
    """
    if strata is None:
        strata = [[0, 1], [2, 3], [4, 6], [7, 10], [11, 100], [101, 1000]]

    lamda_star = 0
    best_violation = 1

    # Use the paramtune data to pick lamda.  Does not violate exchangeability.
    for temp_lam in [0, 1e-5, 1e-4, 8e-4, 9e-4, 1e-3, 1.5e-3, 2e-3]:  # predefined grid, change if more precision desired.
        conformal_model = ConformalModelLogits(model, paramtune_loader, alpha=alpha, kreg=kreg, lamda=temp_lam,
                                               randomized=randomized, allow_zero_sets=allow_zero_sets, naive=False)
        curr_violation = get_violation(conformal_model, paramtune_loader, strata, alpha)
        if curr_violation < best_violation:
            best_violation = curr_violation
            lamda_star = temp_lam
    return lamda_star
def pick_parameters(model, calib_logits, alpha, kreg, lamda, randomized, allow_zero_sets, pct_paramtune, batch_size, lamda_criterion):
    """Split off a tuning subset and choose (kreg, lamda) when unspecified.

    Args:
        calib_logits: dataset of (logit, target) pairs.
        pct_paramtune: fraction of the calibration data reserved for tuning.
        lamda_criterion: "size" or "adaptiveness" — which tuning objective to use.

    Returns:
        (kreg, lamda, calib_logits): the tuned parameters and the remaining
        calibration subset (tuning samples removed, preserving exchangeability).
    """
    num_paramtune = int(np.ceil(pct_paramtune * len(calib_logits)))
    paramtune_logits, calib_logits = tdata.random_split(calib_logits, [num_paramtune, len(calib_logits) - num_paramtune])
    paramtune_loader = tdata.DataLoader(paramtune_logits, batch_size=batch_size, shuffle=False, pin_memory=True)
    # (Removed an unused DataLoader over calib_logits that was never read.)

    if kreg is None:  # idiom: `is None` instead of `== None`
        kreg = pick_kreg(paramtune_logits, alpha)
    if lamda is None:
        if lamda_criterion == "size":
            lamda = pick_lamda_size(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets)
        elif lamda_criterion == "adaptiveness":
            lamda = pick_lamda_adaptiveness(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets)
    return kreg, lamda, calib_logits
def get_violation(cmodel, loader_paramtune, strata, alpha):
    """Worst-case (over size strata) deviation of empirical coverage from 1-alpha.

    Args:
        cmodel: a 'dummy model' callable taking logits and returning
            (output, S) where S is a list of per-example prediction sets.
        loader_paramtune: yields (logits, targets) batches.
        strata: list of [lo, hi] set-size bins.
        alpha: target miscoverage level.

    Returns:
        The maximum, over non-empty strata, of |coverage - (1 - alpha)|.
    """
    records = []
    for logit, target in loader_paramtune:
        # compute output
        output, S = cmodel(logit)  # This is a 'dummy model' which takes logits, for efficiency.
        # measure per-example set size and whether the true label is covered
        size = np.array([x.size for x in S])
        # (Removed an unused sort_sum(...) call whose results were discarded.)
        correct = np.zeros_like(size)
        for j in range(correct.shape[0]):
            correct[j] = int(target[j] in list(S[j]))
        records.append(pd.DataFrame({'size': size, 'correct': correct}))

    # BUGFIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # accumulate batch frames and concatenate once instead.
    if records:
        df = pd.concat(records, ignore_index=True)
    else:
        df = pd.DataFrame(columns=['size', 'correct'])

    wc_violation = 0
    for stratum in strata:
        temp_df = df[(df['size'] >= stratum[0]) & (df['size'] <= stratum[1])]
        if len(temp_df) == 0:
            continue
        stratum_violation = abs(temp_df.correct.mean() - (1 - alpha))
        wc_violation = max(wc_violation, stratum_violation)
    return wc_violation  # the violation
|
from django.shortcuts import get_object_or_404
from wsgiref.util import FileWrapper
from django.http import StreamingHttpResponse, BadHeaderError
from unidecode import unidecode
from wagtail.wagtaildocs.models import Document, document_served
def serve(request, document_id, document_filename):
    """Stream a stored wagtail Document back to the client as a download.

    Looks the document up by id, streams its file with an attachment
    Content-Disposition, and fires the ``document_served`` signal.
    """
    doc = get_object_or_404(Document, id=document_id)
    response = StreamingHttpResponse(
        FileWrapper(doc.file),
        content_type='application/octet-stream',
    )
    try:
        response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename
    except BadHeaderError:
        # Unicode filenames can fail on Django <1.8, Python 2 due to
        # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name
        response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)
    response['Content-Length'] = doc.file.size

    # Send document_served signal
    document_served.send(sender=Document, instance=doc, request=request)

    return response
|
from subprocess import call

# Convert the whitespace-separated values log into gnuplot "x y" pairs.
# BUGFIX: the original used the Python-2-only `file()` builtin and closed the
# handles manually; use open() with context managers so the files are always
# closed, even on error.
with open('values.txt', 'r') as frdr, open('gnu.txt', 'w+') as fwtr:
    for line in frdr:  # iterating the file replaces the manual readline/EOF loop
        row = line.split(' ')
        if row[0].isdigit():
            # Emit consecutive value pairs, one "x y" pair per output line.
            counter = 0
            while counter < len(row) - 1:
                fwtr.write(row[counter] + ' ' + row[counter + 1] + '\n')
                counter += 2

call(["gnuplot", "C:\\Users\\Pankaj\\Desktop\\plot 1.plt"])  # Plot the coords into map.
# Here an absolute path is given. For reusability of this script one needs to
# change this path or give a relative path. On Windows, make sure to add the
# GNUPLOT directory to the PATH environment variable, otherwise it will error.
|
import piexif
import sys

# Load the EXIF metadata of the image given on the command line.
exif_dict = piexif.load(sys.argv[1])

# (Removed commented-out debug dumps of the per-IFD tags and the
# piexif.validate round-trip experiment.)

# Dump the EXIF dict as-is.
exif_bytes_1 = piexif.dump(exif_dict)
# BUGFIX: files were opened/closed manually; use `with` so the handles are
# closed even if write() raises.
with open('dump_orig', 'wb') as f:
    f.write(exif_bytes_1)

# Dump again with piexif's standardization flag enabled for comparison.
exif_bytes_2 = piexif.dump(exif_dict, True)
with open('dump_std', 'wb') as f:
    f.write(exif_bytes_2)
|
"""
imutils/ml/data/datamodule.py
Created on: Wednesday March 16th, 2022
Created by: Jacob Alexander Rose
- Update (Wednesday March 24th, 2022)
- Refactored to make more modular BaseDataset and BaseDataModule, which Herbarium2022Dataset and Herbarium2022DataModule inherit, respectively.
- Update (Wednesday April 6th, 2022)
- Wonky refactor to add ExtantLeavesDataset and ExtantLeavesDataModule to definitions. Involed some blurring of abstractions -- will need to refactor the base class AbstractCatalogDataset.
"""
from dataclasses import dataclass, asdict, replace
import matplotlib.pyplot as plt
from icecream import ic
import jpeg4py as jpeg
import numpy as np
from omegaconf import DictConfig, OmegaConf
import os
import pandas as pd
from pathlib import Path
from PIL import Image
from rich import print as pp
import multiprocessing as mproc
import pytorch_lightning as pl
from sklearn import preprocessing
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms as T
import torchvision
from pytorch_lightning.utilities import rank_zero_only
from typing import *
# from imutils.big.make_train_val_splits import main as make_train_val_splits
from imutils.big.make_train_val_splits import main as make_train_val_splits
from imutils.big.split_catalog_utils import (check_already_built,
read_encoded_splits,
find_data_splits_dir)
# from imutils.big.transforms.image import (Preprocess,
from imutils.ml.aug.image.images import (instantiate_transforms,
Preprocess,
BatchTransform,
get_default_transforms,
DEFAULT_CFG as DEFAULT_TRANSFORM_CFG)
from imutils.ml.utils import label_utils, taxonomy_utils
__all__ = ["Herbarium2022DataModule",
"Herbarium2022Dataset",
"ExtantLeavesDataset",
"ExtantLeavesDataModule",
"get_default_transforms"]
import torch
def tensor_to_image(x: torch.Tensor) -> torch.Tensor:
    """Move the channel axis last: CHW -> HWC, or NCHW -> NHWC.

    BUGFIX(annotation): the return annotation claimed ``np.ndarray``, but
    ``Tensor.permute`` returns a ``torch.Tensor`` view — no numpy conversion
    happens here.

    Args:
        x: a 3-D (C, H, W) or 4-D (N, C, H, W) tensor.

    Returns:
        A permuted view of ``x`` with channels last.
    """
    if x.ndim == 3:
        return x.permute(1, 2, 0)
    return x.permute(0, 2, 3, 1)
def read_jpeg(path):
    """Decode a JPEG file with jpeg4py (libjpeg-turbo backed).

    NOTE(review): presumably returns an HWC uint8 numpy array per jpeg4py's
    decode() — confirm against the jpeg4py docs.
    """
    return jpeg.JPEG(path).decode()
def read_pil(path):
    """Open an image with PIL (lazy: pixel data is decoded on first access)."""
    return Image.open(path)
def read_torchvision_img(path):
    """Read an image with torchvision.io (returns a CHW uint8 tensor per the
    torchvision ``read_image`` docs)."""
    img = torchvision.io.read_image(path)
    return img
# Module-level default image loader (jpeg4py-based).
default_reader = read_jpeg

# Registry of image-reading callables keyed by the string names accepted by
# BaseDataset.set_image_reader (and by configs).
IMAGE_READERS = {
    "jpeg.JPEG": read_jpeg,
    "PIL": read_pil,
    "torchvision": read_torchvision_img,
    "default": default_reader
}
@dataclass
class ExtantLeavesDatasetConfig:
    """Default construction arguments for the ExtantLeaves dataset.

    NOTE(review): the ExtantLeavesDataset class itself is referenced in
    ``__all__`` but not defined in this part of the file — confirm it exists
    elsewhere.
    """
    # Root directory of the extant-leaves catalog files.
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/users/jacob/data/leavesdb-v1_1/extant_leaves_family_3_512"  # /splits/splits=(0.5,0.2,0.3)"
    subset: str = "train"                # which split to load
    label_col: str = "family"            # decoded (string) label column
    x_col: str = "path"                  # image path column
    y_col: str = "y"                     # integer-encoded target column
    id_col: str = "catalog_number"       # unique per-sample id column
    smallest_taxon_col: str = "Species"  # finest taxon rank column
    splits: Tuple[float] = (0.5, 0.2, 0.3)  # train/val/test fractions
    shuffle: bool = True                 # deterministic catalog shuffle
    seed: int = 14                       # RNG seed for shuffling/splitting
@dataclass
class ExtantLeavesDataModuleConfig:
    """Default construction arguments for the ExtantLeaves datamodule."""
    # Root directory of the extant-leaves catalog files.
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/users/jacob/data/leavesdb-v1_1/extant_leaves_family_3_512"  # /splits/splits=(0.5,0.2,0.3)"
    label_col: str = "family"            # decoded (string) label column
    splits: Tuple[float] = (0.5, 0.2, 0.3)  # train/val/test fractions
    shuffle: bool = True                 # deterministic catalog shuffle
    seed: int = 14                       # RNG seed
    batch_size: int = 128                # dataloader batch size
    num_workers: int = 4                 # dataloader worker processes
    pin_memory: bool = True              # pin host memory for GPU transfer
    persistent_workers: Optional[bool] = False  # keep workers alive between epochs
    transform_cfg: Optional["Config"] = None    # transform configuration override
    to_grayscale: bool = False           # convert inputs to grayscale
    num_channels: int = 3                # number of output channels
    remove_transforms: bool = False      # strip all transforms (debug/visualization)
############################
############################
@dataclass
class Herbarium2022DatasetConfig:
    """Default construction arguments for Herbarium2022Dataset."""
    # Root directory holding the Herbarium 2022 catalog CSVs.
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize-512/catalogs"  # /splits/train_size-0.8"
    # catalog_dir: str="/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize-512/catalogs"
    subset: str = "train"                # which split to load
    label_col: str = "scientificName"    # decoded (string) label column
    x_col: str = "path"                  # image path column
    y_col: str = "y"                     # integer-encoded target column
    id_col: str = "image_id"             # unique per-image id column
    smallest_taxon_col: str = "Species"  # finest taxon rank column
    train_size: float = 0.8              # fraction of data in the train split
    shuffle: bool = True                 # deterministic catalog shuffle
    seed: int = 14                       # RNG seed for shuffling/splitting
@dataclass
class Herbarium2022DataModuleConfig:
    """Default construction arguments for Herbarium2022DataModule."""
    # Root directory holding the Herbarium 2022 catalog CSVs.
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize-512/catalogs"  # /splits/train_size-0.8"
    label_col: str = "scientificName"    # decoded (string) label column
    train_size: float = 0.8              # fraction of data in the train split
    shuffle: bool = True                 # deterministic catalog shuffle
    seed: int = 14                       # RNG seed
    batch_size: int = 128                # dataloader batch size
    num_workers: int = 4                 # dataloader worker processes
    pin_memory: bool = True              # pin host memory for GPU transfer
    persistent_workers: Optional[bool] = False  # keep workers alive between epochs
    transform_cfg: Optional["Config"] = None    # transform configuration override
    to_grayscale: bool = False           # convert inputs to grayscale
    num_channels: int = 3                # number of output channels
    remove_transforms: bool = False      # strip all transforms (debug/visualization)
class AbstractCatalogDataset(Dataset):
    """Abstract base for datasets backed by on-disk CSV catalogs.

    Stores the column-name configuration shared by subclasses and implements
    split-file discovery, (re)building, reading, and label-encoder handling
    in :meth:`setup`.

    NOTE(review): several properties/methods read attributes that only
    subclasses define (``catalog_dir``, ``train_size``, ``subset``, ``seed``,
    ``shuffle``, ``is_supervised``) — confirm subclasses assign them before
    calling setup().
    """

    def __init__(self,
                 label_col: str = "scientificName",
                 x_col: str = "path",
                 y_col: str = "y",
                 id_col: str = "image_id",
                 smallest_taxon_col: str = "Species"):
        super().__init__()
        """
        TBD: document these function kwargs
        """
        # Column names used to index the catalog DataFrame.
        self.label_col = label_col                    # human-readable label column
        self.x_col = x_col                            # image path column
        self.y_col = y_col                            # integer-encoded target column
        self.id_col = id_col                          # unique per-sample id column
        self.smallest_taxon_col = smallest_taxon_col  # finest taxon rank column

    @property
    def splits_dir(self) -> Path:
        """Directory containing this catalog's train/val(/test) split CSVs."""
        return find_data_splits_dir(source_dir=self.catalog_dir,
                                    train_size=self.train_size)

    @property
    def split_file_path(self) -> Path:
        """
        Should return the path of this subset's on-disk csv catalog file. I emphasize should.
        """
        return self.splits_dir / f"{self.subset}_metadata.csv"

    @property
    def already_built(self) -> bool:
        """True if the split CSVs (for this label column) already exist on disk.

        [TODO] Make abstract, move implementation to subclasses
        """
        return check_already_built(self.splits_dir, label_col=self.label_col)

    def prepare_metadata(self):
        """Build the train/val splits on disk if missing; return the built data.

        Returns None when the splits already exist (setup() then reads them
        from disk instead).

        [TODO] Make abstract, move implementation to subclasses
        """
        if not self.already_built:
            data = make_train_val_splits(source_dir=self.catalog_dir,
                                         save_dir=self.splits_dir,
                                         label_col=self.label_col,
                                         train_size=self.train_size,
                                         seed=self.seed)
            return data

    def get_data_subset(self,
                        subset: str = "train") -> Tuple["LabelEncoder", pd.DataFrame]:
        """
        Read the selected data subset into a pd.DataFrame

        Returns a Tuple containing an sklearn LabelEncoder and a pd.DataFrame
        of the subset's data catalog
        """
        data = read_encoded_splits(source_dir=self.splits_dir,
                                   include=[subset],
                                   label_col=self.label_col)
        encoder = data["label_encoder"]
        data = data["subsets"][subset]
        return encoder, data

    def setup(self):
        """
        Assigns the following instance attributes:
            ::self.label_encoder
            ::self.df
            ::self.paths
            ::self.targets
            ::self.num_classes
        """
        data = self.prepare_metadata()
        if data is None:
            # Splits already existed on disk; read just this subset.
            encoder, data = self.get_data_subset(subset=self.subset)
        else:
            encoder = data["label_encoder"]
            data = data["subsets"][self.subset]
        if isinstance(encoder, preprocessing.LabelEncoder):
            """
            Auto wraps any sklearn LabelEncoder in our custom class.
            (Added 2022-03-25 - untested)
            """
            encoder = label_utils.LabelEncoder.from_sklearn(encoder)
        setattr(data, "label_encoder", encoder)
        self.label_encoder = encoder
        self.df = data
        if self.shuffle:
            # Deterministic shuffle; keeps the original index as a column.
            self.df = self.df.sample(frac=1, random_state=self.seed).reset_index(drop=False)
        if self.id_col in self.df.columns:
            self.image_ids = self.df[self.id_col]
        self.paths = self.df[self.x_col]
        if self.is_supervised:
            # self.imgs =
            self.targets = self.df[self.y_col]
            self.num_classes = len(set(self.df[self.y_col]))
        else:
            # Unlabeled subset (e.g. the test split): no targets available.
            self.targets = None
            self.num_classes = -1  # Or 0?
        # if getattr(self, "subset") == "train":
        #     self.setup_taxonomy_table(self.df,
        #                               smallest_taxon_col=self.smallest_taxon_col)

    def get_decoded_targets(self, with_image_ids: bool = True) -> Tuple[str, np.ndarray]:
        """Return targets decoded back to their string labels.

        NOTE(review): `with_image_ids` is currently unused — confirm intent.
        """
        assert self.is_supervised
        return self.label_encoder.inv_transform(self.targets)

    @property
    def classes(self):
        # Ordered class labels from the fitted encoder.
        return self.label_encoder.classes

    @property
    def class2idx(self):
        # Mapping from class label -> integer index.
        return self.label_encoder.class2idx

    def setup_taxonomy_table(self,
                             df: pd.DataFrame = None,
                             smallest_taxon_col: str = "Species",
                             taxonomy: taxonomy_utils.TaxonomyLookupTable = None):
        """Attach a TaxonomyLookupTable, building one from `df` if not given."""
        if isinstance(taxonomy, taxonomy_utils.TaxonomyLookupTable):
            self.taxonomy = taxonomy
        else:
            self.taxonomy = taxonomy_utils.TaxonomyLookupTable(df=df,
                                                               smallest_taxon_col=smallest_taxon_col)
class BaseDataset(AbstractCatalogDataset):
    """Concrete catalog dataset: reads images from disk and returns
    (image, label, metadata) triples.

    Subclasses override ``catalog_dir`` and typically call ``self.setup()``
    in their own ``__init__`` (see Herbarium2022Dataset).
    """
    # Fallback catalog location when none is passed to __init__.
    catalog_dir: str = os.path.abspath("./data")

    def __init__(self,
                 catalog_dir: Optional[str] = None,
                 subset: str = "train",
                 label_col: str = "scientificName",
                 x_col: str = "path",
                 y_col: str = "y",
                 id_col: str = "image_id",
                 smallest_taxon_col: str = "Species",
                 train_size: float = 0.8,
                 shuffle: bool = True,
                 seed: int = 14,
                 image_reader: Union[Callable, str] = "default",
                 preprocess: Callable = None,
                 transform: Callable = None,
                 output_image_type = torch.Tensor,
                 output_image_range: Tuple[Any] = (0, 1)):
        """
        Arguments:
            catalog_dir: root directory of the catalog CSVs; falls back to
                the class attribute when None.
            subset: one of "train" / "val" / "test".
            label_col: column containing the fully decoded str labels.
            train_size: fraction of data assigned to the train split.
            shuffle: deterministically shuffle the catalog using `seed`.
            seed: RNG seed for shuffling/splitting.
            image_reader: callable, or a key into IMAGE_READERS.
            preprocess: optional callable (currently unused by __getitem__).
            transform: albumentations-style callable taking `image=` kwarg.
            output_image_type: torch.Tensor or np.ndarray — what
                __getitem__ returns after parse_output_image().
            output_image_range: expected numeric range of returned images.
        """
        super().__init__(
            label_col=label_col,
            x_col=x_col,
            y_col=y_col,
            id_col=id_col,
            smallest_taxon_col=smallest_taxon_col)
        self.catalog_dir = catalog_dir or self.catalog_dir
        self.train_size = train_size
        self.shuffle = shuffle
        self.seed = seed
        self.subset = subset
        # The test split ships without labels.
        self.is_supervised = bool(subset != "test")
        self.set_image_reader(image_reader)
        self.preprocess = preprocess
        self.transform = transform
        self.output_image_type = output_image_type
        self.output_image_range = output_image_range
        # self.setup()
        # self.cfg = self.get_cfg()

    @classmethod
    def from_cfg(cls,
                 cfg: DictConfig,
                 **kwargs):
        """Construct from an omegaconf config, with kwargs as overrides."""
        cfg = OmegaConf.merge(cfg, kwargs)
        return cls(**cfg)

    def get_cfg(self,
                cfg: DictConfig = None,
                **kwargs):
        """Return this dataset's configuration as a DictConfig, merged with
        any overrides supplied via `cfg` or kwargs."""
        cfg = cfg or {}
        default_cfg = DictConfig(dict(
            catalog_dir=self.catalog_dir or None,
            subset=self.subset or "train",
            label_col=self.label_col or "scientificName",
            train_size=self.train_size or 0.7,
            shuffle=self.shuffle,
            seed=self.seed or 14))
        cfg = OmegaConf.merge(default_cfg, cfg, kwargs)
        return cfg

    def __len__(self):
        return len(self.df)

    def set_image_reader(self,
                         reader: Union[Callable, str]) -> None:
        """Select the on-disk image-loading function (by name or callable)."""
        if isinstance(reader, str):
            if reader not in IMAGE_READERS:
                print(f"specified image_reader is invalid, using default jpeg.JPEG")
            reader = IMAGE_READERS.get(reader, IMAGE_READERS["default"])
        elif not isinstance(reader, Callable):
            # BUGFIX: previously raised the undefined name `InvalidArgument`,
            # which itself triggered a NameError; raise a real exception type.
            raise TypeError(
                f"image_reader must be a callable or one of {sorted(IMAGE_READERS)}, got {type(reader)}")
        self.reader = reader

    def parse_output_image(self, img: Any):
        """Coerce `img` to `self.output_image_type`, rescaling uint8 [0,255]
        data into [0,1] floats on the way."""
        if isinstance(img, self.output_image_type):
            return img
        if self.output_image_type == torch.Tensor:
            if isinstance(img, np.ndarray):
                if img.dtype == "uint8":
                    img = img.astype("float32")
                if np.allclose(img.max(), 255.0):
                    img = img / 255.0
                return torch.from_numpy(img).permute(2, 0, 1)  # HWC -> CHW
            # BUGFIX: was `PIL.Image.Image`, but only `from PIL import Image`
            # is imported — `PIL` was an undefined name (NameError).
            elif isinstance(img, Image.Image):
                return T.ToTensor()(img)
        elif self.output_image_type == np.ndarray:
            if isinstance(img, torch.Tensor):
                img = img.permute(1, 2, 0).numpy()  # CHW -> HWC
            elif isinstance(img, Image.Image):  # BUGFIX: was `PIL.Image.Image` (NameError)
                img = np.array(img)
            if self.output_image_range == (0, 1):
                if img.dtype == "uint8":
                    img = img.astype("float32")
                if np.allclose(img.max(), 255.0):
                    img = img / 255.0
            return img
        else:
            raise Exception(f"Warning, parse_output_image received unexpected image of type {type(img)=}")

    def parse_sample(self, index: int):
        """Return the catalog row at positional `index`."""
        return self.df.iloc[index, :]

    def fetch_item(self, index: int) -> Tuple[str]:
        """
        Returns identically-structured namedtuple as __getitem__, with the following differences:
            - PIL Image (or raw bytes) as returned by self.reader function w/o any transforms
              vs. torch.Tensor after all transforms
            - target text label vs. target int label
            - image path
            - image catalog_number
        """
        sample = self.parse_sample(index)
        path = getattr(sample, self.x_col)
        image_id = getattr(sample, self.id_col)
        image = self.reader(path)
        metadata = {
            "path": path,
            "image_id": image_id
            # "catalog_number":catalog_number
        }
        label = -1
        if self.is_supervised:
            # Fall back to -1 when the target column is missing from the row.
            label = getattr(sample, self.y_col, -1)
        return image, label, metadata

    def __getitem__(self, index: int):
        """Return (image, label, metadata) with transforms applied."""
        image, label, metadata = self.fetch_item(index)
        # if self.preprocess is not None:
        #     image = self.preprocess(image)
        if self.transform is not None:
            # albumentations-style API: transform(image=...)["image"]
            image = self.transform(image=image)
            image = image["image"]
        image = self.parse_output_image(image)
        return image, label, metadata
class BaseDataModule(pl.LightningDataModule):
    """Base LightningDataModule for catalog-backed image datasets.

    Subclasses must set ``dataset_cls`` and implement :meth:`setup`.  This
    base class provides transform (re)configuration, image-reader injection,
    the three standard dataloaders, and sizing/visualization helpers.

    NOTE(review): ``__init__`` is commented out below, so this base class
    assumes subclasses assign ``batch_size``, ``num_workers``, ``pin_memory``,
    ``persistent_workers``, ``to_grayscale`` and ``num_channels`` themselves
    (see Herbarium2022DataModule) — confirm before subclassing.
    """
    # Subclasses point this at their BaseDataset subclass.
    dataset_cls = None  # Herbarium2022Dataset
    train_dataset = None
    val_dataset = None
    test_dataset = None
    train_transform = None
    val_transform = None
    test_transform = None
    # Class-level default; an instance-level merged copy is created in
    # setup_transforms().
    transform_cfg = DEFAULT_TRANSFORM_CFG

    # def __init__(self,
    #              catalog_dir: Optional[str]=None,
    #              label_col="scientificName",
    #              train_size=0.7,
    #              shuffle: bool=True,
    #              seed=14,
    #              batch_size: int = 128,
    #              num_workers: int = None,
    #              pin_memory: bool=True,
    #              persistent_workers: Optional[bool]=False,
    #              train_transform=None,
    #              val_transform=None,
    #              test_transform=None,
    #              transform_cfg=None,
    #              remove_transforms: bool=False,
    #              image_reader: Callable="default", #Image.open,
    #              **kwargs
    #              ):
    #     super().__init__()
    #     self.catalog_dir = catalog_dir
    #     self.label_col = label_col
    #     self.train_size = train_size
    #     self.shuffle = shuffle
    #     self.seed = seed
    #     self.batch_size = batch_size
    #     self.num_workers = num_workers if num_workers is not None else mproc.cpu_count()
    #     self.pin_memory = pin_memory
    #     self.persistent_workers = persistent_workers
    #     self.image_reader = image_reader
    #     self.setup_transforms(transform_cfg=transform_cfg,
    #                           train_transform=train_transform,
    #                           val_transform=val_transform,
    #                           test_transform=test_transform,
    #                           remove_transforms=remove_transforms)
    #     self.cfg = self.get_cfg()
    #     self.kwargs = kwargs

    # @classmethod
    # def from_cfg(cls,
    #              cfg: DictConfig,
    #              **kwargs):
    #     cfg = OmegaConf.merge(cfg, kwargs)
    #     return cls(**cfg)

    # def get_cfg(self,
    #             cfg: DictConfig=None,
    #             **kwargs):
    #     cfg=cfg or {}
    #     default_cfg = DictConfig(dict(
    #         catalog_dir=self.catalog_dir or None,
    #         label_col=self.label_col or "scientificName",
    #         train_size=self.train_size or 0.7,
    #         shuffle=self.shuffle,
    #         seed=self.seed or 14,
    #         batch_size = self.batch_size or 128,
    #         num_workers = self.num_workers or None,
    #         pin_memory=self.pin_memory,
    #         transform_cfg=self.transform_cfg,
    #         remove_transforms=self.remove_transforms,
    #         ))
    #     cfg = OmegaConf.merge(default_cfg, cfg, kwargs)
    #     return cfg

    def prepare_data(self):
        # Nothing to download here; dataset construction handles metadata.
        pass

    def setup(self, stage=None):
        # Subclasses construct train/val/test datasets for the given stage.
        raise NotImplementedError

    def setup_transforms(self,
                         transform_cfg: dict=None,
                         train_transform=None,
                         val_transform=None,
                         test_transform=None,
                         remove_transforms: bool=False):
        """Merge `transform_cfg` into the defaults and (re)build the
        train/val/test transforms, or strip all transforms when
        `remove_transforms` is True.

        Any already-constructed datasets have their `.transform` attribute
        updated in place.
        """
        transform_cfg = transform_cfg or {}
        self.transform_cfg = OmegaConf.merge(self.transform_cfg, transform_cfg)
        self.remove_transforms = remove_transforms
        if self.remove_transforms:
            # Clear all transforms (used e.g. by show_batch to get raw images).
            for subset in ["train", "val", "test"]:
                setattr(self, f"{subset}_transform", None)
                if self.get_dataset(subset) is not None:
                    self.get_dataset(subset).transform = None
            return
        else:
            # print("self.transform_cfg:"); pp(self.transform_cfg)
            self.train_transform = (
                instantiate_transforms(cfg=self.transform_cfg.train, to_grayscale=self.to_grayscale, num_output_channels=self.num_channels, verbose=False)
                if train_transform is None else train_transform
            )
            self.val_transform = (
                instantiate_transforms(cfg=self.transform_cfg.val, to_grayscale=self.to_grayscale, num_output_channels=self.num_channels, verbose=False)
                if val_transform is None else val_transform
            )
            self.test_transform = (
                instantiate_transforms(cfg=self.transform_cfg.test, to_grayscale=self.to_grayscale, num_output_channels=self.num_channels, verbose=False)
                if test_transform is None else test_transform
            )
        for subset in ["train", "val", "test"]:
            if self.get_dataset(subset) is not None:
                # Replace the existing transforms on any already setup datasets
                self.get_dataset(subset).transform = getattr(self, f"{subset}_transform")

    def set_image_reader(self,
                         reader: Callable) -> None:
        """
        Pass in a callable that reads image data from disk,
        which is assigned to each of this datamodule's datasets, respectively.
        """
        for data in [self.train_dataset, self.val_dataset, self.test_dataset]:
            if data is None:
                continue
            data.set_image_reader(reader)

    def setup_taxonomy_table(self,
                             df: pd.DataFrame=None,
                             smallest_taxon_col: str="Species",
                             taxonomy: taxonomy_utils.TaxonomyLookupTable=None):
        """Attach a TaxonomyLookupTable, building one from `df` if not given."""
        if isinstance(taxonomy, taxonomy_utils.TaxonomyLookupTable):
            self.taxonomy = taxonomy
        else:
            self.taxonomy = taxonomy_utils.TaxonomyLookupTable(df=df,
                                                               smallest_taxon_col=smallest_taxon_col)

    def train_dataloader(self):
        """Shuffled loader over the train dataset."""
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=True,
            pin_memory=self.pin_memory,
            persistent_workers=self.persistent_workers
        )

    def val_dataloader(self):
        """Deterministic (unshuffled) loader over the val dataset."""
        return DataLoader(
            self.val_dataset,
            batch_size=self.batch_size,  # *2,
            num_workers=self.num_workers,
            shuffle=False,
            pin_memory=self.pin_memory,
            persistent_workers=self.persistent_workers
        )

    def test_dataloader(self):
        """Deterministic (unshuffled) loader over the test dataset."""
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,  # *2,
            num_workers=self.num_workers,
            shuffle=False,
            pin_memory=self.pin_memory
        )

    def get_dataloader(self,
                       subset: str="train"):
        """Dispatch to the subset's dataloader; None for unknown subsets."""
        if subset == "train":
            return self.train_dataloader()
        elif subset == "val":
            return self.val_dataloader()
        elif subset == "test":
            return self.test_dataloader()
        else:
            return None

    def get_dataset(self,
                    subset: str="train"):
        """Dispatch to the subset's dataset; None for unknown subsets."""
        if subset == "train":
            return self.train_dataset
        elif subset == "val":
            return self.val_dataset
        elif subset == "test":
            return self.test_dataset
        else:
            return None

    @property
    def num_classes(self) -> int:
        # Requires both train and val datasets to have been set up.
        assert self.train_dataset and self.val_dataset
        return max(self.train_dataset.num_classes, self.val_dataset.num_classes)

    def num_samples(self,
                    subset: str="train"):
        """Number of samples in the given subset's dataset."""
        return len(self.get_dataset(subset=subset))

    def num_batches(self,
                    subset: str="train"):
        """Number of batches produced by the given subset's dataloader."""
        return len(self.get_dataloader(subset=subset))

    def get_dataset_size(self,
                         subset: str="train",
                         verbose: bool=False):
        """Return (num_samples, num_batches) for a subset, optionally logging
        the sizes on rank zero only."""
        num_samples = self.num_samples(subset)  # len(datamodule.get_dataset(subset=subset))
        num_batches = self.num_batches(subset)  # len(datamodule.get_dataloader(subset=subset))
        if verbose:
            # print(f"{subset} --> (num_samples: {num_samples:,}), (num_batches: {num_batches:,})")
            rank_zero_only(ic)(subset, num_samples, num_batches, self.num_classes, self.batch_size)
        return num_samples, num_batches

    def show_batch(self, batch_idx: int=0, nrow: int=4, figsize=(10, 10)):
        """Plot one train batch side-by-side: raw images vs augmented images.

        NOTE(review): temporarily strips and then re-instantiates the
        transforms as a side effect — explicitly-passed transform callables
        are not restored, only those built from transform_cfg.
        """
        def _to_vis(data):
            return tensor_to_image(torchvision.utils.make_grid(data, nrow=nrow, normalize=True))

        transform_cfg = self.transform_cfg
        # get a batch from the training set: try with `val_datlaoader` :)
        # train_transform = self.train_transform
        bsz = self.batch_size
        indices = list(range(batch_idx * bsz, (batch_idx + 1) * bsz))
        # Fetch the same samples twice: once raw, once augmented.
        self.setup_transforms(remove_transforms=True)
        imgs = [self.train_dataset[i][0] for i in indices]
        self.setup_transforms(transform_cfg=self.transform_cfg)
        imgs_aug = [self.train_dataset[i][0] for i in indices]
        # imgs_aug = train_transform(imgs.numpy()) # apply transforms
        # use matplotlib to visualize
        fig, ax = plt.subplots(1, 2, figsize=(2 * figsize[0], figsize[1]))
        ax[0].set_title("image")
        ax[1].set_title("aug image")
        ax[1].set_yticklabels([])
        ax[1].set_facecolor('#eafff5')
        # plt.figure(figsize=figsize)
        ax[0].imshow(_to_vis(imgs))
        # plt.figure(figsize=win_size)
        ax[1].imshow(_to_vis(imgs_aug))
        plt.tight_layout(w_pad=0.05)
##############################
##############################
class Herbarium2022Dataset(BaseDataset):
    """Herbarium 2022 (FGVC9) dataset of pre-resized 512px images.

    Thin subclass of BaseDataset that pins the Herbarium-specific defaults
    and eagerly runs setup() at construction time.
    """
    # Default location of the on-disk catalogs; override via `catalog_dir`.
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize-512/catalogs"  # /splits/train_size-0.8"
    default_cfg: Herbarium2022DatasetConfig = Herbarium2022DatasetConfig(catalog_dir=catalog_dir)

    def __init__(self,
                 catalog_dir: Optional[str] = None,
                 subset: str = "train",
                 label_col: str = "scientificName",
                 x_col: str = "path",
                 y_col: str = "y",
                 id_col: str = "image_id",
                 smallest_taxon_col: str = "Species",
                 train_size: float = 0.8,
                 shuffle: bool = True,
                 seed: int = 14,
                 image_reader: Union[Callable, str] = "default",  # Image.open,
                 preprocess: Callable = None,
                 transform: Callable = None,
                 output_image_type = torch.Tensor):
        """
        Arguments:
            catalog_dir: root of the catalog CSVs; falls back to the class default.
            subset: one of "train" / "val" / "test".
            label_col: column containing the fully decoded str labels.
            train_size: fraction of data in the train split.
            shuffle: deterministically shuffle the catalog using `seed`.
            seed: RNG seed.
            image_reader: callable or key into IMAGE_READERS.
            preprocess: optional callable (currently unused by __getitem__).
            transform: albumentations-style callable taking `image=` kwarg.
            output_image_type: type returned by __getitem__.
        """
        super().__init__(catalog_dir=catalog_dir,
                         subset=subset,
                         label_col=label_col,
                         x_col=x_col,
                         y_col=y_col,
                         id_col=id_col,
                         smallest_taxon_col=smallest_taxon_col,
                         train_size=train_size,
                         shuffle=shuffle,
                         seed=seed,
                         image_reader=image_reader,
                         preprocess=preprocess,
                         transform=transform,
                         output_image_type=output_image_type)
        # The test split ships without labels.
        self.is_supervised = bool(subset != "test")
        self.setup()
        self.cfg = self.get_cfg()

    @classmethod
    def from_cfg(cls,
                 cfg: Union[Herbarium2022DatasetConfig, DictConfig],
                 **kwargs):
        """Construct from a dataclass or omegaconf config, with kwarg overrides."""
        if isinstance(cfg, Herbarium2022DatasetConfig):
            cfg = asdict(cfg)
        elif isinstance(cfg, DictConfig):
            cfg = OmegaConf.to_container(cfg, resolve=True)
        # BUGFIX: `self` was referenced inside a classmethod (NameError) —
        # use `cls`; also merge kwargs, which were previously silently
        # dropped, and unpack the resulting dataclass (`cls(**dataclass)`
        # raised TypeError because dataclasses are not mappings).
        cfg = replace(cls.default_cfg, **{**cfg, **kwargs})
        return cls(**asdict(cfg))

    def get_cfg(self, as_dict: bool = False) -> Herbarium2022DatasetConfig:
        """Snapshot this dataset's configuration as a (dataclass or dict) config."""
        cfg = replace(self.default_cfg,
                      catalog_dir=self.catalog_dir,
                      subset=self.subset,
                      label_col=self.label_col,
                      x_col=self.x_col,
                      y_col=self.y_col,
                      id_col=self.id_col,
                      smallest_taxon_col=self.smallest_taxon_col,
                      train_size=self.train_size,
                      shuffle=self.shuffle,
                      seed=self.seed)
        if as_dict:
            return asdict(cfg)
        return cfg
class Herbarium2022DataModule(BaseDataModule):
    """LightningDataModule for the Herbarium 2022 (FGVC9) competition data.

    Builds Herbarium2022Dataset instances for train/val/test, configures
    their transforms and image reader, and exposes the standard dataloaders.
    """
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize-512/catalogs"  # /splits/train_size-0.8"
    dataset_cls = Herbarium2022Dataset
    transform_cfg = DEFAULT_TRANSFORM_CFG
    default_cfg: Herbarium2022DataModuleConfig = Herbarium2022DataModuleConfig(catalog_dir=catalog_dir)

    def __init__(self,
                 catalog_dir: Optional[str] = None,
                 label_col="scientificName",
                 train_size=0.8,
                 smallest_taxon_col: str = "Species",
                 shuffle: bool = True,
                 seed=14,
                 batch_size: int = 128,
                 num_workers: int = None,
                 pin_memory: bool = True,
                 persistent_workers: Optional[bool] = False,
                 train_transform=None,
                 val_transform=None,
                 test_transform=None,
                 transform_cfg=None,
                 to_grayscale: bool = False,
                 num_channels: int = 3,
                 remove_transforms: bool = False,
                 image_reader: Callable = "default",  # Image.open,
                 **kwargs
                 ):
        """
        Args:
            catalog_dir: root of the catalog CSVs; falls back to the class default.
            label_col: decoded (string) label column.
            train_size: train split fraction.
            smallest_taxon_col: finest taxon rank column.
            shuffle, seed: deterministic catalog shuffling.
            batch_size, num_workers, pin_memory, persistent_workers:
                DataLoader arguments; num_workers=None means one per CPU core.
            train/val/test_transform: explicit transforms; built from
                `transform_cfg` when None.
            to_grayscale, num_channels: channel handling for the transforms.
            remove_transforms: strip all transforms (used by show_batch).
            image_reader: name or callable for reading images from disk.
            **kwargs: stored untouched on `self.kwargs`.
        """
        super().__init__()
        self.catalog_dir = catalog_dir or self.catalog_dir
        self.label_col = label_col
        self.train_size = train_size
        self.shuffle = shuffle
        self.seed = seed
        self.batch_size = batch_size
        # Default to one worker per CPU core.
        self.num_workers = num_workers if num_workers is not None else mproc.cpu_count()
        self.pin_memory = pin_memory
        self.persistent_workers = persistent_workers
        self.image_reader = image_reader
        self.to_grayscale = to_grayscale
        self.num_channels = num_channels
        self.smallest_taxon_col = smallest_taxon_col
        self.setup_transforms(transform_cfg=transform_cfg,
                              train_transform=train_transform,
                              val_transform=val_transform,
                              test_transform=test_transform,
                              remove_transforms=remove_transforms)
        self.setup()
        self.cfg = self.get_cfg()
        self.kwargs = kwargs

    def setup(self, stage="fit"):
        """Construct the datasets requested by `stage` ("fit" builds train+val)."""
        subsets = []
        if stage in ["train", "fit", "all", None]:
            self.train_dataset = self.dataset_cls(catalog_dir=self.catalog_dir,
                                                  subset="train",
                                                  label_col=self.label_col,
                                                  train_size=self.train_size,
                                                  shuffle=self.shuffle,
                                                  seed=self.seed,
                                                  transform=self.train_transform)
            # NOTE(review): this call passes smallest_taxon_col=self.label_col
            # (not self.smallest_taxon_col) and is repeated at the end of
            # setup() — confirm which is intended.
            self.setup_taxonomy_table(df=self.train_dataset.df,
                                      smallest_taxon_col=self.label_col)
            subsets.append("train")
        if stage in ["val", "fit", "all", None]:
            self.val_dataset = self.dataset_cls(catalog_dir=self.catalog_dir,
                                                subset="val",
                                                label_col=self.label_col,
                                                train_size=self.train_size,
                                                shuffle=self.shuffle,
                                                seed=self.seed,
                                                transform=self.val_transform)
            subsets.append("val")
        if stage in ["test", "all", None]:
            self.test_dataset = self.dataset_cls(catalog_dir=self.catalog_dir,
                                                 subset="test",
                                                 label_col=self.label_col,
                                                 train_size=self.train_size,
                                                 shuffle=self.shuffle,
                                                 seed=self.seed,
                                                 transform=self.test_transform)
            subsets.append("test")
        for s in subsets:
            self.get_dataset_size(subset=s,
                                  verbose=True)
        self.set_image_reader(self.image_reader)
        # NOTE(review): assumes train_dataset exists — a pure stage="test"
        # run would hit the class-level None here; confirm intended usage.
        self.setup_taxonomy_table(
            df=self.train_dataset.df,
            smallest_taxon_col=self.smallest_taxon_col)

    @classmethod
    def from_cfg(cls,
                 # BUGFIX: annotation referenced ExtantLeavesDataModuleConfig
                 # (copy-paste from another datamodule).
                 cfg: Union[Herbarium2022DataModuleConfig, DictConfig],
                 **kwargs):
        """Construct from a dataclass or omegaconf config, with kwarg overrides."""
        # BUGFIX: isinstance previously checked ExtantLeavesDatasetConfig.
        if isinstance(cfg, Herbarium2022DataModuleConfig):
            cfg = asdict(cfg)
        elif isinstance(cfg, DictConfig):
            cfg = OmegaConf.to_container(cfg, resolve=True)
        # BUGFIX: `self` was referenced inside a classmethod (NameError) —
        # use `cls`; also merge kwargs (previously silently dropped) and
        # unpack the resulting dataclass (`cls(**dataclass)` raised TypeError).
        cfg = replace(cls.default_cfg, **{**cfg, **kwargs})
        return cls(**asdict(cfg))

    def get_cfg(self, as_dict: bool = False) -> Herbarium2022DataModuleConfig:
        """Snapshot this datamodule's configuration as a (dataclass or dict) config.

        BUGFIX(annotation): the return annotation previously named
        ExtantLeavesDataModuleConfig.
        """
        cfg = replace(self.default_cfg,
                      catalog_dir=self.catalog_dir,
                      label_col=self.label_col,
                      train_size=self.train_size,
                      shuffle=self.shuffle,
                      seed=self.seed,
                      batch_size=self.batch_size,
                      num_workers=self.num_workers,
                      pin_memory=self.pin_memory,
                      persistent_workers=self.persistent_workers,
                      transform_cfg=self.transform_cfg,
                      to_grayscale=self.to_grayscale,
                      num_channels=self.num_channels,
                      remove_transforms=self.remove_transforms)
        if as_dict:
            return asdict(cfg)
        return cfg
#################
##################
from imutils.big import make_train_val_test_splits as leavesdb_utils
from imutils.big.make_train_val_test_splits import main as make_train_val_test_splits
class ExtantLeavesDataset(BaseDataset):
    """Extant Leaves image dataset read from on-disk csv catalogs.

    Catalogs are materialized as (train, val, test) splits under
    ``catalog_dir``; ``prepare_metadata`` builds them on first use.
    """
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/users/jacob/data/leavesdb-v1_1/Extant_Leaves_family_10_512/splits/splits=(0.5,0.2,0.3)"
    default_cfg: ExtantLeavesDatasetConfig = ExtantLeavesDatasetConfig(catalog_dir=catalog_dir)

    def __init__(self,
                 catalog_dir: Optional[str] = None,
                 subset: str = "train",
                 label_col: str = "family",
                 x_col: str = "path",
                 y_col: str = "y",
                 id_col: str = "catalog_number",
                 smallest_taxon_col: str = "Species",
                 splits: float = (0.5, 0.2, 0.3),
                 shuffle: bool = True,
                 seed: int = 14,
                 image_reader: Union[Callable, str] = "default",  # Image.open,
                 preprocess: Callable = None,
                 transform: Callable = None,
                 output_image_type=torch.Tensor):
        """
        Arguments:
            catalog_dir: root dir of the split catalogs (class default if None).
            subset: one of "train", "val", "test".
            label_col: Column containing the fully decoded str labels.
            x_col / y_col / id_col: catalog columns for the image path, the
                encoded label, and the unique record id, respectively.
            smallest_taxon_col: finest taxonomic-rank column.
            splits: (train, val, test) fractions.
            shuffle: shuffle catalog rows after loading.
            seed: RNG seed used for shuffling.
            image_reader: callable (or the "default" sentinel) used to load images.
            preprocess / transform: optional per-image callables.
            output_image_type: type images are converted to (default torch.Tensor).
        """
        self.splits = splits
        # [FIX] was hard-coded to "Species", silently discarding the
        # ``smallest_taxon_col`` argument.
        self.smallest_taxon_col = smallest_taxon_col
        super().__init__(catalog_dir=catalog_dir,
                         subset=subset,
                         label_col=label_col,
                         x_col=x_col,
                         y_col=y_col,
                         id_col=id_col,
                         smallest_taxon_col=smallest_taxon_col,
                         train_size=None,
                         shuffle=shuffle,
                         seed=seed,
                         image_reader=image_reader,
                         preprocess=preprocess,
                         transform=transform,
                         output_image_type=output_image_type)
        # Only the held-out test subset is treated as unlabeled.
        self.is_supervised = bool(subset != "test")
        self.setup()
        self.cfg = self.get_cfg()

    @classmethod
    def from_cfg(cls,
                 cfg: Union[ExtantLeavesDatasetConfig, DictConfig],
                 **kwargs):
        """Instantiate from a config dataclass or an OmegaConf DictConfig."""
        if isinstance(cfg, ExtantLeavesDatasetConfig):
            cfg = asdict(cfg)
        elif isinstance(cfg, DictConfig):
            cfg = OmegaConf.to_container(cfg, resolve=True)
        # [FIX] ``self`` is undefined inside a classmethod -> use ``cls``.
        # Unpack via asdict -- dataclass instances do not support **.
        cfg = replace(cls.default_cfg, **cfg)
        return cls(**asdict(cfg))

    def get_cfg(self, as_dict: bool = False) -> ExtantLeavesDatasetConfig:
        """Snapshot current settings as an ExtantLeavesDatasetConfig
        (or a plain dict when ``as_dict`` is True)."""
        cfg = replace(self.default_cfg,
                      catalog_dir=self.catalog_dir,
                      subset=self.subset,
                      label_col=self.label_col,
                      x_col=self.x_col,
                      y_col=self.y_col,
                      id_col=self.id_col,
                      smallest_taxon_col=self.smallest_taxon_col,
                      splits=self.splits,
                      shuffle=self.shuffle,
                      seed=self.seed)
        if as_dict:
            return asdict(cfg)
        return cfg

    @property
    def splits_dir(self) -> Path:
        """Directory holding the split catalogs for ``self.splits``."""
        return leavesdb_utils.find_data_splits_dir(source_dir=self.catalog_dir,
                                                   splits=self.splits)

    @property
    def split_file_path(self) -> Path:
        """
        Should return the path of this subset's on-disk csv catalog file. I emphasize should.
        """
        return self.splits_dir / f"{self.subset}_metadata.csv"

    @property
    def already_built(self) -> bool:
        # True when the split catalogs already exist on disk for label_col.
        return leavesdb_utils.check_already_built(self.splits_dir, label_col=self.label_col)

    def prepare_metadata(self):
        """Materialize the train/val/test split catalogs if they are missing.

        Returns the freshly built split data, or None when the catalogs
        already exist on disk.
        """
        if not self.already_built:
            data = make_train_val_test_splits(source_dir=self.catalog_dir,
                                              splits_dir=self.splits_dir,
                                              label_col=self.label_col,
                                              # [FIX] was ``args.splits`` -- ``args`` is
                                              # undefined here (NameError at runtime).
                                              splits=self.splits,
                                              seed=self.seed)
            return data

    def get_data_subset(self,
                        subset: str = "train") -> Tuple["LabelEncoder", pd.DataFrame]:
        """
        Read the selected data subset into a pd.DataFrame

        Returns a Tuple containing an sklearn LabelEncoder and a pd.DataFrame of the subset's data catalog
        """
        data = leavesdb_utils.read_encoded_splits(source_dir=self.splits_dir,
                                                  include=[subset],
                                                  label_col=self.label_col,
                                                  index_col=0)
        encoder = data["label_encoder"]
        data = data["subsets"][subset]
        return encoder, data
class ExtantLeavesDataModule(BaseDataModule):
    """DataModule wrapping ExtantLeavesDataset train/val/test subsets."""
    catalog_dir: str = "/media/data_cifs/projects/prj_fossils/users/jacob/data/leavesdb-v1_1/extant_leaves_family_3_512/splits/splits=(0.5,0.2,0.3)"
    dataset_cls = ExtantLeavesDataset
    transform_cfg = DEFAULT_TRANSFORM_CFG
    default_cfg: ExtantLeavesDataModuleConfig = ExtantLeavesDataModuleConfig(catalog_dir=catalog_dir)

    def __init__(self,
                 catalog_dir: Optional[str] = None,
                 label_col="family",
                 splits: Tuple[float] = (0.5, 0.2, 0.3),
                 smallest_taxon_col: str = "Species",
                 shuffle: bool = True,
                 seed=14,
                 batch_size: int = 128,
                 num_workers: int = None,
                 pin_memory: bool = True,
                 persistent_workers: Optional[bool] = False,
                 train_transform=None,
                 val_transform=None,
                 test_transform=None,
                 transform_cfg=None,
                 to_grayscale: bool = False,
                 num_channels: int = 3,
                 remove_transforms: bool = False,
                 image_reader: Callable = "default",  # Image.open,
                 **kwargs
                 ):
        """See ExtantLeavesDataModuleConfig for the meaning of each field.

        Extra ``kwargs`` are stored untouched on ``self.kwargs``.
        """
        super().__init__()
        self.catalog_dir = catalog_dir or self.catalog_dir
        self.label_col = label_col
        self.splits = splits
        self.shuffle = shuffle
        self.seed = seed
        self.batch_size = batch_size
        # Default to one worker per CPU core when unspecified.
        self.num_workers = num_workers if num_workers is not None else mproc.cpu_count()
        self.pin_memory = pin_memory
        self.persistent_workers = persistent_workers
        self.image_reader = image_reader
        self.to_grayscale = to_grayscale
        self.num_channels = num_channels
        self.smallest_taxon_col = smallest_taxon_col
        self.setup_transforms(transform_cfg=transform_cfg,
                              train_transform=train_transform,
                              val_transform=val_transform,
                              test_transform=test_transform,
                              remove_transforms=remove_transforms)
        self.cfg = self.get_cfg()
        self.setup()
        self.kwargs = kwargs

    @classmethod
    def from_cfg(cls,
                 cfg: Union[ExtantLeavesDataModuleConfig, DictConfig],
                 **kwargs):
        """Build a DataModule from a config dataclass or an OmegaConf DictConfig."""
        # [FIX] isinstance previously checked ExtantLeavesDatasetConfig (the
        # dataset config), so a DataModule config instance fell through
        # unconverted.
        if isinstance(cfg, ExtantLeavesDataModuleConfig):
            cfg = asdict(cfg)
        elif isinstance(cfg, DictConfig):
            cfg = OmegaConf.to_container(cfg, resolve=True)
        # [FIX] ``self`` is undefined inside a classmethod -> use ``cls``.
        # Unpack via asdict -- dataclass instances do not support **.
        cfg = replace(cls.default_cfg, **cfg)
        return cls(**asdict(cfg))

    def get_cfg(self, as_dict: bool = False) -> ExtantLeavesDataModuleConfig:
        """Snapshot current settings as an ExtantLeavesDataModuleConfig.

        NOTE(review): unlike Herbarium2022DataModule.get_cfg, this omits
        to_grayscale/num_channels -- confirm whether the config carries them.
        """
        cfg = replace(self.default_cfg,
                      catalog_dir=self.catalog_dir,
                      label_col=self.label_col,
                      splits=self.splits,
                      shuffle=self.shuffle,
                      seed=self.seed,
                      batch_size=self.batch_size,
                      num_workers=self.num_workers,
                      pin_memory=self.pin_memory,
                      persistent_workers=self.persistent_workers,
                      transform_cfg=self.transform_cfg,
                      remove_transforms=self.remove_transforms)
        if as_dict:
            return asdict(cfg)
        return cfg

    def setup(self, stage=None):
        """Instantiate the datasets requested by ``stage`` ("train", "val",
        "test", "fit", "all", or None for everything)."""
        subsets = []
        if stage in ["train", "fit", "all", None]:
            self.train_dataset = self.dataset_cls(catalog_dir=self.catalog_dir,
                                                  subset="train",
                                                  label_col=self.label_col,
                                                  splits=self.splits,
                                                  shuffle=self.shuffle,
                                                  seed=self.seed,
                                                  transform=self.train_transform)
            self.setup_taxonomy_table(df=self.train_dataset.df,
                                      smallest_taxon_col=self.smallest_taxon_col)
            subsets.append("train")
        if stage in ["val", "fit", "all", None]:
            self.val_dataset = self.dataset_cls(catalog_dir=self.catalog_dir,
                                                subset="val",
                                                label_col=self.label_col,
                                                splits=self.splits,
                                                shuffle=self.shuffle,
                                                seed=self.seed,
                                                transform=self.val_transform)
            subsets.append("val")
        if stage in ["test", "all", None]:
            self.test_dataset = self.dataset_cls(catalog_dir=self.catalog_dir,
                                                 subset="test",
                                                 label_col=self.label_col,
                                                 splits=self.splits,
                                                 shuffle=self.shuffle,
                                                 seed=self.seed,
                                                 transform=self.test_transform)
            subsets.append("test")
        self.set_image_reader(self.image_reader)
        # NOTE(review): assumes the train branch ran -- calling setup("test")
        # alone fails on self.train_dataset (pre-existing behavior, unchanged).
        self.setup_taxonomy_table(df=self.train_dataset.df,
                                  smallest_taxon_col=self.smallest_taxon_col)
# catalog_dir: str="/media/data_cifs/projects/prj_fossils/users/jacob/data/leavesdb-v1_1/Fossil_family_10_512/splits/splits=(0.5,0.2,0.3)"
@dataclass
class AutoDataModule:
    """Factory stub meant to select a DataModule class by name.

    NOTE(review): ``from_config`` is unfinished -- the list literal inside it
    is a bare expression with no effect, and the method implicitly returns
    None for every input.
    """
    # def __init__(self)
    @classmethod
    def from_config(self,
                    name: str):
        # NOTE(review): the first argument of a classmethod is conventionally
        # named ``cls``; here it is named ``self``.
        if "Herbarium" in name:
            # Dead code: this list is never assigned, returned, or used.
            ["Herbarium2022DataModule",
             "ExtantLeavesDataModule"]
|
import unittest
import math
# from IPython import embed
from skopt import space as sp
import numpy as np
from buster import sampler, metrics
class TestAdaptiveSampler(unittest.TestCase):
    """Tests for ``sampler.AdaptiveSampler``.

    The ``expected`` arrays are snapshots of the sampler's exact output for a
    fixed ``random_state``; they must be regenerated whenever the sampling
    algorithm changes (see the inline TODOs).
    """

    def test_ask_one_dimension_numeric(self):
        # Ten observations on one integer dimension; the boolean outcome
        # flips from False to True between x=50 and x=60.
        X = [[10], [20], [30], [40], [50], [60], [70], [80], [90], [100]]
        y = [False, False, False, False, False, True, True, True, True, True]
        space = sp.Space([(0, 100)])
        opt = sampler.AdaptiveSampler(space.dimensions, random_state=0)
        opt.tell(X, y)
        result = opt.ask()
        expected = [[66], [53], [59], [64], [62], [58], [70], [51], [55], [68]]
        # TODO: rework so that it doesn't change every time the algorithm is altered
        np.testing.assert_array_equal(result, expected)

    def test_ask_two_dimensions_mixed(self):
        # Mixed search space: one integer dimension plus one categorical.
        X = [[10, 'cat'], [20, 'cat'], [30, 'cat'], [40, 'cat'], [50, 'cat'],
             [60, 'cat'], [70, 'cat'], [80, 'cat'], [90, 'cat'], [100, 'cat']]
        y = [False, False, False, False, False, True, True, True, True, True]
        space = sp.Space([(0, 100), ['cat', 'dog', 'rabbit']])
        apt = sampler.AdaptiveSampler(space.dimensions, random_state=0)
        apt.tell(X, y)
        result = apt.ask()
        expected = [[51, 'cat'], [60, 'dog'], [68, 'dog'], [77, 'rabbit'],
                    [54, 'rabbit'], [75, 'dog'], [79, 'cat'], [53, 'dog'],
                    [64, 'rabbit'], [42, 'rabbit'], [69, 'cat'], [58, 'rabbit'],
                    [50, 'cat'], [44, 'dog'], [72, 'dog'], [57, 'dog'], [47, 'dog'],
                    [63, 'dog'], [73, 'cat'], [44, 'dog']]
        # TODO: rework so that it doesn't change every time the algorithm is altered
        np.testing.assert_array_equal(result, expected)

    def test_run_three_dimensions_mixed(self):
        # End-to-end run against a synthetic objective: membership in one of
        # three circles, with the circle chosen by the categorical value.
        space = sp.Space([(0, 100), (0., 100.), ['cat', 'dog', 'rabbit']])
        opt = sampler.AdaptiveSampler(space.dimensions,
                                      random_state=1,
                                      n_initial_points=1000)

        def func(X):
            def in_radius(c_x, c_y, r, x, y):
                return math.hypot(c_x - x, c_y - y) <= r

            answer = []
            for x, y, a in X:
                if a == 'rabbit':
                    answer.append(in_radius(50, 50, 20, x, y))
                elif a == 'cat':
                    answer.append(in_radius(20, 20, 10, x, y))
                elif a == 'dog':
                    answer.append(in_radius(60, 60, 30, x, y))
            return answer

        opt.run(func, n_iter=20)
        result = opt.get_result()  # NOTE(review): result is currently unchecked
        # TODO: finish this test
class TestKLargestDiverseNeighborhood(unittest.TestCase):
    """Tests for ``sampler.k_largest_diverse_neighborhood`` driven by a
    precomputed Gower's distance matrix."""

    def test_one_dimension_numeric(self):
        X = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]
        y = [False, False, False, False, False, True, True, True, True, True]
        space = sp.Space([(0, 10)])
        # Pairwise Gower distance matrix of X against itself.
        distances = metrics.gowers.gowers_distance(X, X, space)
        result = sampler.k_largest_diverse_neighborhood(distances,
                                                        y,
                                                        n_neighbors=space.n_dims *
                                                        2,
                                                        k=2)
        # NOTE(review): expected rows appear to be index triples into X --
        # confirm against the function's documentation.
        expected = np.array([[5, 4, 6], [4, 3, 5]])
        np.testing.assert_array_equal(result, expected)

    def test_one_dimension_categorical(self):
        X = [['cat'], ['dog'], ['rabbit']]
        y = [False, True, False]
        space = sp.Space([['cat', 'dog', 'rabbit']])
        distances = metrics.gowers.gowers_distance(X, X, space)
        result = sampler.k_largest_diverse_neighborhood(distances,
                                                        y,
                                                        n_neighbors=space.n_dims *
                                                        2,
                                                        k=2)
        expected = np.array([[1, 0, 2], [2, 1, 0]])
        np.testing.assert_array_equal(result, expected)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
from django.core.management.base import BaseCommand
from django.db.models import QuerySet
from django.contrib.auth import get_user_model
from urls.models import Url,RecycleUrl
class Command(BaseCommand):
    help = "Custom Command to Perform a Database Operation"

    def handle(self, *args, **kwargs):
        """Delete user "blue"'s Url rows with id > 144 and all of their
        RecycleUrl rows.

        NOTE(review): the id > 144 cutoff is a magic constant -- confirm it
        against the data this one-off command was written for.
        """
        User = get_user_model()
        blue_user = User.objects.get(username="blue")
        QuerySet(model=Url).filter(creator=blue_user).filter(id__gt=144).delete()
        QuerySet(model=RecycleUrl).filter(creator=blue_user).delete()
        # [FIX] typo in the output message: "Deleteing" -> "Deleting".
        self.stdout.write("Deleting those entries")
|
from diameter.AVP import AVP
from diameter.Error import InvalidAVPLengthError
import struct
class AVP_Unsigned64(AVP):
    "A Diameter Unsigned64 AVP"
    def __init__(self,code,value,vendor_id=0):
        # Payload is the value packed as 8 network-order (big-endian) bytes.
        AVP.__init__(self,code,struct.pack("!Q",value),vendor_id)
    def queryValue(self):
        """Returns the payload as a 64-bit unsigned value."""
        return struct.unpack("!Q",self.payload)[0]
    def setValue(self,value):
        """Sets the payload to the specified 64-bit unsigned value."""
        self.payload = struct.pack("!Q",value)
    def __str__(self):
        # "<code>:<decimal value>" -- human-readable form for logging/debugging.
        return str(self.code) + ":" + str(self.queryValue())
    @staticmethod
    def narrow(avp):
        """Convert generic AVP to AVP_Unsigned64

        The payload must be exactly 8 bytes; the AVP's flags are preserved.
        Raises: InvalidAVPLengthError
        """
        if len(avp.payload)!=8:
            raise InvalidAVPLengthError(avp)
        value = struct.unpack("!Q",avp.payload)[0]
        a = AVP_Unsigned64(avp.code, value, avp.vendor_id)
        a.flags = avp.flags
        return a
def _unittest():
    """Smoke-test AVP_Unsigned64 round-tripping and narrowing."""
    a = AVP_Unsigned64(1,17)
    assert a.queryValue()==17
    a.setValue(42)
    assert a.queryValue()==42
    # [FIX] the payload must be exactly 8 bytes (eight 0x20 space characters)
    # for the 0x2020202020202020 assertion below to hold; the 1-byte payload
    # previously used here is the intentional failure case tested afterwards.
    a = AVP_Unsigned64.narrow(AVP(1,"        "))
    assert a.queryValue()==0x2020202020202020
    try:
        # A payload of the wrong length must be rejected.
        a = AVP_Unsigned64.narrow(AVP(1," "))
        assert False
    except InvalidAVPLengthError:
        pass
|
r""" Static order of nodes in dask graph
Dask makes decisions on what tasks to prioritize both
* Dynamically at runtime
* Statically before runtime
Dynamically we prefer to run tasks that were just made available. However when
several tasks become available at the same time we have an opportunity to break
ties in an intelligent way
d
|
b c
\ /
a
For example after we finish ``a`` we can choose to run either ``b`` or ``c``
next. Making small decisions like this can greatly affect our performance,
especially because the order in which we run tasks affects the order in which
we can release memory, which operationally we find to have a large effect on
many computations. We want to run tasks in such a way that we keep only a small
amount of data in memory at any given time.
Static Ordering
---------------
And so we create a total ordering over all nodes to serve as a tie breaker. We
represent this ordering with a dictionary mapping keys to integer values.
Lower scores have higher priority. These scores correspond to the order in
which a sequential scheduler would visit each node.
{'a': 0,
'c': 1,
'd': 2,
'b': 3}
There are several ways in which we might order our keys. This is a nuanced
process that has to take into account many different kinds of workflows, and
operate efficiently in linear time. We strongly recommend that readers look at
the docstrings of tests in dask/tests/test_order.py. These tests usually have
graph types laid out very carefully to show the kinds of situations that often
arise, and the order we would like to be determined.
Policy
------
Work towards *small goals* with *big steps*.
1. **Small goals**: prefer tasks whose final dependents have few dependencies.
We prefer to prioritize those tasks that help branches of computation that
can terminate quickly.
With more detail, we compute the total number of dependencies that each
task depends on (both its own dependencies, and the dependencies of its
dependencies, and so on), and then we choose those tasks that drive towards
results with a low number of total dependencies. We choose to prioritize
tasks that work towards finishing shorter computations first.
2. **Big steps**: prefer tasks with many dependents
However, many tasks work towards the same final dependents. Among those,
we choose those tasks with the most work left to do. We want to finish
the larger portions of a sub-computation before we start on the smaller
ones.
3. **Name comparison**: break ties with key name
Often graphs are made with regular keynames. When no other structural
difference exists between two keys, use the key name to break ties.
This relies on the regularity of graph constructors like dask.array to be a
good proxy for ordering. This is usually a good idea and a sane default.
"""
from math import log
from .core import get_dependencies, reverse_dict, get_deps, getcycle # noqa: F401
from .utils_test import add, inc # noqa: F401
def order(dsk, dependencies=None):
    """ Order nodes in dask graph

    This produces an ordering over our tasks that we use to break ties when
    executing.  We do this ahead of time to reduce a bit of stress on the
    scheduler and also to assist in static analysis.

    This currently traverses the graph as a single-threaded scheduler would
    traverse it.  It breaks ties in the following ways:

    1.  Begin at a leaf node that is a dependency of a root node that has the
        largest subgraph (start hard things first)
    2.  Prefer tall branches with few dependents (start hard things first and
        try to avoid memory usage)
    3.  Prefer dependents that are dependencies of root nodes that have
        the smallest subgraph (do small goals that can terminate quickly)

    Examples
    --------
    >>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
    >>> order(dsk)
    {'a': 0, 'c': 1, 'b': 2, 'd': 3}
    """
    if not dsk:
        return {}

    if dependencies is None:
        dependencies = {k: get_dependencies(dsk, k) for k in dsk}

    dependents = reverse_dict(dependencies)
    # num_needed[k]: how many of k's dependencies are not yet in `result`
    # (mutated as tasks complete).  total_dependencies[k]: total number of
    # tasks required to compute k (see ``ndependencies``).
    num_needed, total_dependencies = ndependencies(dependencies, dependents)
    # metrics[k] = (total_dependents, min_dependencies, max_dependencies,
    #               min_heights, max_heights) -- see ``graph_metrics``.
    metrics = graph_metrics(dependencies, dependents, total_dependencies)
    if len(metrics) != len(dsk):
        # graph_metrics only reaches keys connected to root nodes, so a size
        # mismatch implies a dependency cycle.
        cycle = getcycle(dsk, None)
        raise RuntimeError(
            "Cycle detected between the following keys:\n -> %s"
            % "\n -> ".join(str(x) for x in cycle)
        )

    # Leaf nodes. We choose one--the initial node--for each weakly connected subgraph.
    # Let's calculate the `initial_stack_key` as we determine `init_stack` set.
    init_stack = {
        # First prioritize large, tall groups, then prioritize the same as ``dependents_key``.
        key: (
            # at a high-level, work towards a large goal (and prefer tall and narrow)
            -max_dependencies,
            num_dependents - max_heights,
            # tactically, finish small connected jobs first
            min_dependencies,
            num_dependents - min_heights,  # prefer tall and narrow
            -total_dependents,  # take a big step
            # try to be memory efficient
            num_dependents,
            # tie-breaker
            StrComparable(key),
        )
        for key, num_dependents, (
            total_dependents,
            min_dependencies,
            max_dependencies,
            min_heights,
            max_heights,
        ) in (
            (key, len(dependents[key]), metrics[key])
            for key, val in dependencies.items()
            if not val
        )
    }
    # `initial_stack_key` chooses which task to run at the very beginning.
    # This value is static, so we pre-compute as the value of this dict.
    initial_stack_key = init_stack.__getitem__

    def dependents_key(x):
        """ Choose a path from our starting task to our tactical goal

        This path is connected to a large goal, but focuses on completing a small goal.
        """
        num_dependents = len(dependents[x])
        total_dependents, min_dependencies, _, min_heights, _ = metrics[x]
        return (
            # tactically, finish small connected jobs first
            min_dependencies,
            num_dependents - min_heights,  # prefer tall and narrow
            -total_dependents,  # take a big step
            # try to be memory efficient
            num_dependents - len(dependencies[x]) + num_needed[x],
            num_dependents,
            total_dependencies[x],  # already found work, so don't add more
            # tie-breaker
            StrComparable(x),
        )

    def dependencies_key(x):
        """ Choose which dependency to run as part of a reverse DFS

        This is very similar to both ``initial_stack_key`` and ``dependents_key``.
        """
        num_dependents = len(dependents[x])
        (
            total_dependents,
            min_dependencies,
            max_dependencies,
            min_heights,
            max_heights,
        ) = metrics[x]
        # Prefer short and narrow instead of tall in narrow, because we're going in
        # reverse along dependencies.
        return (
            # at a high-level, work towards a large goal (and prefer short and narrow)
            -max_dependencies,
            num_dependents + max_heights,
            # tactically, finish small connected jobs first
            min_dependencies,
            num_dependents + min_heights,  # prefer short and narrow
            -total_dependencies[x],  # go where the work is
            # try to be memory efficient
            num_dependents - len(dependencies[x]) + num_needed[x],
            num_dependents,
            total_dependents,  # already found work, so don't add more
            # tie-breaker
            StrComparable(x),
        )

    result = {}
    i = 0

    # Nodes in the current weakly connected subgraph that are not part of the current
    # DFS along dependencies.  The next DFS will begin from these nodes.  `outer_stack`
    # is populated from `init_stack` and the dependents of `outer_stack_seeds`.
    outer_stack = []
    # Nodes get added to this as they complete.  We will populate `outer_stack` from
    # dependents of these nodes.  We consider nodes using a FIFO policy (yes, we could
    # have used collections.deque, but a list with the current index is faster).
    # Note that this does not include nodes with no dependents!
    outer_stack_seeds = []
    outer_stack_seeds_index = 0
    # Used to perform a DFS along dependencies.  Once emptied (when traversing dependencies),
    # this continues (along dependents) along a path from the initial (leaf) node down to a
    # root node (a tactical goal).  This ensures this root node will get calculated before
    # we consider any other dependents from `outer_stack_seeds`.
    inner_stack = []

    # aliases for speed
    outer_stack_append = outer_stack.append
    outer_stack_pop = outer_stack.pop
    outer_stack_extend = outer_stack.extend
    outer_stack_seeds_append = outer_stack_seeds.append
    inner_stack_append = inner_stack.append
    inner_stack_pop = inner_stack.pop
    inner_stack_extend = inner_stack.extend
    set_difference = set.difference

    is_init_sorted = False
    item = min(init_stack, key=initial_stack_key)
    while True:
        outer_stack_append(item)
        while outer_stack:
            item = outer_stack_pop()
            if item not in result:
                inner_stack_append(item)
                while inner_stack:
                    # Perform a DFS along dependencies until we complete our tactical goal
                    item = inner_stack_pop()
                    if item in result:
                        continue
                    if num_needed[item]:
                        # Not runnable yet: revisit after its unmet dependencies.
                        inner_stack_append(item)
                        deps = set_difference(dependencies[item], result)
                        if 1 < len(deps) < 1000:
                            inner_stack_extend(
                                sorted(deps, key=dependencies_key, reverse=True)
                            )
                        else:
                            # Skip the sort when it is unnecessary (one dep) or
                            # too expensive (very many deps).
                            inner_stack_extend(deps)
                        continue

                    result[item] = i
                    i += 1
                    deps = dependents[item]

                    if metrics[item][3] == 2:  # min_height
                        # Don't leave any dangling single nodes!  Finish all dependents that are
                        # ready and are also root nodes.  Doing this here also lets us continue
                        # down to a different root node.
                        finish_now = {
                            dep
                            for dep in deps
                            if not dependents[dep] and num_needed[dep] == 1
                        }
                        if finish_now:
                            deps -= finish_now
                            if len(finish_now) > 1:
                                finish_now = sorted(finish_now, key=dependents_key)
                            for dep in finish_now:
                                result[dep] = i
                                i += 1

                    if deps:
                        for dep in deps:
                            num_needed[dep] -= 1
                        if not inner_stack:
                            # Continue towards our tactical goal (an easy-to-compute root)
                            if len(deps) == 1:
                                inner_stack_extend(deps)
                            else:
                                # If there are many, many dependents, then calculating
                                # `dependents_key` here could be relatively expensive.
                                # However, this is still probably worth it tactically.
                                inner_stack_append(min(deps, key=dependents_key))
                            outer_stack_seeds_append(item)
                        else:
                            # Our next starting point will be "seeded" by a completed task in a
                            # FIFO manner.  When our DFS with `inner_stack` is finished--which
                            # means we computed our tactical goal--we will choose our next starting
                            # point from the dependents of completed tasks.  However, it is too
                            # expensive to consider all dependents of all completed tasks, so we
                            # consider the dependents of tasks in the order they complete.
                            outer_stack_seeds_append(item)
            while not outer_stack and outer_stack_seeds_index < len(outer_stack_seeds):
                # We wait for as long as possible to consider dependents of completed nodes:
                # 1. some may have already completed, so waiting lets us sort fewer items, and
                # 2. sorting via `dependents_key` depends on `num_needed`, which is dynamic.
                deps = set_difference(
                    dependents[outer_stack_seeds[outer_stack_seeds_index]], result
                )
                if deps:
                    if 1 < len(deps) < 1000:
                        outer_stack_extend(
                            sorted(deps, key=dependents_key, reverse=True)
                        )
                    else:
                        outer_stack_extend(deps)
                outer_stack_seeds_index += 1

        if len(dependencies) == len(result):
            break  # all done!

        # We just finished computing a connected group.
        # Let's choose the first `item` in the next group to compute.
        # If we have few large groups left, then it's best to find `item` by taking a minimum.
        # If we have many small groups left, then it's best to sort.
        # If we have many tiny groups left, then it's best to simply iterate.
        if not is_init_sorted:
            prev_len = len(init_stack)
            if type(init_stack) is dict:
                init_stack = set(init_stack)
            init_stack = set_difference(init_stack, result)
            N = len(init_stack)
            m = prev_len - N
            # is `min` likely better than `sort`?
            if m >= N or N + (N - m) * log(N - m) < N * log(N):
                item = min(init_stack, key=initial_stack_key)
                continue

            if len(init_stack) < 10000:
                init_stack = sorted(init_stack, key=initial_stack_key, reverse=True)
            else:
                init_stack = list(init_stack)
            init_stack_pop = init_stack.pop
            is_init_sorted = True

        item = init_stack_pop()
        while item in result:
            item = init_stack_pop()

    return result
def graph_metrics(dependencies, dependents, total_dependencies):
    r""" Useful measures of a graph used by ``dask.order.order``

    For every key, computes a 5-tuple by sweeping from the root nodes
    (keys with no dependents) down along dependencies:

    1. total number of keys that can only run after this key (roots get 1);
    2. minimum of ``total_dependencies`` over the root nodes reachable
       from this key;
    3. maximum of the same quantity;
    4. minimum height (distance, in edges + 1) from a reachable root node;
    5. maximum height from a reachable root node.

    Returns
    -------
    metrics: Dict[key, Tuple[int, int, int, int, int]]
    """
    result = {}
    # Number of a key's dependents whose metrics are still unknown.
    pending = {k: len(v) for k, v in dependents.items() if v}
    stack = []
    push = stack.append

    # Seed the sweep with the root nodes (keys that nothing depends on).
    for node, node_dependents in dependents.items():
        if node_dependents:
            continue
        base = total_dependencies[node]
        result[node] = (1, base, base, 1, 1)
        for child in dependencies[node]:
            pending[child] -= 1
            if not pending[child]:
                push(child)

    # Each popped node has metrics available for every one of its dependents.
    while stack:
        node = stack.pop()
        parents = dependents[node]
        if len(parents) == 1:
            # Fast path: inherit directly from the single dependent.
            (parent,) = parents
            (
                total_dependents,
                min_dependencies,
                max_dependencies,
                min_heights,
                max_heights,
            ) = result[parent]
            result[node] = (
                1 + total_dependents,
                min_dependencies,
                max_dependencies,
                1 + min_heights,
                1 + max_heights,
            )
        else:
            # Combine the metrics of all dependents column-wise.
            columns = zip(*(result[parent] for parent in parents))
            totals = next(columns)
            min_deps = next(columns)
            max_deps = next(columns)
            min_hts = next(columns)
            max_hts = next(columns)
            result[node] = (
                1 + sum(totals),
                min(min_deps),
                max(max_deps),
                1 + min(min_hts),
                1 + max(max_hts),
            )
        for child in dependencies[node]:
            pending[child] -= 1
            if not pending[child]:
                push(child)
    return result
def ndependencies(dependencies, dependents):
    """ Number of total data elements on which this key depends

    For each key, count the total number of tasks (itself included) that must
    run before it can run.

    Returns
    -------
    num_dependencies: Dict[key, int]
        direct dependency counts, i.e. ``len(dependencies[key])``
    total_dependencies: Dict[key, int]
        transitive dependency counts, including the key itself
    """
    waiting = {key: len(deps) for key, deps in dependencies.items()}
    num_dependencies = dict(waiting)
    ready = []

    # Keys with no dependencies cost exactly one task: themselves.
    totals = {key: 1 for key, deps in dependencies.items() if not deps}
    for key in totals:
        for parent in dependents[key]:
            waiting[parent] -= 1
            if not waiting[parent]:
                ready.append(parent)

    # Propagate totals upward once all of a key's dependencies are resolved.
    while ready:
        key = ready.pop()
        totals[key] = 1 + sum(totals[child] for child in dependencies[key])
        for parent in dependents[key]:
            waiting[parent] -= 1
            if not waiting[parent]:
                ready.append(parent)
    return num_dependencies, totals
class StrComparable(object):
    """ Wrap an object so ordering falls back to string comparison

    Comparing two objects of incompatible types raises TypeError in Python 3
    (e.g. ``'a' < 1``).  This wrapper first tries the native ``<``; if that
    raises for any reason, it compares ``str()`` representations instead, so
    heterogeneous keys can always be sorted deterministically.
    """

    __slots__ = ("obj",)

    def __init__(self, obj):
        self.obj = obj

    def __lt__(self, other):
        lhs, rhs = self.obj, other.obj
        try:
            return lhs < rhs
        except Exception:
            # Incomparable types: fall back to lexicographic comparison.
            return str(lhs) < str(rhs)
|
import sys
import datetime
from traceback import format_exception_only
import string
import random
import traceback
from sqlalchemy import or_, desc
from flask import (Blueprint, jsonify, request, current_app)
from sqlalchemy.exc import IntegrityError
from sqlalchemy import func
from mail import EmailTemplate, sendmail
from model import (db, User, Boat, unique_constraint_key, not_null_constraint_key, unique_constraint_error)
from auth import ( make_digest, generate_token, generate_id_token,authenticate, compare_digest, authenticateAdmin)
from validators import(validate_boats, users_validate_required)
from datetime import datetime, timedelta
users = Blueprint('users', __name__)
@users.route('/api/users', methods=['GET'])
@authenticate
def getUsers(reqUser):
    """List non-deleted users, newest first.

    Query params:
        search_term: matches firstname, lastname, or email (substring).
        search_date: YYYY-MM-DD; matches the user's creation date.

    Returns a JSON array of user objects, or 400 on failure.
    """
    try:
        term = request.args.get("search_term")
        term_date = request.args.get("search_date")
        query = User.query.filter(User.status != 'deleted')
        if term:
            conditions = [
                User.firstname.like("%" + term + "%"),
                User.lastname.like("%" + term + "%"),
                User.email.like("%" + term + "%"),
            ]
            # [FIX] only add the creation-date clause when a date was supplied.
            # Previously strptime(term_date, ...) ran unconditionally, so any
            # search without `search_date` raised TypeError and returned 400.
            if term_date:
                conditions.append(
                    func.date(User.created_at) == datetime.strptime(term_date, "%Y-%m-%d"))
            query = query.filter(or_(*conditions))
        users = query.order_by(desc(User.id)).all()
        return jsonify([user.json() for user in users])
    except Exception:
        traceback.print_exc()
        return jsonify(error='Invalid JSON.'), 400
@users.route('/api/users/count', methods=['GET'])
@authenticate
def get_count_users(reqUser):
    """Return the number of users as a bare JSON integer.

    Excludes soft-deleted users so the count agrees with what
    GET /api/users returns (previously deleted rows were counted too).
    """
    data = db.session.query(func.count(User.id)).\
        filter(User.status != 'deleted').scalar()
    return jsonify(data)
@users.route('/api/users/boats')
@authenticate
def getBoats(reqUser):
    """Return the authenticated user's boats as a JSON array."""
    return jsonify([boat.json() for boat in reqUser.boats.all()])
@users.route('/api/users', methods=['POST'])
def post_users():
    """Register a new user (signup).

    Expects a JSON payload with the User fields; "username" defaults to
    "email" when omitted.  Optional "boats" is a list of Boat payloads.
    On success the account is stored with status "draft" and a welcome /
    confirmation email is sent.  Returns the created user as JSON, or a
    400 with a structured error description on failure.
    """
    try:
        payload = request.get_json()
    except Exception:
        return jsonify(error='Invalid JSON.')
    try:
        if not payload.get('username', None):
            payload['username'] = payload.get('email')
        validation = users_validate_required(payload)
        if validation['errors']:
            return jsonify(error={'name': 'invalid_model',
                                  'errors': validation['errors']}), 400
        boats = payload.get('boats', None)
        if boats:
            # Only validate boats when some were actually submitted
            # (validate_boats used to be called with None as well).
            validation = validate_boats(boats)
            if (validation and validation['errors']):
                return jsonify(
                    error={'name': 'invalid_model',
                           'errors': validation['errors']}), 400
            payload['boats'] = [Boat(**boat) for boat in boats]
        payload['status'] = 'draft'
        # Never store the clear-text password.
        payload['password'] = make_digest(payload['password'])
        user = User(**payload)
    except Exception:
        err_type, err_value, tb = sys.exc_info()
        current_app.logger.warn(
            ''.join(format_exception_only(err_type, err_value)))
        # Bug fix: err_type is an exception class; comparing it to the
        # string 'TypeError' was always False, so the dedicated message
        # below was unreachable.
        if err_type is TypeError:
            return jsonify(error='Invalid JSON.'), 400
        return jsonify(error='Empty or malformed required field.'), 400
    try:
        db.session.add(user)
        db.session.commit()
    except (IntegrityError, Exception) as e:
        db.session.rollback()
        err_type, err_value, tb = sys.exc_info()
        current_app.logger.warn(
            ''.join(format_exception_only(err_type, err_value)))
        err_orig = str(getattr(e, 'orig', 'register error'))
        errors = []
        if err_orig.find('violates unique constraint') > -1:
            errors.append({'name': 'value_exists',
                           'key': unique_constraint_key(err_orig)})
        elif err_orig.find('violates not-null constraint') > -1:
            errors.append({'name': 'missing_attribute',
                           'key': not_null_constraint_key(err_orig)})
        return jsonify(error={'name': 'invalid_model', 'errors': errors}), 400
    # Email confirmation token valid for 24 hours.
    emailToken = generate_token(
        user.id, timedelta(seconds=60 * 60 * 24),
        current_app.config['JWTSECRET'] + b'_emailconfirm').decode('utf-8')
    emailBody = EmailTemplate(
        template=current_app.config['WELCOME_EMAIL_TEMPLATE'],
        values={
            'title': current_app.config['WELCOME_EMAIL_SUBJECT'],
            'firstname': user.firstname,
            'serverUrl': current_app.config['SERVER_URL'],
            'token': emailToken
        }).render()
    sendmail('no-reply@natural-solutions.eu', user.email,
             current_app.config['WELCOME_EMAIL_SUBJECT'], emailBody)
    return jsonify(user.json())
@users.route('/api/users/me')
@authenticate
def getMe(reqUser):
    """Return the authenticated user's own profile as JSON."""
    return jsonify(reqUser.json())
@users.route('/api/users', methods=['DELETE'])
@authenticateAdmin
def deleteUsers(reqUser):
    """Soft-delete the users whose ids are passed as ?id[]=... parameters.

    Rows are never removed; their status is set to 'deleted' so the
    other endpoints can filter them out.
    """
    try:
        ids = request.args.getlist('id[]')
        # Fix: removed a leftover debug print and stopped shadowing the
        # builtin `id`.
        for user_id in ids:
            db.session.query(User).filter(User.id == int(user_id)).\
                update({'status': 'deleted'})
        db.session.commit()
        return jsonify('success'), 200
    except Exception:
        traceback.print_exc()
        return jsonify(error='Invalid JSON.'), 400
@users.route('/api/users', methods=['PATCH'])
@authenticateAdmin
def patchUsers(reqUser):
    """Apply a partial update to each user object in the JSON array body."""
    for user in request.get_json():
        # Empty-string values mean "no change" and are dropped.
        patch = {key: value for key, value in user.items() if value != ''}
        patchUser(patch, reqUser)
    return jsonify('success'), 200
@users.route('/api/users/me', methods=['PATCH'])
@authenticate
def patchMe(reqUser):
    """Partially update the authenticated user's own profile."""
    # Empty-string values mean "no change" and are dropped.
    patch = {key: value for key, value in request.get_json().items()
             if value != ''}
    patch['id'] = reqUser.id
    patchUser(patch, reqUser)
    user = User.query.filter_by(id=reqUser.id).first()
    return jsonify(user.json())
def patchUser(userPatch, reqUser):
    """Apply a partial-update dict to a user row and its boats.

    ``userPatch`` must contain the target user's ``id``.  The optional
    ``boats`` entry is a list of boat payloads: entries carrying an
    ``id`` update the existing boat, the others are created and attached
    to ``reqUser``.  Returns a (response, 400) tuple on boat-validation
    failure, otherwise None.
    """
    # Bug fix: 'boats' used to be read with userPatch['boats'], raising
    # a KeyError for payloads without boats (patchMe/patchUsers forward
    # arbitrary JSON).  Also removed a duplicated validate_boats() call
    # and leftover debug prints.
    boats = userPatch.pop('boats', None)
    if boats:
        boatsValidation = validate_boats(boats)
        if (boatsValidation and boatsValidation['errors']):
            return jsonify(error={
                'name': 'invalid_model',
                'errors': boatsValidation['errors']
            }), 400
        for boat in boats:
            try:
                if boat.get('id', None):
                    Boat.query.filter_by(id=boat.get('id')).update(boat)
                else:
                    boat['user_id'] = reqUser.id
                    db.session.add(Boat(**boat))
            # TODO factorize
            except (IntegrityError, Exception) as e:
                catch(e)
    try:
        if 'password' in userPatch:
            # Store the digest, never the clear-text password.
            userPatch['password'] = make_digest(userPatch['password'])
        db.session.query(User).filter(User.id == int(userPatch['id'])).update(userPatch)
        db.session.commit()
    # TODO factorize
    except (IntegrityError, Exception) as e:
        traceback.print_exc()
        catch(e)
def catch(e):
    """Roll back the session and build a 400 response for a DB failure.

    Inspects the original database error message to classify unique-
    and not-null-constraint violations.  Returns (response, 400).
    """
    db.session.rollback()
    err_type, err_value, tb = sys.exc_info()
    current_app.logger.warn(
        ''.join(format_exception_only(err_type, err_value)))
    err_orig = str(getattr(e, 'orig', 'register error'))
    errors = []
    if err_orig.find('violates unique constraint') > -1:
        errors.append(unique_constraint_error(err_orig))
    elif err_orig.find('violates not-null constraint') > -1:
        # Bug fix: not_null_constraint_error() was never imported and this
        # branch raised a NameError; report the offending column the same
        # way post_users() does, using the imported not_null_constraint_key.
        errors.append({'name': 'missing_attribute',
                       'key': not_null_constraint_key(err_orig)})
    return jsonify(error={'name': 'invalid_model', 'errors': errors}), 400
@users.route('/api/users/login', methods=['POST'])
def log_in():
    """Authenticate a user and return a JWT token plus the profile JSON.

    Responds 401 for missing/unknown/wrong credentials, 403 (and
    re-sends the confirmation email) for unconfirmed 'draft' accounts.
    """
    user = None
    # Both credentials are mandatory.
    if (request.json.get('password') in (None, '') or
            request.json.get('username') in (None, '')):
        return jsonify(error='Could not authenticate.'), 401
    # Look up the registered account.
    try:
        user = User.query.filter_by(username=request.json['username']).first()
    except Exception as e:
        return jsonify(error='Could not authenticate.'), 401
    if user is None:
        return jsonify(error='Not registered.'), 401
    # Constant-time digest comparison against the stored password hash.
    if (user and not compare_digest(
            user.password, make_digest(request.json['password']))):
        return jsonify(error='Wrong credentials.'), 401
    if user and user.status == 'draft':
        # Account was never confirmed: send a fresh confirmation token
        # (valid 24h) and refuse the login.
        emailToken = generate_token(
            user.id, timedelta(seconds=60 * 60 * 24),
            current_app.config['JWTSECRET'] + b'_emailconfirm').decode('utf-8')
        emailBody = EmailTemplate(
            template=current_app.config['REMINDER_EMAIL_TEMPLATE'],
            values={
                'title': current_app.config['REMINDER_EMAIL_SUBJECT'],
                'firstname': user.firstname,
                'serverUrl': current_app.config['SERVER_URL'],
                'token': emailToken
            }).render()
        sendmail(
            'no-reply@natural-solutions.eu', user.email,
            current_app.config['REMINDER_EMAIL_SUBJECT'], emailBody)
        return jsonify(error='user_draft'), 403
    token = generate_id_token(user.id)
    return jsonify(token=token.decode('utf-8'), profile=user.json())
@users.route('/api/users/<email>/password/recover', methods=['GET'])
def recover(email):
    """Reset the password of the account matching ``email`` and mail it.

    NOTE(review): several security smells to confirm with the team:
    this performs a state change on a GET request, it discloses whether
    an address is registered ('Not registered.'), and it returns the
    newly generated clear-text password in the JSON response in
    addition to emailing it.
    """
    try:
        # The username is expected to hold the email address (post_users
        # defaults username to email).
        user = User.query.filter_by(username=email).first()
    except Exception as e:
        return jsonify(error='Could not verify.'), 401
    if user is None:
        return jsonify(error='Not registered.'), 401
    else:
        # Generate a random 8-character password and store only its digest.
        new_pass = id_generator()
        User.query. \
            filter(User.id == user.id). \
            update({User.password:make_digest(new_pass)})
        db.session.commit()
        emailBody = EmailTemplate(
            template=current_app.config['RECOVER_PASSWORD_TEMPLATE'],
            values={
                'title': current_app.config['REMINDER_EMAIL_SUBJECT'],
                'firstname': user.firstname,
                'password': new_pass,
                'serverUrl': current_app.config['SERVER_URL']
            }).render()
        sendmail(
            'no-reply@natural-solutions.eu', user.email,
            current_app.config['REMINDER_EMAIL_SUBJECT'], emailBody)
        return jsonify(new_pass)
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random string of ``size`` characters drawn from ``chars``.

    Used to generate temporary passwords (see recover()), so it must use
    a cryptographically secure source: ``random.choice`` was replaced by
    ``secrets.choice`` — the module-level ``random`` RNG is predictable.
    """
    import secrets  # local import keeps this security fix self-contained
    return ''.join(secrets.choice(chars) for _ in range(size))
|
#!/usr/bin/env python
# coding: utf-8
"""Titanic survival classification demo (exported Jupyter notebook).

Compares Dummy, DecisionTree and RandomForest classifiers on the Titanic
dataset, first with numeric features only, then after encoding 'Sex'.
"""
# In[1]:
# library imports
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.dummy import DummyClassifier
# In[2]:
# Titanic data
df = pd.read_csv('http://raw.githubusercontent.com/dataworkshop/titanic/master/vladimir/input/train.csv')
df.head()
# In[3]:
# feature set
feats = ['Pclass', 'Fare']
X = df[feats].values
y = df['Survived'].values
# train / test split (NOTE: no fixed random_state, so results vary per run)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3)
X_train.shape,X_test.shape
# In[4]:
# Dummy baseline model
model = DummyClassifier()
model.fit (X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_test,y_pred)
# In[5]:
# DecisionTreeClassifier model
model = DecisionTreeClassifier(max_depth=20)
model.fit (X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_test,y_pred)
# In[6]:
# RandomForestClassifier model
model = RandomForestClassifier(max_depth=10, n_estimators=50)
model.fit (X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_test,y_pred)
# In[7]:
# duplicate of the previous cell (kept from the notebook export)
model = RandomForestClassifier(max_depth=10, n_estimators=50)
model.fit (X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_test,y_pred)
# In[8]:
# factorize: map strings to integers, e.g. male = 0, female = 1
df['Sex_cat'] = df['Sex'].factorize()[0]
# In[9]:
# feature set extended with the new Sex_cat feature
feats = ['Pclass', 'Fare','Sex_cat']
X = df[feats].values
y = df['Survived'].values
# train / test split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3)
X_train.shape,X_test.shape
# In[10]:
# DecisionTreeClassifier model
model = DecisionTreeClassifier(max_depth=20)
model.fit (X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_test,y_pred)
# In[ ]:
|
import time
import json
import argparse
import os
import sys
import logging
import shutil
from datetime import datetime
import glob
import random
from scipy.stats import mannwhitneyu
from scipy.stats import spearmanr
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
import tensorflow as tf
import tensorflow_addons as tfa
#from optimization import create_optimizer
from model_attention import ModelAttention
from dataset import build_dataset
from loss import compute_loss
# Pin training to the first GPU and cap TensorFlow's CPU thread pools.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.config.threading.set_intra_op_parallelism_threads(60)
tf.config.threading.set_inter_op_parallelism_threads(60)
# Root logger: DEBUG level, timestamped messages to stdout; a per-run
# file handler is added later in train_single_gpu().
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging_formatter = logging.Formatter(
    '%(asctime)s - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging_formatter)
logger.addHandler(ch)
class LearningRate(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup followed by linear (polynomial, power=1) decay.

    For the first ``warmup_steps`` steps the rate ramps linearly from
    ~0 up to ``base_lr``; afterwards it decays to ``end_learning_rate``
    over ``decay_steps`` steps.  With ``decay_steps == 0`` the rate
    stays constant at ``base_lr`` after warmup.
    """

    def __init__(self, base_lr, end_learning_rate, warmup_steps, decay_steps):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.warmup_steps = warmup_steps
        self.decay_steps = decay_steps
        if decay_steps == 0:
            # No decay requested: hold the base rate forever.
            self.poly_decay_fn = lambda _: self.base_lr
        else:
            self.poly_decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
                base_lr,
                decay_steps,
                end_learning_rate=end_learning_rate,
                power=1.0)

    def __call__(self, step):
        def warmup_lr():
            # Fraction (step + 1) / warmup_steps of the base rate.
            progress = tf.cast(step + 1, tf.float32)
            return self.base_lr * progress / tf.cast(self.warmup_steps,
                                                     tf.float32)

        def decayed_lr():
            return self.poly_decay_fn(step - self.warmup_steps)

        return tf.cond(step < self.warmup_steps, warmup_lr, decayed_lr)
class TestMetric(object):
    """Accumulates (target, prediction) pairs and computes evaluation metrics.

    Labels and scores are appended into two flat tensors; the result_*
    helpers convert them to numpy and defer to scipy/sklearn.  Metric
    helpers return a neutral value instead of raising when a metric is
    undefined (e.g. only one class accumulated).
    """

    def __init__(self):
        self._targets = tf.zeros((0, ), tf.int32)
        self._preds = tf.zeros((0, ), tf.float32)

    def reset_states(self):
        """Drop all accumulated pairs."""
        self._targets = tf.zeros((0, ), tf.int32)
        self._preds = tf.zeros((0, ), tf.float32)

    def update_state(self, targets, preds):
        """Append a batch of integer labels and float scores."""
        self._targets = tf.concat(
            [self._targets, tf.cast(targets, tf.int32)], axis=-1)
        self._preds = tf.concat(
            [self._preds, tf.cast(preds, tf.float32)], axis=-1)

    def result_auROC(self):
        """Area under the ROC curve; 0.0 when undefined."""
        try:
            return roc_auc_score(self._targets.numpy(), self._preds.numpy())
        except Exception:  # fix: was a bare except (also caught SystemExit etc.)
            return 0.0

    def result_auPR(self):
        """Area under the precision-recall curve; 0.0 when undefined."""
        try:
            precision, recall, _ = precision_recall_curve(
                self._targets.numpy(), self._preds.numpy())
            return auc(recall, precision)
        except Exception:
            return 0.0

    def result_pvalue(self):
        """Two-sided Mann-Whitney U p-value between positive and negative scores."""
        all_pred = self._preds.numpy()
        all_label = self._targets.numpy()
        mtest = mannwhitneyu(all_pred[all_label == 1],
                             all_pred[all_label == 0],
                             alternative='two-sided')
        return mtest.pvalue

    def result_total(self):
        """Number of accumulated samples."""
        return self._targets.numpy().shape[0]

    def result_neg(self):
        """Number of negative (label 0) samples."""
        res = self._targets.numpy()
        return res.shape[0] - np.sum(res)

    def result_pos(self):
        """Number of positive (label 1) samples."""
        return np.sum(self._targets.numpy())

    def result_corr(self):
        """Spearman correlation between predictions and labels.

        Returns a (corr, pvalue) pair.  Bug fix: the failure path used
        to return the scalar 0.0, which broke the
        ``corr, pvalue = metric.result_corr()`` unpacking at the call
        site; it now returns (0.0, 1.0).
        """
        try:
            corr, pvalue = spearmanr(self._preds.numpy(),
                                     self._targets.numpy())
            return corr, pvalue
        except Exception:
            return 0.0, 1.0

    def result_max(self):
        """Largest accumulated prediction; 0.0 when empty."""
        try:
            return np.max(self._preds.numpy())
        except Exception:
            return 0.0
def train_single_gpu(config, args):
    """Train the attention model on a single GPU with early stopping.

    Creates a timestamped run directory under ./res, splits the input
    tfrecord shards into 9 training / 1 validation fold (selected by
    ``args.cv``), builds the model, optimizer and LR schedule from
    ``config``, trains for up to 512 epochs and stops when the
    validation loss has not improved for 5 epochs.
    """
    #setup logger
    str_t = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    train_dir = f'./res/{str_t}'
    config['train']['train_dir'] = train_dir
    os.makedirs(train_dir)
    os.makedirs(train_dir + '/result')
    os.makedirs(train_dir + '/model')
    fh = logging.FileHandler(f'{train_dir}/train.log')
    fh.setFormatter(logging_formatter)
    logger.addHandler(fh)
    logger.info(json.dumps(config, indent=4))
    #train and validate files
    batch_size = config['train']['batch_size']
    input_config = config['input']
    input_base_dir = input_config['base_dir']
    # Shard glob: the last character of the configured train pattern is
    # replaced by args.random (a shard-group suffix).
    all_files = glob.glob(input_base_dir + '/' + input_config['train'][:-1] +
                          args.random + '*tfrec')
    #all_files = glob.glob('../dataset/tf/f_v1_w64_2021_v2' + '/' +
    #                      input_config['train'][:-1] + args.random + '*tfrec')
    # Fixed seed so every run assigns the same shards to each CV fold.
    random.seed(2020)
    random.shuffle(all_files)
    train_files, validate_files = [], []
    # NOTE(review): assumes at least 10 shards exist; shards beyond the
    # first 10 are silently ignored — confirm this is intended.
    for i in range(10):
        if i == args.cv:
            validate_files.append(all_files[i])
        else:
            train_files.append(all_files[i])
    print(train_files)
    print(validate_files)
    # Extra evaluation cohorts (currently unused except via the
    # commented-out line below).
    asd = glob.glob(input_base_dir + '/' + 'ASD' + '.tfrec')
    ndd = glob.glob(input_base_dir + '/' + 'NDD' + '.tfrec')
    control = glob.glob(input_base_dir + '/' + 'Control' + '.tfrec')
    brca2 = glob.glob(input_base_dir + '/' + 'BRCA2' + '.tfrec')
    pparg = glob.glob(input_base_dir + '/' + 'PPARG' + '.tfrec')
    #train_files += pparg
    train_dataset = build_dataset(train_files, batch_size)
    validate_dataset = build_dataset(validate_files, batch_size)
    #model
    model_type = config['train']['model_type']
    if model_type == 'attention':
        model = ModelAttention(config['model'])
    else:
        raise ValueError(f'model type {model_type} does not exist.')
    #learning rate
    init_learning_rate = config['train']['learning_rate']
    end_learning_rate = config['train']['end_learning_rate']
    '''
    warmup_epochs = config['train']['warmup_epochs']
    decay_epochs = config['train']['decay_epochs']
    training_samples = 0
    for inputs in train_dataset:
        training_samples += inputs[0].shape[0]
    logger.info(f'training_samples= {training_samples}')
    batches_each_epoch = int(training_samples / batch_size)
    warmup_steps = batches_each_epoch * warmup_epochs
    decay_steps = batches_each_epoch * decay_epochs
    '''
    warmup_steps, decay_steps = config['train']['warmup_steps'], config[
        'train']['decay_steps']
    learning_rate = LearningRate(init_learning_rate,
                                 end_learning_rate=end_learning_rate,
                                 warmup_steps=warmup_steps,
                                 decay_steps=decay_steps)
    #training algorithm
    opt = config['train'].get('opt', 'adam')
    if opt == 'adam':
        optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        #optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    elif opt == 'adamw':
        weight_decay_rate = config['train']['weight_decay_rate']
        optimizer = tfa.optimizers.AdamW(
            weight_decay=weight_decay_rate,
            learning_rate=learning_rate,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-8,
        )
        '''
        optimizer = create_optimizer(init_learning_rate,
                                     decay_steps + warmup_steps,
                                     warmup_steps,
                                     end_lr=end_learning_rate,
                                     optimizer_type='adamw')
        '''
    else:
        raise NotImplementedError(f"opt {opt} not NotImplementedError")
    #metrics
    metric_train_loss = tf.keras.metrics.Mean(name='train_loss')
    metric_test_loss = tf.keras.metrics.Mean(name='test_loss')
    metric_test = TestMetric()
    #summary
    train_log_dir = f'{train_dir}/summary/train'
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)

    # Histogram summaries of kernel / normalization weights (debug aid;
    # currently only invoked from commented-out code below).
    def _update_histogram_summary():
        with train_summary_writer.as_default():
            for var in model.trainable_variables:
                if 'kernel:' in var.name or 'gamma:' in var.name or 'beta:' in var.name:
                    tf.summary.histogram(var.name,
                                         var,
                                         step=optimizer.iterations)

    # Per-variable gradient-norm summaries (same debug status as above).
    def _update_gradient_norm_summary(var, grad):
        with train_summary_writer.as_default():
            for v, g in zip(var, grad):
                if 'kernel:' in v.name or 'gamma:' in v.name or 'beta:' in v.name:
                    tf.summary.scalar(f'gradient_norm/{v.name}',
                                      tf.norm(g, ord='euclidean'),
                                      step=optimizer.iterations)

    # Forward pass + loss for one evaluation batch (graph-compiled).
    @tf.function(input_signature=[validate_dataset.element_spec])
    def test_step(sample):
        var, ref_aa, alt_aa, feature, label, padding_mask = sample
        logit = model((ref_aa, alt_aa, feature), False, padding_mask)
        loss = compute_loss(label, logit)
        pred = model.predict_from_logit(logit)
        return var, label, pred, loss

    # Dump per-variant scores to <train_dir>/result for later analysis.
    def _save_res(var_id, target, pred, name, epoch):
        with open(f'{train_dir}/result/epoch_{epoch}_{name}.score', 'w') as f:
            f.write('var\ttarget\tScore\n')
            for a, c, d in zip(var_id, target, pred):
                f.write('{}\t{:d}\t{:f}\n'.format(a.numpy().decode('utf-8'),
                                                  int(c), d))
        return True

    # Evaluate a dataset, save its scores, log the requested metrics and
    # return the mean loss.
    def test(test_dataset,
             data_name,
             epoch,
             auc=False,
             pvalue=False,
             corr=False):
        metric_test_loss.reset_states()
        metric_test.reset_states()
        all_pred, all_label, all_var = [], [], []
        for step, sample in enumerate(test_dataset):
            var, label, pred, loss = test_step(sample)
            metric_test.update_state(label, pred)
            metric_test_loss.update_state(loss)
            all_pred.extend(list(pred))
            all_label.extend(list(label))
            all_var.extend(list(var))
        all_var = np.array(all_var)
        all_label = np.array(all_label)
        all_pred = np.array(all_pred)
        _save_res(all_var, all_label, all_pred, data_name, epoch)
        if auc:
            logger.info(
                f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} auPR= {metric_test.result_auPR()} auROC= {metric_test.result_auROC()} max= {metric_test.result_max()}'
            )
        if pvalue:
            logger.info(
                f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} pvalue= {metric_test.result_pvalue()}'
            )
        if corr:
            corr, pvalue = metric_test.result_corr()
            logger.info(
                f'{data_name} pos= {metric_test.result_total()} corr= {corr} pvalue= {pvalue} max= {metric_test.result_max()}'
            )
        return metric_test_loss.result()

    # One optimization step (graph-compiled).
    @tf.function(input_signature=[train_dataset.element_spec])
    def train_step(sample):
        var, ref_aa, alt_aa, feature, label, padding_mask = sample
        with tf.GradientTape() as tape:
            logit = model((ref_aa, alt_aa, feature), True, padding_mask)
            loss = compute_loss(label, logit)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        metric_train_loss.update_state(loss)
        #if optimizer.iterations % 512 == 0:
        #    _update_gradient_norm_summary(model.trainable_variables, gradients)
        return loss

    # Early stopping: stop once validation loss has not improved for
    # patience_epochs consecutive epochs.
    EPOCHS = 512
    watch_loss = 10000.0
    watch_epoch = -1
    patience_epochs = 5
    for epoch in range(EPOCHS):
        start = time.time()
        for step, samples in enumerate(train_dataset):
            loss = train_step(samples)
            #tf.print(
            #    f'lr= {learning_rate(global_step)} wd={weight_decay(global_step)}'
            #)
            #model summary
            if optimizer.iterations == 1:
                model.summary(print_fn=logger.info)
            #logging kernel weights
            #if (optimizer.iterations + 1) % 512 == 0:
            #    _update_histogram_summary()
        logger.info(f'Epoch {epoch} Loss {metric_train_loss.result():.4f}')
        metric_train_loss.reset_states()
        model.save_weights(f'{train_dir}/model/epoch-{epoch}.h5')
        #validate and test
        validate_loss = test(validate_dataset,
                             'validate',
                             epoch,
                             pvalue=False,
                             auc=True,
                             corr=False)
        if validate_loss < watch_loss:
            watch_loss = validate_loss
            watch_epoch = epoch
        #denovo
        if epoch - watch_epoch == patience_epochs:
            logger.info(f'best_epoch {watch_epoch} min_loss= {watch_loss}')
            break
def main():
    """Parse CLI arguments, load the JSON config and launch training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True)
    parser.add_argument('--cv', type=int, default=0)
    parser.add_argument('--random', type=str, default='0')
    cli_args = parser.parse_args()
    with open(cli_args.config) as config_file:
        config = json.load(config_file)
    train_single_gpu(config, cli_args)


if __name__ == '__main__':
    main()
|
""" MLPNN for microbial samples from DIABIMMUNE subjects """
## v1.0
from __future__ import division, print_function, absolute_import
from collections import defaultdict, OrderedDict
import random
import os
import datetime
import timeit
import numpy as np
import pandas as pd
import tensorflow as tf
import sklearn.metrics as sk
import sys
from optparse import OptionParser
######## Retrieve all features of one subject
def getbatch_MLPNN(df, timepoints, subjects, meta, batch_size = 1, idx = 0):
    """Build one batch from the LAST sample of each of ``batch_size`` subjects.

    Parameters:
      df -- OTU matrix, features as rows, sample ids as columns.
      timepoints -- dict mapping subjectID -> ordered list of its sample ids.
      subjects -- sequence of subject ids to draw from.
      meta -- metadata DataFrame with 'subjectID' and boolean 'allergy' columns.
      batch_size -- number of subjects in the batch; clipped at list end.
      idx -- start offset into ``subjects``.

    Returns (features [batch, n_features], one-hot labels [batch, 2]
    with [1, 0] = allergic, the advanced index, the subject ids used).
    """
    subjs = list()
    label = list()
    samples = []
    # Clip the batch so it never runs past the end of the subject list.
    if (idx + batch_size >= len(subjects)):
        batch_size = len(subjects) - idx
    for i in range(0, batch_size):
        s = subjects[i + idx]
        s_samples = timepoints[s]
        # Use only the chronologically last sample of this subject.
        SubjectSample = s_samples[len(s_samples) - 1]
        r = meta.loc[meta["subjectID"] == s, "allergy"]
        target = np.unique(r.values)
        # Bug fix: `if target == True` on a multi-element numpy array
        # raised "truth value is ambiguous" instead of reaching the
        # intended error branch; check uniqueness explicitly.
        if len(target) != 1:
            print("\n\nTarget is not unqiue for subject's samples = ", s)
            print("target = ", target)
            sys.exit(1)
        output = [1, 0] if target[0] else [0, 1]
        subjs.append(s)
        label.append(output)
        samples.append(SubjectSample)
    idx = idx + batch_size
    data_sample_arr = df.loc[:, samples]
    data_sample_arr = data_sample_arr.values.transpose()
    return data_sample_arr, np.asarray(label), idx, subjs
def trainingMLPNN(df, subjects, timepoints, meta, numFeatures):
    """Train and cross-validate a 2-hidden-layer MLP (TF1 graph API).

    Builds a numFeatures -> 128 -> 256 -> 2 network with ReLU + dropout,
    then runs ``num_fold`` outer loops; each loop holds out ~20% of
    allergic and non-allergic subjects as a test set and runs an inner
    10-fold train/validate split.  Prints per-fold metrics and the mean
    test AUC/MCC at the end.

    NOTE(review): the outer loop recomputes subjAll/subjNonAll from
    ``meta`` with np.unique each time, so the held-out test set is the
    same slice every loop despite shuffling ``subjects`` — confirm this
    is intended.
    """
    n_classes = 2
    hid1_size = 128  # First layer
    hid2_size = 256  # Second layer
    epochs = 50
    num_fold = 10
    learning_rate = 0.05
    # Graph inputs: feature rows and one-hot labels.
    inputs = tf.placeholder(tf.float32, [None, numFeatures], name='inputs')
    label = tf.placeholder(tf.float32, [None, n_classes], name='labels')
    # Layer 1: 128 units, ReLU, dropout 0.5 (note: weights are laid out
    # [units, features], so inputs are transposed below).
    w1 = tf.Variable(tf.random_normal([hid1_size, numFeatures], stddev=0.01), name='w1')
    b1 = tf.Variable(tf.constant(0.1, shape=(hid1_size, 1)), name='b1')
    y1 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(w1, tf.transpose(inputs)), b1)), keep_prob=0.5)
    # Layer 2: 256 units, ReLU, dropout 0.5.
    w2 = tf.Variable(tf.random_normal([hid2_size, hid1_size], stddev=0.01), name='w2')
    b2 = tf.Variable(tf.constant(0.1, shape=(hid2_size, 1)), name='b2')
    y2 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(w2, y1), b2)), keep_prob=0.5)
    # Output layer: 2 logits per sample.
    wo = tf.Variable(tf.random_normal([2, hid2_size], stddev=0.01), name='wo')
    bo = tf.Variable(tf.random_normal([2, 1]), name='bo')
    yo = tf.transpose(tf.add(tf.matmul(wo, y2), bo))
    # Loss function and optimizer
    lr = tf.placeholder(tf.float32, shape=(), name='learning_rate')
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yo, labels=label))
    optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)
    # Prediction
    pred = tf.nn.softmax(yo)
    y_true = tf.argmax(label, 1)
    y_pred = tf.argmax(pred, 1)
    correct_prediction = tf.equal(y_pred, y_true)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Create operation which will initialize all variables
    init = tf.global_variables_initializer()
    # Configure GPU not to use all memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Start a new tensorflow session and initialize variables
    sess = tf.InteractiveSession(config=config)
    sess.run(init)
    testAUC = list()
    testMCC = list()
    for loop in range(0,num_fold):
        print("\n Loop = ", loop)
        random.shuffle(subjects)
        # Class-stratified 20% hold-out test set.
        subjAll = np.unique(meta.loc[meta["allergy"] == True, "subjectID"])
        subjNonAll = np.unique(meta.loc[meta["allergy"] == False, "subjectID"])
        testFoldSizeAllergy = round(0.2 * len(subjAll))
        testAllergy = subjAll[0: testFoldSizeAllergy]
        testFoldSizeNonAllergy = round(0.2 * len(subjNonAll))
        testNonAllergy = subjNonAll[0: testFoldSizeNonAllergy]
        testSet = np.concatenate([testAllergy, testNonAllergy])
        random.shuffle(testSet)
        idx_test = 0
        X_test, labels_test, indx, test_subjects = getbatch_MLPNN(df, timepoints, testSet, meta, batch_size=len(testSet),
                                                                  idx=idx_test)
        print("Total # subjects = ", len(subjects))
        print("Total # allergic subjects = ", len(subjAll),
              " Total # non-allergic subjects = ", len(subjNonAll))
        # Remaining subjects form the train/validation pool.
        subjAll = [x for x in subjAll if x not in testAllergy]
        subjNonAll = [x for x in subjNonAll if x not in testNonAllergy]
        print("Training # allergic subjects = ", len(subjAll),
              " Training # non-allergic subjects = ", len(subjNonAll))
        print("Testing # allergic subjects = ", len(testAllergy),
              " Testing # non-allergic subjects = ", len(testNonAllergy))
        for i in range(0, num_fold):
            print("fold = ", i)
            foldSize = round((1 / num_fold) * (len(subjAll) + len(subjNonAll)))
            #print("FoldSize = ", foldSize)
            ### Prepare validation fold
            validateFoldSizeAllergy = round((1 / num_fold) * len(subjAll))
            validateAllergy = subjAll[i * validateFoldSizeAllergy: (i + 1) * validateFoldSizeAllergy]
            validateFoldSizeNonAllergy = round((1 / num_fold) * len(subjNonAll))
            validateNonAllergy = subjNonAll[i * validateFoldSizeNonAllergy: (i + 1) * validateFoldSizeNonAllergy]
            validateSet = np.concatenate([validateAllergy, validateNonAllergy])
            random.shuffle(validateSet)
            ### Prepare training fold
            trainAllergy = [x for x in subjAll if x not in validateAllergy]
            trainNonAllergy = [x for x in subjNonAll if x not in validateNonAllergy]
            trainSet = np.concatenate([trainAllergy, trainNonAllergy])
            random.shuffle(trainSet)
            idx_validate = 0
            idx_train = 0
            X_validate, labels_validate, indx, validate_subjects = getbatch_MLPNN(df, timepoints, validateSet, meta,
                                                                                  batch_size=len(validateSet),
                                                                                  idx=idx_validate)
            for epoch in range(epochs):
                X_train, labels_train, indx, train_subjects = getbatch_MLPNN(df, timepoints, trainSet, meta,
                                                                             batch_size=len(trainSet),
                                                                             idx=idx_train)
                avg_cost = 0.0
                # NOTE(review): this inner loop feeds the FULL training
                # matrix X_train.shape[0] times per epoch (not one row per
                # step), and its variable `i` shadows the fold index —
                # confirm both are intended.
                for i in range(X_train.shape[0]):
                    _, c = sess.run([optimizer, loss], feed_dict={lr: learning_rate,
                                                                  inputs: X_train,
                                                                  label: labels_train})
                    avg_cost += c
                avg_cost /= X_train.shape[0]
                if epoch % 5 == 0:
                    print("Epoch: {:3d} Train Cost: {:.4f}".format(epoch, avg_cost))
                idx_train = 0
            # Training-set metrics for this fold.
            acc_train = accuracy.eval(feed_dict={inputs: X_train, label: labels_train})
            y_t, y_p, y_sm = sess.run([y_true, y_pred, pred], feed_dict={inputs: X_train, label: labels_train})
            precision = sk.precision_score(y_t, y_p, average=None)
            recall = sk.recall_score(y_t, y_p, average=None)
            fscore = sk.f1_score(y_t, y_p, average=None)
            mcc = sk.matthews_corrcoef(y_t, y_p, sample_weight=None)
            fpr, tpr, _ = sk.roc_curve(y_t, y_sm[:, 1])
            roc_auc = sk.auc(fpr, tpr)
            # NOTE(review): acc_test is computed on the VALIDATION fold but
            # the metrics appended below come from the TEST set — confirm
            # this mix is intentional.
            acc_test = accuracy.eval(feed_dict={inputs: X_validate, label: labels_validate})
            y_t, y_p, y_sm = sess.run([y_true, y_pred, pred], feed_dict={inputs: X_test, label: labels_test})
            precision = sk.precision_score(y_t, y_p, average=None)
            recall = sk.recall_score(y_t, y_p, average=None)
            fscore = sk.f1_score(y_t, y_p, average=None)
            mcc = sk.matthews_corrcoef(y_t, y_p, sample_weight=None)
            fpr, tpr, _ = sk.roc_curve(y_t, y_sm[:, 1])
            roc_auc = sk.auc(fpr, tpr)
            testAUC.append(roc_auc)
            testMCC.append(mcc)
    print("average AUC = %0.2f" % np.mean(testAUC))
    print("average MCC = %0.2f" % np.mean(testMCC))
    return
def _count_country_samples(meta):
    """Return (#FIN, #RUS, #EST) sample counts from meta['country']."""
    finn = 0
    russ = 0
    est = 0
    for row in meta['country']:
        if(row == "FIN"):
            finn = finn + 1
        if (row == "RUS"):
            russ = russ + 1
        if (row == "EST"):
            est = est + 1
    return finn, russ, est


def main(_):
    """Entry point: parse options, load and filter the data, train the MLPNN.

    Reads the OTU matrix and metadata CSVs, drops subjects with fewer
    than three timepoints, prints cohort statistics and trains the model.
    """
    optparser = OptionParser()
    optparser.add_option('-i', '--inputFile',
                         dest='input',
                         help='path to input file',
                         default=None)
    optparser.add_option('-m', '--metadata',
                         dest='meta',
                         help='path to metadata file',
                         default=None)
    optparser.add_option('-o', '--outputDir',
                         dest='out',
                         help='name of the output directory',
                         default="TEST",
                         type='string')
    (options, args) = optparser.parse_args()
    InputFile = None
    if options.input is not None:
        InputFile = options.input
    else:
        print('No input filename specified, system with exit\n')
        sys.exit('System will exit')
    MetadataFile = None
    if options.meta is not None:
        MetadataFile = options.meta
    else:
        print('No metadata file specified, system with exit\n')
        sys.exit('System will exit')
    OutDir = None
    if options.out is not None:
        OutDir = options.out
    else:
        print('No OutDir specified, system with exit\n')
        sys.exit('System will exit')
    print("InputFile = ", InputFile)
    print("MetadataFile = ", MetadataFile)
    print("OutDir = ", OutDir)
    ## Read OTU matrix
    df = pd.read_csv(InputFile, sep=",", index_col = 0)
    features = list(df.index)
    samples = list(df)
    numFeatures = len(features)
    numSamples = len(samples)
    ## Read Metadatafile
    meta = pd.read_csv(MetadataFile, sep=",", index_col = 0)
    unique, counts = np.unique(meta['subjectID'], return_counts=True)
    maxLen = max(counts)
    ## Calculate # samples per subject
    timepoints = defaultdict(list)
    for i in range(0, numSamples):
        timepoints[meta["subjectID"][i]].append(meta["gid_wgs"][i])
    ### Extract subjects
    subjects = list(timepoints.keys())
    numSubject = len(subjects)
    print("# of subjects = ", numSubject)
    print("# of samples = ", numSamples)
    print("max sequence len = ", maxLen)
    print("# of features = ", numFeatures)
    #### Remove subjects with less than 3 datapoints
    finn, russ, est = _count_country_samples(meta)
    print("Before filtration: # Finnish samples = ", finn, " # Russian samples = ", russ, " # Estonian samples = ", est)
    ctr = 0
    # Bug fix: the original removed items from `subjects` while iterating
    # over it, which silently skipped the subject following each removed
    # one; iterate over a copy instead.
    for i in list(subjects):
        if (len(timepoints[i]) < 3):
            ctr = ctr +1
            df = df.drop(timepoints[i], axis=1)
            meta = meta.drop(meta[meta.subjectID == i].index)
            timepoints.pop(i, None)
            subjects.remove(i)
    print("# of removed subjects = ", ctr)
    print("After filtration: # subjects = ", len(subjects))
    finn, russ, est = _count_country_samples(meta)
    print("After filtration: # Finnish samples = ", finn, " # Russian samples = ", russ, " # Estonian samples = ", est)
    russ = 0
    finn = 0
    est = 0
    for i in subjects:
        cntry = np.unique(meta.loc[meta.subjectID == i, "country"])[0]
        if(cntry == "RUS"):
            russ = russ + 1
        if(cntry == "FIN"):
            finn = finn + 1
        if(cntry == "EST"):
            est = est + 1
    print("After filtration: # Finnish subjects = ", finn, " # Russian subjects= ", russ,
          " # Estonian subjects= ", est)
    fAllergy = 0
    nonfallergy = 0
    for i in subjects:
        allergyy = np.unique(meta.loc[meta.subjectID == i, "allergy"])[0]
        if (allergyy):
            fAllergy = fAllergy + 1
        else:
            nonfallergy = nonfallergy + 1
    print("After filtration: # allergic subjects = ", fAllergy, " # non-allergic subjects = ", nonfallergy)
    print("df shape = ", df.shape)
    print("meta shape = ", meta.shape)
    #### Training MLPNN
    start = timeit.default_timer()
    trainingMLPNN(df, subjects, timepoints, meta, numFeatures)
    stop = timeit.default_timer()
    print("Elapsed time = %0.2f Seconds" % (stop - start))


if __name__ == '__main__':
    tf.app.run(main = main)
try:
import io
except ImportError:
# probably linux
import StringIO
io = StringIO
import tokenize
import re
import sublime, sublime_plugin
def find(collection, f):
    """Return the first item of ``collection`` satisfying predicate ``f``.

    Returns None when no item matches.
    """
    return next((item for item in collection if f(item)), None)
def search_points_to_replace(command):
    """Collect the brace points enclosing each selection in the view.

    Walks outward from every selection end: finds the closest opening
    brace at or before the cursor whose matching closing brace is at or
    after it, and records both points.  ``command`` must provide
    ``view``, ``opening_points`` (sorted descending) and ``blocks``
    (opening point -> closing point).  Returns the points as a list
    (unordered).
    """
    points_to_replace = set()
    for region in command.view.sel():
        test_point = region.b
        while True:
            opening_point = find(command.opening_points,
                                 lambda p: p <= test_point)
            # Bug fix: `if opening_point:` treated an opening brace at
            # buffer offset 0 as "not found"; find() signals absence
            # with None, so compare against None explicitly.
            if opening_point is not None:
                if command.blocks[opening_point] >= test_point:
                    points_to_replace.add(opening_point)
                    points_to_replace.add(command.blocks[opening_point])
                    break
                else:
                    # This block closes before the cursor; keep searching
                    # to the left of its opening brace.
                    test_point = opening_point - 1
            else:
                break
    return list(points_to_replace)
def match_blocks(command, toknum, opening, closing):
    """Map each ``opening`` token's buffer point to its matching ``closing`` point.

    Tokenizes the whole view and pairs tokens of type ``toknum`` with a
    stack, so nested blocks are matched correctly.  Unbalanced closing
    tokens are ignored.
    """
    view = command.view
    pending = []
    blocks = {}
    content = view.substr(sublime.Region(0, view.size()))
    token_stream = tokenize.generate_tokens(io.StringIO(content).readline)
    for tok_type, tok_val, tok_start, _, _ in token_stream:
        if tok_type != toknum:
            continue
        point = view.text_point(tok_start[0] - 1, tok_start[1])
        if tok_val == opening:
            pending.append(point)
        elif tok_val == closing and pending:
            blocks[pending.pop()] = point
    return blocks
class BraceToDoEndCommand(sublime_plugin.TextCommand):
    """Convert Ruby `{ ... }` blocks enclosing the selection into `do ... end`."""
    # NOTE(review): class-level mutable set is shared by all instances and is
    # never cleared in run(); reserved lines may accumulate across invocations.
    lines_to_reindent = set()
    def reserve_reindent(self, line):
        # Each reservation corresponds to a newline being inserted above the
        # previously reserved (lower) lines, so shift those down by one first.
        self.lines_to_reindent = set([l + 1 for l in self.lines_to_reindent])
        self.lines_to_reindent.add(line)
    def reindent(self):
        """Reindent every reserved line by temporarily placing a cursor on it."""
        view = self.view
        sel = view.sel()
        dirty_lines = set()
        for line in self.lines_to_reindent:
            line_end = sublime.Region(view.line(view.text_point(line, 0)).b)
            if not sel.contains(line_end):
                sel.add(line_end)
                dirty_lines.add(line)
        view.run_command('reindent', {'force_indent': False})
        # remove only the cursors this method added
        for line in dirty_lines:
            sel.subtract(sublime.Region(view.line(view.text_point(line, 0)).b))
    def run(self, edit):
        view = self.view
        # map every '{' point to its matching '}' point
        self.blocks = match_blocks(self, tokenize.OP, '{', '}')
        self.opening_points = list(self.blocks.keys())
        self.opening_points.sort(reverse=True)
        points_to_replace = search_points_to_replace(self)
        # process bottom-up so earlier replacements don't shift later points
        points_to_replace.sort(reverse = True)
        for p in points_to_replace:
            if p in self.opening_points:
                # f{ f do
                # f{|a| f do |a|
                # f{ |a| f do |a|
                # f{a} f do\n
                # f{|a|a} f do |a|\n
                # f{ |a|a} f do |a|\n
                # f{|a| a} f do |a|\n
                # f{ |a| a} f do |a|\n
                # f { |a| a} f do |a|\n
                opening_pattern = re.compile('(?P<heading>[ \t])?(?P<opening>\{ *(?P<args>\|[ ,()\w\t]*\|)?[ \t]*)(?P<exp>.*)')
                m = opening_pattern.search(view.substr(sublime.Region(p - 1, view.line(p).b)))
                heading = '' if m.group('heading') else ' '
                following = ' ' if m.group('args') else ''
                if m.group('exp'):
                    # an inline expression follows: push it onto a new line
                    newline = '\n'
                    self.reserve_reindent(view.rowcol(p)[0] + 1)
                else:
                    newline = ''
                region = sublime.Region(p, p + len(m.group('opening')))
                view.replace(edit, region, '%sdo%s%s%s' % (heading, following, m.group('args') or '', newline))
            else:
                # a }
                # a}
                # }
                m = re.search('(?P<exp>[^ \t]*)(?P<spaces>[ \t]*)}$', view.substr(sublime.Region(view.line(p).a, p + 1)))
                if m.group('exp'):
                    newline = '\n'
                    self.reserve_reindent(view.rowcol(p)[0] + 1)
                else:
                    newline = ''
                replace_start = p - (len(m.group('spaces')) if m.group('exp') and m.group('spaces') else 0)
                end = newline + 'end'
                view.replace(edit, sublime.Region(replace_start, p + 1), end)
                sel = view.sel()
                # keep the caret from sticking to the inserted 'end'
                after_end = sublime.Region(replace_start + len(end))
                if sel.contains(after_end):
                    sel.subtract(after_end)
                    sel.add(sublime.Region(replace_start))
        self.reindent()
class DoEndToBraceCommand(sublime_plugin.TextCommand):
    """Convert Ruby `do ... end` blocks enclosing the selection into `{ ... }`."""
    def row_span(self, point):
        # number of rows between a block's `do` and its matching `end`
        return self.view.rowcol(self.blocks[point])[0] - self.view.rowcol(point)[0]
    def run(self, edit):
        view = self.view
        # map every `do` point to its matching `end` point
        self.blocks = match_blocks(self, tokenize.NAME, 'do', 'end')
        self.opening_points = list(self.blocks.keys())
        self.opening_points.sort(reverse = True)
        points_to_replace = search_points_to_replace(self)
        # process bottom-up so earlier replacements don't shift later points
        points_to_replace.sort(reverse = True)
        for p in points_to_replace:
            if p in self.opening_points:
                if self.row_span(p) in [1, 2]:
                    # short block: collapse interior newlines/indent to single spaces
                    inner_region = sublime.Region(p + 2, self.blocks[p])
                    view.replace(edit, inner_region, re.sub('[ \t]*[\r\n]+[ \t]*', ' ', view.substr(inner_region)))
                # when `do` is followed by ' |' also consume the space: `do |a|` -> `{|a|`
                replace_end = p + 3 if view.substr(sublime.Region(p + 2, p + 4)) == ' |' else p + 2
                view.replace(edit, sublime.Region(p, replace_end), '{')
            else:
                # replace the 3-character `end` keyword with `}`
                view.replace(edit, sublime.Region(p, p + 3), '}')
|
from __future__ import annotations
from prettyqt import gui, widgets
from prettyqt.qt import QtWidgets
# Re-parent the Qt class so QGraphicsOpacityEffect instances pick up the
# prettyqt GraphicsEffect mixin API.
QtWidgets.QGraphicsOpacityEffect.__bases__ = (widgets.GraphicsEffect,)
class GraphicsOpacityEffect(QtWidgets.QGraphicsOpacityEffect):
    """prettyqt wrapper around QGraphicsOpacityEffect with (de)serialization hooks."""
    def serialize_fields(self):
        # fields contributing to this effect's serialized state
        return dict(opacity=self.opacity(), opacity_mask=self.get_opacity_mask())
    def __setstate__(self, state):
        # restore opacity and mask from a dict produced by serialize_fields()
        self.setOpacity(state["opacity"])
        self.setOpacityMask(state["opacity_mask"])
    def get_opacity_mask(self) -> gui.Brush:
        """Return the effect's opacity mask wrapped in a prettyqt Brush."""
        return gui.Brush(self.opacityMask())
|
#!/usr/bin/env python
import pymongo
from flask import Blueprint
from flask import current_app as app
from flask import request
from flask_restx import Namespace, Resource, fields
import json
from libblapiserver.auth import require_user, require_admin
api = Namespace('mongodb', description='Binlex MongoDB API')
@api.route('/version')
class mongodb_collection_count(Resource):
    # NOTE(review): the class name suggests a collection count, but this
    # endpoint serves a version string — consider renaming for clarity.
    @require_user
    def get(self):
        """Get MongoDB Version"""
        # Reports the installed pymongo driver version (pymongo.__version__),
        # not the MongoDB server version.
        return {
            'version': pymongo.__version__
        }
|
import logging
from time import time
from decimal import ROUND_DOWN, Decimal
logger = logging.getLogger(__name__)
_missing = object()
def elapsed(t0=0.0):
    """
    Get the elapsed time from the given time.
    If the start time is not given, returns the unix-time.
    Returns
    -------
    t : float
        Elapsed time from the given time; Otherwise the epoch time.
    s : str
        The elapsed time in seconds in a string
    """
    now = time()
    # truncate (not round) to four decimal places
    delta = Decimal(str(now - t0)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
    unit = ' second' if delta == 1 else ' seconds'
    return now, str(delta) + unit
def to_number(s):
    """
    Convert a string to a number. If unsuccessful, return the de-blanked string.

    Recognizes 'True'/'False'/'None' (after stripping single quotes and
    surrounding blanks), then tries int, then float; otherwise returns the
    cleaned-up string itself.
    """
    value = s
    # remove single quotes and surrounding blanks
    if "'" in value:
        value = value.strip("'").strip()
    # booleans / None literals
    if value == 'True':
        return True
    if value == 'False':
        return False
    if value == 'None':
        return None
    # numeric conversion: int first, then float
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            continue
    return value
def is_notebook():
    """Return True only when running inside a Jupyter notebook or qt-console."""
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        return False  # get_ipython undefined: standard Python interpreter
    # ZMQInteractiveShell backs notebooks/qtconsole; anything else
    # (TerminalInteractiveShell, custom shells) is not a notebook.
    return shell_name == 'ZMQInteractiveShell'
def is_interactive():
    """
    Check if is in an interactive shell (python or ipython).
    Returns
    -------
    bool
    """
    import __main__ as main
    in_ipython = False
    try:
        shell_name = get_ipython().__class__.__name__
        in_ipython = shell_name in ('InteractiveShellEmbed', 'TerminalInteractiveShell')
    except NameError:
        # get_ipython undefined: not running under IPython
        pass
    # a plain interactive interpreter has no __main__.__file__
    return in_ipython or not hasattr(main, '__file__')
class cached:
    """A decorator turning a method into a lazily-computed property.

    The wrapped function runs once on first attribute access; its result is
    stored in the instance ``__dict__`` under the same name, so subsequent
    accesses read the cached value directly (this non-data descriptor is
    shadowed by the instance attribute). The owning class therefore needs a
    ``__dict__``.

    See:
    http://stackoverflow.com/questions/17486104/python-lazy-loading-of-class-attributes
    """

    # sentinel distinguishing "not yet computed" from a cached None
    _MISSING = object()

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __get__(self, obj, type=None):
        if obj is None:
            # class-level access returns the descriptor itself
            return self
        result = obj.__dict__.get(self.__name__, cached._MISSING)
        if result is cached._MISSING:
            result = self.func(obj)
            obj.__dict__[self.__name__] = result
        return result
|
import requests
office_dict = {'PRS': 'President',
'USS': 'U.S. Senate',
'GOV': 'Governor',
'LTG': 'Lieutenant Governor',
'SOS': 'Secretary of State',
'CON': 'Controller',
'TRS': 'Treasurer',
'ATG': 'Attorney General',
'INS': 'Insurance Commissioner',
'SPI': 'Superintendent of Public Instruction',
'BOE': 'Board of Equalization',
'CNG': 'U.S. House',
'SEN': 'State Senate',
'ASS': 'State Assembly'}
prop_dict = {'Y': 'Yes',
'N': 'No'}
class Entry(object):
    """One parsed results row: a contest code plus the candidate/choice name."""

    def __init__(self, split):
        # everything between the code and the trailing count is the name;
        # strip incumbent-marker asterisks
        self.candidate = ' '.join(split[1:-1]).strip('*')
        self.__process(split[0])

    def __process(self, code):
        """Decode office, party and district from the contest code."""
        if code.startswith('PR_'):
            # proposition codes: PR_<number><Y|N>
            self.office = 'Proposition ' + code.split('_')[1]
            self.candidate = prop_dict[code[-1]]
            return
        self.office = office_dict[code[:3]]
        if len(code) < 8:
            self.district = ''
            self.party = ''
        elif len(code) == 8:
            # office code + 3-letter party, no district
            self.district = ''
            self.party = code[3:6]
        else:
            # office code + 2-digit district + 3-letter party
            self.district = int(code[3:5])
            self.party = code[5:8]

    def __repr__(self):
        info = {'candidate': self.candidate,
                'office': self.office,
                'party': self.party}
        if self.district:
            info['district'] = self.district
        return repr(info)
class Codes(object):
    """Downloads a contest-code definition file and parses it into Entry objects."""
    def __init__(self, fname, candidate_lookup):
        """
        :param fname: URL of the codes file to download
        :param candidate_lookup: dict mapping raw candidate names to display names
        """
        self.data = requests.get(fname)
        self.data.raise_for_status()
        self.candidate_lookup = candidate_lookup
        self.__parse()
    def __parse(self):
        # Build {code: Entry}, skipping registration/turnout total rows.
        self.code_dict = {}
        for line in self.data.iter_lines():
            split = str(line, self.data.apparent_encoding).strip().split()
            if split[0] == 'TOTREG' or split[0] == 'TOTVOTE':
                continue
            entry = Entry(split)
            # normalize candidate names through the lookup table when present
            if entry.candidate in self.candidate_lookup:
                entry.candidate = self.candidate_lookup[entry.candidate]
            self.code_dict[split[0]] = entry
    def office(self, column):
        """Return the office name for a results column code."""
        if column.startswith('PR_'):
            return 'Proposition ' + column.split('_')[1]
        return office_dict[column[0:3]]
    def party(self, column):
        """Return the 3-letter party code embedded in a column code ('' for propositions)."""
        if column.startswith('PR_'):
            return ''
        return column[-5:-2]
    def lookup(self, column, district=None):
        """Return the Entry for a column code, inserting a zero-padded district if given."""
        key = None
        if district:
            # splice a 2-digit district between the office code and the rest
            key = (column[0:3] + '%02d' + column[3:]) % district
        else:
            key = column
        return self.code_dict[key]
|
from keras.preprocessing import image
import os
import sys
import matplotlib.pyplot as plt
sys.path.insert(0, '../libraries')
from mrcnn.config import Config
import mrcnn.model as modellib
import cv2
from moviepy.editor import VideoFileClip
from mrcnn import visualize_drivable
# HOME_DIR is the path that the project you put on
HOME_DIR = '/home/pandamax/current-lane-drivable-master'
DATA_DIR = os.path.join(HOME_DIR, "data/drivable")
WEIGHTS_DIR = os.path.join(HOME_DIR, "data/weights")
MODEL_DIR = os.path.join(DATA_DIR, "logs")
def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.
    Change the default size attribute to control the size
    of rendered images
    """
    figsize = (size * cols, size * rows)
    fig, ax = plt.subplots(rows, cols, figsize=figsize)
    # the figure handle is not needed by callers; only the axes are returned
    return ax
#################################
# Set your config for inference #
#################################
class InferenceConfig(Config):
    """Mask R-CNN configuration used at inference time for the drivable-area model.
    """
    NAME = "drivable"
    # Choose BACKBONE from ["resnet50","resnet101"]
    BACKBONE = "resnet101"
    # 1 GPU with 1 image per GPU -> effective batch size 1 (GPUs * images/GPU),
    # one video frame at a time
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Number of classes (including background)
    NUM_CLASSES = 1+1 # background + 1(drivable)
    # Use smaller images for faster inference
    IMAGE_MAX_DIM = 1024
    IMAGE_MIN_DIM = 800
    IMAGE_RESIZE_MODE = "square"
    # Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 128
    STEPS_PER_EPOCH = 1000
    VALIDATION_STEPS = 50
    # number of ROIs kept after non-max suppression at inference
    POST_NMS_ROIS_INFERENCE = 1000
# show inference config
inference_config = InferenceConfig()
inference_config.display()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
# print(model.find_last()[1])
# model_path = model.find_last()[1]
# choose the trained model to run inference stage
model_path = os.path.join(WEIGHTS_DIR, "mask_rcnn_drivable_res101.h5")
print("The Model Is Loading ...")
# Load trained weights (fill in path to trained weights here)
# NOTE(review): this assert is stripped under `python -O`; an explicit check
# with a raise would be more robust for input validation.
assert model_path != "", "Provide path to trained weights"
model.load_weights(model_path, by_name=True)
# two class: index 0 is background, index 1 is the drivable area
class_names = ['BG', 'drivable']
def process_video(image, title="", figsize=(16, 16), ax=None):
    """Run Mask R-CNN detection on one video frame and draw the detections.

    :param image: a single 3-channel video frame
    :param title: unused, kept for signature compatibility
    :param figsize: unused, kept for signature compatibility
    :param ax: unused, kept for signature compatibility
    :return: the frame with detected instances drawn on it
    """
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # you should return the final output (image with lines are drawn on lanes
    results = model.detect([image], verbose=0)
    r = results[0]
    image = visualize_drivable.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                             class_names, r['scores'])
    return image
# Input test video and annotated output video paths.
output = os.path.join(HOME_DIR, "mask-rcnn/notebooks/test_out/harder_challenge_video_1280 x 720.mp4")
clip1 = VideoFileClip(os.path.join(HOME_DIR, "mask-rcnn/notebooks/test_video/harder_challenge_video_1280 x 720.mp4"))
# Run detection on every frame, then write the annotated clip.
clip = clip1.fl_image(process_video) #NOTE: this function expects color images!!
clip.write_videofile(output, audio=False)
print("Process Successfully!")
|
'''
reference_pad_cds_alleles.py
Add reference seqeunces to the flanks of CDS
alleles from a single gene.
'''
import sys
import os
import subprocess
from Bio.Seq import Seq
from Bio import SeqIO
from utils import *
def parse_chromosome(filename, chrom_name):
    """Return the concatenated sequence of all FASTA records whose id matches chrom_name."""
    parts = []
    for record in SeqIO.parse(filename, "fasta"):
        if record.id == chrom_name:
            parts.append(str(record.seq))
    return "".join(parts)
def parse_gene_coords(filename, gene_name):
    """Return [chrom, strand, tx_start, cds_start, cds_end, tx_end] for a gene.

    Scans a GTF file for rows whose gene_name attribute equals *gene_name*:
    - "transcript" rows widen the overall transcript span;
    - "start_codon"/"stop_codon" rows on 'basic'-tagged protein_coding
      transcripts fix the CDS boundaries (prints a warning when transcripts
      disagree on the codon position).
    For '-' strand genes the transcript bounds are swapped so the returned
    coordinates always run in transcription order. Asserts that all rows of
    the gene agree on chromosome and strand.
    """
    gene_coords = ["", "", -1, -1, -1, -1]
    transcript_file = open(filename, "r")
    for line in transcript_file:
        if line[0] == "#":
            continue
        line_split = line.split("\t")
        attributes_split = line_split[8].split(";")
        cur_gene_name = ""
        tags = []
        transcript_type = ""
        for attribute in attributes_split:
            attribute = attribute.strip()
            if attribute[:9] == "gene_name":
                assert(cur_gene_name == "")
                cur_gene_name = attribute.split('"')[1]
            if attribute[:3] == "tag":
                tags.append(attribute.split('"')[1])
            if attribute[:15] == "transcript_type":
                assert(transcript_type == "")
                transcript_type = attribute.split('"')[1]
        assert(cur_gene_name != "")
        if cur_gene_name != gene_name:
            continue
        # chromosome and strand must be consistent across all rows of the gene
        if gene_coords[0] == "":
            gene_coords[0] = line_split[0]
        assert(gene_coords[0] == line_split[0])
        if gene_coords[1] == "":
            gene_coords[1] = line_split[6]
        assert(gene_coords[1] == line_split[6])
        if line_split[2] == "transcript":
            if gene_coords[2] == -1:
                assert(gene_coords[5] == -1)
                gene_coords[2] = int(line_split[3])
                gene_coords[5] = int(line_split[4])
            else:
                # widen the span to cover every transcript of the gene
                gene_coords[2] = min(gene_coords[2], int(line_split[3]))
                gene_coords[5] = max(gene_coords[5], int(line_split[4]))
        elif line_split[2] == "start_codon" and "basic" in tags and transcript_type == "protein_coding":
            if gene_coords[1] == "-":
                # on the reverse strand the codon's right edge is its start
                line_split[3] = line_split[4]
            if gene_coords[3] == -1:
                gene_coords[3] = int(line_split[3])
            elif gene_coords[3] != int(line_split[3]):
                print("Warning different start codon:")
                print(gene_coords[3])
                print(int(line_split[3]))
        elif line_split[2] == "stop_codon" and "basic" in tags and transcript_type == "protein_coding":
            if gene_coords[1] == "-":
                line_split[4] = line_split[3]
            if gene_coords[4] == -1:
                gene_coords[4] = int(line_split[4])
            elif gene_coords[4] != int(line_split[4]):
                print("Warning different stop codon:")
                print(gene_coords[4])
                print(int(line_split[4]))
    # BUG FIX: the file handle was previously never closed (resource leak)
    transcript_file.close()
    assert(gene_coords[0] != "")
    assert(gene_coords[1] != "")
    assert(not -1 in gene_coords[2:])
    if gene_coords[1] == "+":
        assert(gene_coords[2] <= gene_coords[3])
        assert(gene_coords[3] < gene_coords[4])
        assert(gene_coords[4] <= gene_coords[5])
    else:
        assert(gene_coords[1] == "-")
        # flip transcript bounds so coordinates run in transcription order
        gene_coords[2], gene_coords[5] = gene_coords[5], gene_coords[2]
        assert(gene_coords[2] >= gene_coords[3])
        assert(gene_coords[3] > gene_coords[4])
        assert(gene_coords[4] >= gene_coords[5])
    return gene_coords
# Print standard script header (from utils).
printScriptHeader()

if len(sys.argv) != 7:
    print("Usage: python reference_pad_cds_alleles.py <cds_alleles_input_name> <genome_fasta_name> <transcripts_gtf_name> <gene_name> <gene_flank_size> <output_fasta_name>\n")
    sys.exit(1)

gene_coords = parse_gene_coords(sys.argv[3], sys.argv[4])
print(gene_coords)

chrom_seq = parse_chromosome(sys.argv[2], gene_coords[0])
# BUG FIX: was the Python 2 statement `print len(chrom_seq)`, which is a
# SyntaxError under Python 3 (the rest of this script uses print()).
print(len(chrom_seq))

cds_file = open(sys.argv[1], "r")
out_file = open(sys.argv[6], "w")
gene_flank_size = int(sys.argv[5])

# The flank sequences depend only on the gene coordinates, so compute them
# once instead of recomputing for every CDS allele line.
if gene_coords[1] == "+":
    left_flank = chrom_seq[(gene_coords[2] - gene_flank_size - 1):(gene_coords[3] - 1)]
    right_flank = chrom_seq[gene_coords[4]:(gene_coords[5] + gene_flank_size - 1)]
else:
    assert(gene_coords[1] == "-")
    left_flank = chrom_seq[gene_coords[3]:(gene_coords[2] + gene_flank_size - 1)]
    right_flank = chrom_seq[(gene_coords[5] - gene_flank_size - 1):(gene_coords[4] - 1)]

# NOTE(review): as in the original code, both flanks are reverse-complemented
# regardless of strand — confirm this is intended for '+' strand genes too.
left_flank = str(Seq(left_flank).reverse_complement())
right_flank = str(Seq(right_flank).reverse_complement())

for line in cds_file:
    line = line.strip()
    line_split = line.split("\t")
    assert(len(line_split) == 2)
    if line_split[0] == "allele":
        # skip the header row
        continue
    # write one FASTA record per allele: flanks padded around the CDS sequence
    out_file.write(">" + line_split[0] + "\n")
    out_file.write(left_flank + line_split[1] + right_flank + "\n")

cds_file.close()
out_file.close()
print("Done")
|
#!/usr/bin/env python
u"""
gmao_spire_gnss_sync.py
Written by Tyler Sutterley (10/2021)
Syncs Spire GNSS grazing angle altimetry data from the NASA
Global Modeling and Assimilation Office (GMAO)
CALLING SEQUENCE:
python gmao_spire_gnss_sync.py --user=<username>
where <username> is your NASA GMAO Extranet credentials
COMMAND LINE OPTIONS:
--help: list the command line options
-U X, --user X: username for NASA GMAO Extranet Login
-W X, --password X: password for NASA GMAO Extranet Login
-N X, --netrc X: path to .netrc file for authentication
-D X, --directory X: working data directory
-p X, --product X: Spire data products to sync
-Y X, --year X: Years of Spire data to sync
-P X, --np X: Number of processes to use in file downloads
-t X, --timeout X: Timeout in seconds for blocking operations
-l, --log: output log of files downloaded
-M X, --mode X: Local permissions mode of the directories and files synced
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
lxml: Pythonic XML and HTML processing library using libxml2/libxslt
https://lxml.de/
https://github.com/lxml/lxml
future: Compatibility layer between Python 2 and Python 3
https://python-future.org/
PROGRAM DEPENDENCIES:
utilities.py: download and management utilities for syncing files
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
Written 10/2021
"""
from __future__ import print_function
import sys
import os
import io
import re
import time
import netrc
import getpass
import logging
import tarfile
import builtins
import argparse
import posixpath
import traceback
import multiprocessing as mp
import spire_toolkit.utilities
#-- PURPOSE: Syncs Spire GNSS grazing angle altimetry data
def gmao_spire_gnss_sync(DIRECTORY,
    PRODUCT=[],
    YEAR=None,
    PROCESSES=0,
    TIMEOUT=None,
    LOG=False,
    MODE=0o775):
    """Sync Spire GNSS grazing angle tar files from the GMAO extranet server.

    DIRECTORY: local working data directory
    PRODUCT: list of Spire data products to sync (empty matches all products)
    YEAR: list of years of Spire data to sync
    PROCESSES: number of parallel download processes (0 runs in series)
    TIMEOUT: timeout in seconds for blocking operations
    LOG: write a dated log file instead of logging to the terminal
    MODE: permissions mode for the log file and downloaded files

    NOTE(review): PRODUCT has a mutable default argument; it is only read
    here, but a None default would be the safer idiom.
    """
    #-- create log file with list of synchronized files (or print to terminal)
    if LOG:
        #-- format: GMAO_Spire_GNSS_sync_2002-04-01.log
        today = time.strftime('%Y-%m-%d',time.localtime())
        LOGFILE = 'GMAO_Spire_GNSS_sync_{0}.log'.format(today)
        logging.basicConfig(filename=os.path.join(DIRECTORY,LOGFILE),
            level=logging.INFO)
        logging.info('GMAO Spire GNSS Sync Log ({0})'.format(today))
    else:
        #-- standard output (terminal output)
        logging.basicConfig(level=logging.INFO)
    #-- remote host for Spire GNSS data
    HOST = 'https://gmao.gsfc.nasa.gov'
    #-- regular expression pattern for finding tar files
    regex_pattern = r'(spire_gnss-r)_(L\d+).({0})_({1})(\d{{2}}).tar'
    regex_products = r'|'.join(PRODUCT) if PRODUCT else r'.*?'
    regex_years = r'|'.join([r'{0:4d}'.format(y) for y in YEAR])
    R1 = re.compile(regex_pattern.format(regex_products,regex_years))
    #-- open connection with GMAO extranet server at remote directory
    PATH = [HOST,'extranet','collab','spire_team']
    files = spire_toolkit.utilities.gmao_list(PATH, timeout=TIMEOUT,
        adddirlink="grazing_angle_L2", pattern=R1, sort=True)
    #-- sync in series if PROCESSES = 0
    if (PROCESSES == 0):
        #-- get each tarfile from the GMAO extranet server
        for colname in files:
            #-- sync Spire-GNSS files with GMAO server
            REMOTE = [*PATH,'grazing_angle_L2',colname]
            kwds = dict(DIRECTORY=DIRECTORY, TIMEOUT=TIMEOUT,
                MODE=MODE)
            output = multiprocess_sync(REMOTE, **kwds)
            #-- print the output string
            logging.info(output)
    else:
        #-- set multiprocessing start method
        ctx = mp.get_context("fork")
        #-- sync in parallel with multiprocessing Pool
        pool = ctx.Pool(processes=PROCESSES)
        #-- sync each Spire-GNSS data file
        out = []
        #-- get each tarfile from the GMAO extranet server
        for colname in files:
            #-- sync Spire-GNSS files with GMAO server
            REMOTE = [*PATH,'grazing_angle_L2',colname]
            kwds = dict(DIRECTORY=DIRECTORY, TIMEOUT=TIMEOUT,
                MODE=MODE)
            out.append(pool.apply_async(multiprocess_sync,
                args=(REMOTE,),kwds=kwds))
        #-- start multiprocessing jobs
        #-- close the pool
        #-- prevents more tasks from being submitted to the pool
        pool.close()
        #-- exit the completed processes
        pool.join()
        #-- print the output string
        for output in out:
            temp = output.get()
            logging.info(temp)
    #-- close log file and set permissions level to MODE
    if LOG:
        os.chmod(os.path.join(DIRECTORY,LOGFILE), MODE)
#-- PURPOSE: wrapper for running the sync program in multiprocessing mode
def multiprocess_sync(*args, **kwds):
    """Run http_pull_file, logging (rather than raising) any failure.

    Returns the output string on success, or None when the download failed,
    so one bad file does not abort the whole multiprocessing pool.
    """
    try:
        output = http_pull_file(*args, **kwds)
    except Exception:
        #-- if there has been an error exception
        #-- log the failing worker id and the full stack trace of the
        #-- current exception being handled (the unused `as e` binding
        #-- was removed; swallowing here is deliberate best-effort)
        logging.critical('process id {0:d} failed'.format(os.getpid()))
        logging.error(traceback.format_exc())
    else:
        return output
#-- PURPOSE: try extracting Spire files from a tar file
def http_pull_file(REMOTE, DIRECTORY=None, TIMEOUT=None, MODE=None):
    """Download one monthly tar file from GMAO and extract its netCDF4 files.

    REMOTE: list of url path components of the remote monthly tar file
    DIRECTORY: local working data directory
    TIMEOUT: timeout in seconds for blocking operations
    MODE: permissions mode for extracted files and created directories
    Returns a log string listing each daily tar member and extracted file.
    """
    #-- regular expression pattern for extracting data from files
    rx = re.compile(r'(spire_gnss-r)_(L\d+)_(.*?)_(v\d+\.\d+)_'
        r'(\d{4})-(\d{2})-(\d{2})T(\d{2})-(\d{2})-(\d{2})_(.*?)\.nc$')
    #-- get BytesIO object containing data from tar file
    buffer = spire_toolkit.utilities.from_http(REMOTE,
        timeout=TIMEOUT, context=None)
    #-- open the monthly tar file
    tar1 = tarfile.open(fileobj=buffer, mode='r')
    #-- the monthly tar contains nested (daily) tar files
    mem1 = [f for f in tar1.getmembers() if f.name.endswith('tar')]
    #-- for each file within the tarfile
    output = ''
    for member in mem1:
        #-- print tarfile name to log
        output += '\t{0}\n'.format(member.name)
        #-- open the daily tarfile
        fileID = io.BytesIO(tar1.extractfile(member).read())
        tar2 = tarfile.open(fileobj=fileID, mode='r')
        mem2 = [f for f in tar2.getmembers() if f.name.endswith('nc')]
        #-- extract netCDF4 files to local directory
        for nc in mem2:
            #-- extract parameters from netCDF4 file
            MS,LV,PRD,VERS,YY,MM,DD,HH,MN,SS,AUX = rx.findall(nc.name).pop()
            #-- files are grouped locally into YYYY.MM.DD subdirectories
            SUBDIRECTORY = '{0}.{1}.{2}'.format(YY,MM,DD)
            #-- create output local directory if non-existent
            if not os.access(os.path.join(DIRECTORY,SUBDIRECTORY), os.F_OK):
                os.makedirs(os.path.join(DIRECTORY,SUBDIRECTORY), mode=MODE)
            #-- extract file
            tar2.extract(nc, path=os.path.join(DIRECTORY,SUBDIRECTORY))
            local_file = os.path.join(DIRECTORY,SUBDIRECTORY,nc.name)
            output += '\t\t{0} -->\n\t\t\t{1}\n'.format(nc.name,local_file)
            #-- use original modification time from tar file
            os.utime(local_file,(os.stat(local_file).st_atime,nc.mtime))
            #-- change the permissions mode
            os.chmod(local_file,MODE)
    #-- return the output string
    return output
#-- Main program that calls gmao_spire_gnss_sync()
def main():
    """Parse the command line, authenticate with GMAO, and run the sync."""
    #-- Read the system arguments listed after the program
    parser = argparse.ArgumentParser(
        description="""Syncs Spire GNSS grazing angle altimetry data from
            the NASA Global Modeling and Assimilation Office (GMAO)
            """
    )
    #-- command line parameters
    parser.add_argument('--user','-U',
        type=str, default=os.environ.get('GMAO_USERNAME'),
        help='Username for NASA GMAO Extranet Login')
    parser.add_argument('--password','-W',
        type=str, default=os.environ.get('GMAO_PASSWORD'),
        help='Password for NASA GMAO Extranet Login')
    parser.add_argument('--netrc','-N',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        default=os.path.join(os.path.expanduser('~'),'.netrc'),
        help='Path to .netrc file for authentication')
    #-- working data directory
    parser.add_argument('--directory','-D',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        default=os.getcwd(),
        help='Working data directory')
    #-- Spire data products to sync
    products = ['grzAlt','grzIce']
    parser.add_argument('--product','-p',
        type=str, nargs='+', choices=products, default=products,
        help='Spire data products to sync')
    #-- years of Spire data to sync
    now = time.gmtime()
    parser.add_argument('--year','-Y',
        type=int, nargs='+', default=range(2020,now.tm_year+1),
        help='Years of Spire data to sync')
    #-- run sync in series if processes is 0
    parser.add_argument('--np','-P',
        metavar='PROCESSES', type=int, default=0,
        help='Number of processes to use in file downloads')
    #-- connection timeout
    parser.add_argument('--timeout','-t',
        type=int, default=360,
        help='Timeout in seconds for blocking operations')
    #-- Output log file in form
    #-- GMAO_Spire_GNSS_sync_2002-04-01.log
    parser.add_argument('--log','-l',
        default=False, action='store_true',
        help='Output log file')
    #-- permissions mode of the directories and files synced (number in octal)
    parser.add_argument('--mode','-M',
        type=lambda x: int(x,base=8), default=0o775,
        help='Permission mode of directories and files synced')
    args,_ = parser.parse_known_args()
    #-- NASA GMAO Extranet hostname
    URS = 'gmao.gsfc.nasa.gov'
    #-- get NASA GMAO credentials from a .netrc file when available
    try:
        args.user,_,args.password = netrc.netrc(args.netrc).authenticators(URS)
    except (OSError, netrc.NetrcParseError, TypeError):
        #-- BUG FIX: was a bare `except:` (which also swallows SystemExit and
        #-- KeyboardInterrupt); catch only the expected failures: a missing or
        #-- unreadable .netrc (OSError), a malformed .netrc (NetrcParseError),
        #-- and no entry for the host (TypeError from unpacking None)
        #-- check that credentials were entered
        if not args.user:
            prompt = 'Username for {0}: '.format(URS)
            args.user = builtins.input(prompt)
        #-- enter password securely from command-line
        if not args.password:
            prompt = 'Password for {0}@{1}: '.format(args.user,URS)
            args.password = getpass.getpass(prompt)
    #-- build an urllib opener for NASA GMAO Extranet
    #-- Add the username and password for NASA GMAO Extranet Login system
    opener = spire_toolkit.utilities.build_opener(args.user,args.password,
        password_manager=False,
        authorization_header=True,
        get_ca_certs=False,
        redirect=False)
    #-- post credentials to login to retrieve cookies
    LOGIN = posixpath.join('https://gmao.gsfc.nasa.gov','extranet','index.php')
    data = spire_toolkit.utilities.urlencode({'un':args.user, 'pw':args.password})
    request = spire_toolkit.utilities.urllib2.Request(LOGIN)
    response = spire_toolkit.utilities.urllib2.urlopen(request,
        data=data.encode('utf-8'))
    #-- verify url and cookies
    assert response.url
    assert opener.handlers[7].cookiejar
    #-- check internet connection before attempting to run program
    if spire_toolkit.utilities.check_connection(LOGIN):
        gmao_spire_gnss_sync(args.directory,
            PRODUCT=args.product,
            YEAR=args.year,
            PROCESSES=args.np,
            TIMEOUT=args.timeout,
            LOG=args.log,
            MODE=args.mode)
#-- run main program
if __name__ == '__main__':
main()
|
import numpy as np
from pathlib import Path
import json
from .. import DiscreteModel
class QLearning(DiscreteModel):
    """
    Models the Q-Function over a StateActionSpace as an array of values updated with Q-Learning. This is the vanilla
    Q-Learning.
    """
    ARRAY_SAVE_NAME = 'q_values_map.npy'
    SAVE_NAME = 'model.json'

    def __init__(self, env, step_size, discount_rate):
        """
        Initializer
        :param env: the environment
        :param step_size: the step size in the Q-Learning update
        :param discount_rate: the discount rate
        """
        super(QLearning, self).__init__(env)
        self.step_size = step_size
        self.discount_rate = discount_rate
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # it was an alias for the builtin `float`, so this is behaviorally
        # identical (np.float64 array).
        self.q_values = np.zeros(
            self.env.stateaction_space.index_shape,
            dtype=float
        )

    def update(self, state, action, new_state, reward, failed):
        """
        Updates the value of (state, action) with the Q-Learning update
        :param state: the previous state
        :param action: the action taken
        :param new_state: the new state
        :param reward: the reward incurred
        :param failed: whether the agent has failed (unused by vanilla Q-Learning)
        """
        sa_index = (
            self.env.stateaction_space.state_space.get_index_of(state),
            self.env.stateaction_space.action_space.get_index_of(action)
        )
        # temporal-difference update; `self[...]` indexing is provided by the
        # DiscreteModel base class (presumably routed through _query)
        self.q_values[sa_index] = self[sa_index] + self.step_size * (
            reward + self.discount_rate * np.max(self[new_state, :])
            - self.q_values[sa_index]
        )

    def _query(self, index):
        # direct array lookup backing the DiscreteModel query interface
        return self.q_values[index]

    @property
    def state_dict(self):
        """The JSON-serializable constructor parameters of this model."""
        return {'step_size': self.step_size,
                'discount_rate': self.discount_rate}

    def save(self, save_folder):
        """
        Saves the model in the given folder. The array is saved in the file QLearning.ARRAY_SAVE_NAME, and the model
        itself in QLearning.SAVE_NAME
        :param save_folder: str or Path: the folder where to save
        """
        save_path = Path(save_folder)
        model_path = save_path / QLearning.SAVE_NAME
        array_path = save_path / QLearning.ARRAY_SAVE_NAME
        with model_path.open('w') as f:
            json.dump(self.state_dict, f, indent=4)
        np.save(array_path, self.q_values)

    @staticmethod
    def load(load_folder, env):
        """
        Loads the model and the array saved by the QLearning.save method. Note that this method may fail if the save was
        made with an older version of the code.
        :param load_folder: the folder where the files are
        :param env: the environment passed to the reconstructed model
        :return: QLearning: the model
        """
        load_path = Path(load_folder)
        model_path = load_path / QLearning.SAVE_NAME
        array_path = load_path / QLearning.ARRAY_SAVE_NAME
        with model_path.open('r') as f:
            state_dict = json.load(f)
        q_values = np.load(array_path)
        model = QLearning(env, **state_dict)
        model.q_values = q_values
        return model
#!/usr/bin/env python3
##################################################################################
# Pyprojectx #
# https://github.com/houbie/pyprojectx #
# #
# Copyright (c) 2021 Ivo Houbrechts #
# #
# Licensed under the MIT license #
##################################################################################
import argparse
import os
import subprocess
import sys
from pathlib import Path
from venv import EnvBuilder
VERSION = "__version__"
PYPROJECTX_INSTALL_DIR_ENV_VAR = "PYPROJECTX_INSTALL_DIR"
PYPROJECTX_PACKAGE_ENV_VAR = "PYPROJECTX_PACKAGE"
PYPROJECT_TOML = "pyproject.toml"
DEFAULT_INSTALL_DIR = ".pyprojectx"
CYAN = "\033[96m"
BLUE = "\033[94m"
RED = "\033[91m"
RESET = "\033[0m"
if sys.platform.startswith("win"):
os.system("color")
def run(args):
    """Bootstrap pyprojectx and delegate the full command line to its script.

    Exits with the subprocess's return code when the delegated command fails.
    """
    try:
        options = get_options(args)
        script = ensure_pyprojectx(options)
        # pass the resolved defaults explicitly when the user did not set them
        extra = []
        if not options.toml:
            extra.extend(["--toml", str(options.toml_path)])
        if not options.install_dir:
            extra.extend(["--install-dir", str(options.install_path)])
        subprocess.run([str(script), *extra, *args], check=True)
    except subprocess.CalledProcessError as e:
        raise SystemExit(e.returncode) from e
def get_options(args):
    """Parse args and resolve install path, toml path and pyprojectx package."""
    options = arg_parser().parse_args(args)
    # CLI flag wins, then the env var, then a sibling default directory
    install_dir = options.install_dir or os.environ.get(
        PYPROJECTX_INSTALL_DIR_ENV_VAR, Path(__file__).with_name(DEFAULT_INSTALL_DIR)
    )
    options.install_path = Path(install_dir)
    if options.toml:
        options.toml_path = Path(options.toml)
    else:
        options.toml_path = Path(__file__).with_name(PYPROJECT_TOML)
    dev_package = os.environ.get(PYPROJECTX_PACKAGE_ENV_VAR)
    if dev_package:
        # development override: install the package spec from the environment
        options.version = "development"
        options.pyprojectx_package = dev_package
    else:
        options.version = VERSION
        options.pyprojectx_package = f"pyprojectx~={VERSION}"
    if options.quiet or not options.verbosity:
        options.verbosity = 0
    return options
def arg_parser():
    """Build the argument parser for the pyprojectx wrapper script."""
    parser = argparse.ArgumentParser(
        description="Execute commands or aliases defined in the [tool.pyprojectx] section of pyproject.toml. "
        "Use the -i or --info option to see available tools and aliases.",
    )
    parser.add_argument("--version", action="version", version=VERSION)
    parser.add_argument(
        "--toml",
        "-t",
        action="store",
        help="The toml config file. Defaults to 'pyproject.toml' in the same directory as the pw script.",
    )
    parser.add_argument(
        "--install-dir",
        action="store",
        # BUG FIX: the first fragment ended with "the" and no trailing space, so
        # the implicitly concatenated help text rendered as
        # "defaults to thePYPROJECTX_INSTALL_DIR ..."
        help=f"The directory where all tools (including pyprojectx) are installed; defaults to the "
        f"{PYPROJECTX_INSTALL_DIR_ENV_VAR} environment value if set, else '.pyprojectx'"
        f" in the same directory as the invoked pw script",
    )
    parser.add_argument(
        "--force-install",
        "-f",
        action="store_true",
        help="Force clean installation of the virtual environment used to run cmd, if any",
    )
    parser.add_argument(
        "--verbose",
        "-v",
        action="count",
        dest="verbosity",
        help="Give more output. This option is additive and can be used up to 2 times.",
    )
    parser.add_argument(
        "--quiet",
        "-q",
        action="store_true",
        help="Suppress output",
    )
    parser.add_argument(
        "--info",
        "-i",
        action="store_true",
        # BUG FIX: typo "in stead" -> "instead" in the user-facing help text
        help="Show the configuration details of a command instead of running it. "
        "If the command is not configured as tool or alias, a list with all available tools and aliases is shown.",
    )
    parser.add_argument(
        "--init",
        action="store_true",
        help="Create or prepare a pyproject.toml and pyprojectx wrapper scripts. "
        "Run with '--init help' to show available init options",
    )
    parser.add_argument(
        "command", nargs=argparse.REMAINDER, help="The command/alias with optional arguments to execute."
    )
    return parser
def ensure_pyprojectx(options):
    """Ensure a virtualenv with the pyprojectx package exists and return its CLI script path.

    The venv lives under ``options.install_path`` in a directory keyed by both the
    pyprojectx version and the running interpreter version, so different Python
    versions get their own installs. Creation and installation only happen when
    neither the POSIX nor the Windows entry-point script is present.
    """
    env_builder = EnvBuilder(with_pip=True)
    # One venv per (pyprojectx version, python major.minor) combination.
    venv_dir = options.install_path.joinpath(
        "pyprojectx", f"{options.version}-py{sys.version_info.major}.{sys.version_info.minor}"
    )
    env_context = env_builder.ensure_directories(venv_dir)
    # Entry-point script names differ per platform: no suffix vs ".exe".
    pyprojectx_script = Path(env_context.bin_path, "pyprojectx")
    pyprojectx_exe = Path(env_context.bin_path, "pyprojectx.exe")
    pip_cmd = [env_context.env_exe, "-m", "pip", "install"]
    if options.quiet:
        # Silence both our own subprocess output and pip's.
        out = subprocess.DEVNULL
        pip_cmd.append("--quiet")
    else:
        # Progress goes to stderr so stdout stays clean for command output.
        out = sys.stderr
    if not pyprojectx_script.is_file() and not pyprojectx_exe.is_file():
        if not options.quiet:
            print(f"{CYAN}creating pyprojectx venv in {BLUE}{venv_dir}{RESET}", file=sys.stderr)
        env_builder.create(venv_dir)
        # Upgrade pip first so the subsequent install uses a current resolver.
        subprocess.run(
            pip_cmd + ["--upgrade", "pip"],
            stdout=out,
            check=True,
        )
        if not options.quiet:
            print(
                f"{CYAN}installing pyprojectx {BLUE}{options.version}: {options.pyprojectx_package} {RESET}",
                file=sys.stderr,
            )
        subprocess.run(pip_cmd + [options.pyprojectx_package], stdout=out, check=True)
    return pyprojectx_script
if __name__ == "__main__":
    # Entry point: forward the CLI arguments (minus the program name) to run().
    run(sys.argv[1:])
|
import requests
import json
class ChronicleEntry(object):
    """A single watch-chronicle entry fetched from the site API.

    Stores the request context (headers, cookies, API URL) so the entry can
    later remove itself via the API.
    """
    def __init__(self, data_dict, headers, cookies, api_url):
        # Bug fix: `datetime` was referenced below but never imported at module
        # level, so the old bare `except` silently turned every timestamp into
        # None via a NameError. Import locally and catch only expected errors.
        from datetime import datetime
        self.headers = headers
        self.cookies = cookies
        self.API_URL = api_url
        self.episode = data_dict.get('episode', None)
        self.anime_id = data_dict.get('id', None)
        self.anime_title = data_dict.get('anime_title', None)
        self.ep_title = data_dict.get('ep_title', None)
        self.chronicle_id = data_dict.get('chronicle_id', None)
        try:
            # 'date' is a unix timestamp (seconds, UTC) when present.
            self.date = datetime.utcfromtimestamp(data_dict['date'])
        except (KeyError, TypeError, ValueError, OSError, OverflowError):
            # Missing or malformed timestamp -> no date.
            self.date = None
    def __post(self, data):
        """POST *data* as JSON to the API and return the decoded JSON response."""
        with requests.post(self.API_URL, headers=self.headers, json=data, cookies=self.cookies) as url:
            return json.loads(url.text)
    def __repr__(self):
        return f'<ChronicleEntry: {self.chronicle_id}>'
    def remove_chronicle_entry(self):
        """Ask the API to delete this chronicle entry; return the JSON reply."""
        data = {
            "controller": "Profile",
            "action": "removeChronicleEntry",
            "chronicle_id": self.chronicle_id,
        }
        return self.__post(data)
|
import copy
from bntransformer.utils import entity_map
from transformers import (
AutoModelForQuestionAnswering,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
pipeline,
)
class BanglaQA:
    """Bengali question answering backed by a HuggingFace QA pipeline."""
    def __init__(self, model_path=None):
        # Fall back to the published Bengali TyDiQA model when none is given.
        if not model_path:
            model_path = "sagorsarker/mbert-bengali-tydiqa-qa"
        self.model = AutoModelForQuestionAnswering.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.nlp = pipeline('question-answering', model=self.model, tokenizer=self.tokenizer)
    def find_answer(self, context, question):
        """Return the pipeline's answer for *question* over *context*."""
        payload = {'question': question, 'context': context}
        return self.nlp(payload)
class BanglaNER:
    """Bengali named-entity recognition via a token-classification pipeline."""
    def __init__(self, model_path=None):
        if not model_path:
            model_path = "neuropark/sahajBERT-NER"
        self.model = AutoModelForTokenClassification.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.nlp = pipeline("ner", model=self.model, tokenizer=self.tokenizer, grouped_entities=True)
    def get_updated_ner_results(self, ner_results, entity_maps):
        """Replace numeric entity-group ids with readable labels.

        Args:
            ner_results: pipeline output; each dict has 'entity_group', 'word',
                'start', 'end', 'score'.
            entity_maps: mapping of int group id -> label string.
        Returns:
            List of dicts with keys entity/score/word/start/end; entries whose
            group id has no mapping are dropped (same as the original).
        """
        updated_ner_results = []
        for entity in ner_results:
            entity_group = int(entity['entity_group'])
            if entity_group not in entity_maps:
                continue  # no label mapping for this group -> skip entry
            updated_ner_results.append({
                "entity": entity_maps[entity_group],
                "score": entity['score'],
                "word": entity['word'],
                "start": entity['start'],
                "end": entity['end'],
            })
        return updated_ner_results
    def ner_tag(self, sentence):
        """Run NER on *sentence*; fall back to raw pipeline output on failure."""
        ner_results = self.nlp(sentence)
        try:
            # for "neuropark/sahajBERT-NER" only
            entity_maps = copy.deepcopy(entity_map)
            return self.get_updated_ner_results(ner_results, entity_maps)
        except Exception:
            # Fixed: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt. Models with non-numeric entity groups land here.
            return ner_results
class BanglaMaskGeneration:
    """Fill-mask predictions for Bengali text using a fill-mask pipeline."""
    def __init__(self, model_path=None):
        # Default to the published Bangla BERT base model.
        if not model_path:
            model_path = "sagorsarker/bangla-bert-base"
        self.unmasker = pipeline('fill-mask', model=model_path)
    def generate_mask(self, sentence):
        """Return fill-mask predictions; on any error print it and echo the input."""
        try:
            return self.unmasker(sentence)
        except Exception as err:
            print(err)
            return sentence
class BanglaTranslation:
    """Bengali-to-English translation using a seq2seq language model."""
    def __init__(self, model_path=None):
        if not model_path:
            model_path = "Helsinki-NLP/opus-mt-bn-en"
        self.model = AutoModelWithLMHead.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
    def bn2en(self, inputs):
        """Translate the Bengali string *inputs* to English."""
        token_ids = self.tokenizer.encode(inputs, return_tensors="pt")
        generated = self.model.generate(token_ids, max_length=40, num_beams=4, early_stopping=True)
        decoded = self.tokenizer.decode(generated[0])
        # Only when pad tokens are present does the original strip whitespace,
        # so keep that exact conditional behavior.
        if '<pad>' in decoded:
            decoded = decoded.replace('<pad>', '').strip()
        return decoded
class BanglaTextGeneration:
    """Bengali text generation with a GPT-2 style text-generation pipeline."""
    def __init__(self, model_path=None, max_length=50, do_sample=False):
        if not model_path:
            model_path = "flax-community/gpt2-bengali"
        self.text_generator = pipeline('text-generation', model=model_path, tokenizer=model_path)
        self.max_length = max_length
        self.do_sample = do_sample
    def generate_text(self, input_text):
        """Generate a continuation of *input_text*; return None on failure."""
        try:
            return self.text_generator(
                input_text, max_length=self.max_length, do_sample=self.do_sample)
        except Exception as err:
            print(err)
            return None
#Write your code below this line 👇
from math import ceil
# Two ways of rounding up a number:
# import math
# print(int(math.ceil(4.2)))
# Second way:
# int(21 / 5) + (21 % 5 > 0)
# The first part becomes 4 and the second part evaluates to "True" if there is a remainder, which in addition True = 1; False = 0.
def paint_calc(height, width, cover):
    """Print and return the number of paint cans needed for a wall.

    Args:
        height: wall height.
        width: wall width.
        cover: area one can of paint covers (same units as height*width).
    Returns:
        int: cans required, rounded up to a whole can.
    """
    number_of_cans = ceil((height * width) / cover)
    print(f"You'll need {number_of_cans} cans of paint.")
    # Improvement: also return the value so callers/tests can use it
    # (the original only printed and implicitly returned None).
    return number_of_cans
#Write your code above this line 👆
# Define a function called paint_calc() so that the code below works.
# 🚨 Don't change the code below 👇
test_h = int(input("Height of wall: "))
test_w = int(input("Width of wall: "))
coverage = 5
paint_calc(height=test_h, width=test_w, cover=coverage)
''' SOLUTION
import math
def paint_calc(height, width, cover):
num_cans = (height * width) / cover
round_up_cans = math.ceil(num_cans)
print(f"You'll need {round_up_cans} cans of paint.")
'''
|
"""
BlobFactory.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Fri Dec 11 14:24:53 PST 2015
Description:
"""
import os
import re
import glob
import numpy as np
from inspect import ismethod
from types import FunctionType
from scipy.interpolate import RectBivariateSpline, interp1d
from ..util.Pickling import read_pickle_file, write_pickle_file
try:
    # Python 2 defines `basestring`; referencing it on Python 3 raises
    # NameError, which is exactly the case we want to handle.
    basestring
except NameError:
    # Fixed: was a bare `except:` that would also mask unrelated errors.
    # This alias enables python 2/3 compatible string type checking.
    basestring = str
try:
    from mpi4py import MPI
    rank = MPI.COMM_WORLD.rank
    size = MPI.COMM_WORLD.size
except ImportError:
    # mpi4py unavailable -> run serially as rank 0 of 1.
    rank = 0
    size = 1
try:
    import h5py
except ImportError:
    # h5py is optional; only needed when blobs are read from HDF5 files.
    pass
def get_k(s):
    """Extract the bracketed index from *s*, e.g. ``"blob[3]"`` -> ``3``.

    NOTE(review): the pattern also matches decimals like "[3.5]", which would
    make int() raise ValueError — assumed indices are always whole numbers.
    """
    match = re.search(r"\[(\d+(\.\d*)?)\]", s)
    return int(match.group(1))
def parse_attribute(blob_name, obj_base):
    """
    Find the attribute nested somewhere in an object that we need to compute
    the value of blob `blob_name`.
    ..note:: This is the only place I ever use eval (I think). It's because
        using __getattribute__ conflicts with the __getattr__ method used
        in analysis.Global21cm.
    Parameters
    ----------
    blob_name : str
        Full name of blob (might be nested)
    obj_base : instance
        Usually just an instance of BlobFactory, which is inherited by
        simulation classes, so think of this as an instance of an
        ares.simulation class.
    """
    # Check for decimals
    # A '.' flanked by digits is a decimal point inside a number (e.g. an
    # index like "[1.5]"), NOT an attribute separator; record its position.
    decimals = []
    for i in range(1, len(blob_name) - 1):
        if blob_name[i-1].isdigit() and blob_name[i] == '.' \
            and blob_name[i+1].isdigit():
            decimals.append(i)
    # Temporarily replace decimal points with a marker so the subsequent
    # split('.') only breaks on attribute separators.
    marker = 'x&x&'
    s = ''
    for i, char in enumerate(blob_name):
        if i in decimals:
            s += marker
        else:
            s += blob_name[i]
    # Split into attribute path components, restoring the decimal points.
    attr_split = []
    for element in s.split('.'):
        attr_split.append(element.replace(marker, '.'))
    if len(attr_split) == 1:
        s = attr_split[0]
        return eval('obj_base.{!s}'.format(s))
    # Nested attribute
    # Walk the attribute chain one component at a time, rebinding obj_base
    # so each eval resolves relative to the previous level.
    blob_attr = None
    obj_list = [obj_base]
    for i in range(len(attr_split)):
        # One particular chunk of the attribute name
        s = attr_split[i]
        new_obj = eval('obj_base.{!s}'.format(s))
        obj_list.append(new_obj)
        obj_base = obj_list[-1]
    # Need to stop once we see parentheses
    #if blob is None:
    #    blob = new_obj
    return new_obj
class BlobFactory(object):
    """
    This class must be inherited by another class, which need only have the
    ``pf`` attribute.
    The three most (only) important parameters are:
    blob_names
    blob_ivars
    blob_funcs
    """
    #def __del__(self):
    #    print("Killing blobs! Processor={}".format(rank))
    def _parse_blobs(self):
        # Populate every internal _blob_* attribute from self.pf, or — when
        # self.pf is not subscriptable (TypeError) — from '<prefix>.hdf5'.
        # Called lazily by the blob_* properties below.
        hdf5_situation = False
        try:
            names = self.pf['blob_names']
        except KeyError:
            # Parameter file has no blobs defined.
            names = None
        except TypeError:
            # self.pf is not a dict-like -> read blob metadata from HDF5.
            hdf5_situation = True
            f = h5py.File('{!s}.hdf5'.format(self.prefix), 'r')
            names = list(f['blobs'].keys())
            f.close()
        if names is None:
            # No blobs at all: null out every attribute and bail.
            self._blob_names = self._blob_ivars = None
            self._blob_dims = self._blob_nd = None
            self._blob_funcs = None
            self._blob_kwargs = None
            return None
        else:
            # Otherwise, figure out how many different kinds (shapes) of
            # blobs we have
            assert type(names) in [list, tuple], \
                "Must supply blob_names as list or tuple!"
            if hdf5_situation:
                f = h5py.File('{!s}.hdf5'.format(self.prefix), 'r')
                _blob_ivars = []
                _blob_ivarn = []
                _blob_names = names
                for name in names:
                    # Independent-variable values stored as an HDF5 attribute.
                    ivar = f['blobs'][name].attrs.get('ivar')
                    if ivar is None:
                        _blob_ivars.append(ivar)
                    else:
                        # Variable names are not stored in the file.
                        _blob_ivarn.append('unknown')
                        _blob_ivars.append(ivar.squeeze())
                f.close()
                # Re-organize...maybe eventually
                self._blob_ivars = _blob_ivars
                self._blob_ivarn = _blob_ivarn
                self._blob_names = _blob_names
            elif 'blob_ivars' in self.pf:
                self._blob_names = names
                if self.pf['blob_ivars'] is None:
                    self._blob_ivars = [None] * len(names)
                else:
                    self._blob_ivarn = []
                    self._blob_ivars = []
                    raw = self.pf['blob_ivars']
                    # k corresponds to ivar group
                    for k, element in enumerate(raw):
                        if element is None:
                            self._blob_ivarn.append(None)
                            self._blob_ivars.append(None)
                            continue
                        # Must make list because could be multi-dimensional
                        # blob, i.e., just appending to blob_ivars won't
                        # cut it.
                        self._blob_ivarn.append([])
                        self._blob_ivars.append([])
                        for l, pair in enumerate(element):
                            assert type(pair) in [list, tuple], \
                                "Must supply blob_ivars as (variable, values)!"
                            self._blob_ivarn[k].append(pair[0])
                            self._blob_ivars[k].append(pair[1])
            else:
                # Names only: every blob group is scalar (no ivars).
                self._blob_names = names
                self._blob_ivars = [None] * len(names)
            self._blob_nd = []
            self._blob_dims = []
            self._blob_funcs = []
            self._blob_kwargs = []
            # Derive dimensionality, shape, and function/kwargs lists for
            # each blob group from the ivars gathered above.
            for i, element in enumerate(self._blob_names):
                # Scalar blobs handled first
                if self._blob_ivars[i] is None:
                    self._blob_nd.append(0)
                    self._blob_dims.append(0)
                    if hdf5_situation:
                        continue
                    if self.pf['blob_funcs'] is None:
                        self._blob_funcs.append([None] * len(element))
                        self._blob_kwargs.append([None] * len(element))
                    elif self.pf['blob_funcs'][i] is None:
                        self._blob_funcs.append([None] * len(element))
                        self._blob_kwargs.append([None] * len(element))
                    else:
                        self._blob_funcs.append(self.pf['blob_funcs'][i])
                        # For backward compatibility
                        if 'blob_kwargs' in self.pf:
                            self._blob_kwargs.append(self.pf['blob_kwargs'][i])
                        else:
                            self._blob_kwargs.append([None] * len(element))
                    continue
                # Everything else
                else:
                    # Be careful with 1-D
                    if type(self._blob_ivars[i]) is np.ndarray:
                        lenarr = len(self._blob_ivars[i].shape)
                        assert lenarr == 1
                        self._blob_nd.append(1)
                        dims = len(self._blob_ivars[i]),
                        self._blob_dims.append(dims)
                    else:
                        self._blob_nd.append(len(self._blob_ivars[i]))
                        dims = tuple([len(element2) \
                            for element2 in self._blob_ivars[i]])
                        self._blob_dims.append(dims)
                    # Handle functions
                    try:
                        no_blob_funcs = self.pf['blob_funcs'] is None or \
                            self.pf['blob_funcs'][i] is None
                    except (KeyError, TypeError, IndexError):
                        no_blob_funcs = True
                    if no_blob_funcs:
                        self._blob_funcs.append([None] * len(element))
                        self._blob_kwargs.append([None] * len(element))
                        continue
                    assert len(element) == len(self.pf['blob_funcs'][i]), \
                        "blob_names must have same length as blob_funcs!"
                    self._blob_funcs.append(self.pf['blob_funcs'][i])
                    if 'blob_kwargs' in self.pf:
                        self._blob_kwargs.append(self.pf['blob_kwargs'][i])
                    else:
                        self._blob_kwargs.append(None)
        # Freeze everything as tuples so downstream code can't mutate them.
        self._blob_nd = tuple(self._blob_nd)
        self._blob_dims = tuple(self._blob_dims)
        self._blob_names = tuple(self._blob_names)
        self._blob_ivars = tuple(self._blob_ivars)
        self._blob_ivarn = tuple(self._blob_ivarn)
        self._blob_funcs = tuple(self._blob_funcs)
        self._blob_kwargs = tuple(self._blob_kwargs)
    @property
    def blob_nbytes(self):
        """
        Estimate for the size of each blob (per walker per step).
        """
        # 8 bytes per value: assumes 64-bit floats.
        if not hasattr(self, '_blob_nbytes'):
            nvalues = 0.
            for i in range(self.blob_groups):
                if self.blob_nd[i] == 0:
                    nvalues += len(self.blob_names[i])
                else:
                    # NOTE(review): np.product is deprecated (removed in
                    # NumPy 2.x); np.prod is the modern spelling.
                    nvalues += len(self.blob_names[i]) \
                        * np.product(self.blob_dims[i])
            self._blob_nbytes = nvalues * 8.
        return self._blob_nbytes
    @property
    def all_blob_names(self):
        # Flat list of every blob name, whether or not names are nested
        # into groups. Raises if any name appears twice.
        if not hasattr(self, '_all_blob_names'):
            if not self.blob_names:
                self._all_blob_names = []
                return []
            nested = any(isinstance(i, list) for i in self.blob_names)
            if nested:
                self._all_blob_names = []
                for i in range(self.blob_groups):
                    self._all_blob_names.extend(self.blob_names[i])
            else:
                self._all_blob_names = self._blob_names
            if len(set(self._all_blob_names)) != len(self._all_blob_names):
                raise ValueError('Blobs must be unique!')
        return self._all_blob_names
    @property
    def blob_groups(self):
        # Number of blob groups when names are nested; None for a flat list.
        if not hasattr(self, '_blob_groups'):
            nested = any(isinstance(i, list) for i in self.blob_names)
            if nested:
                if self.blob_nd is not None:
                    self._blob_groups = len(self.blob_nd)
                else:
                    self._blob_groups = 0
            else:
                self._blob_groups = None
        return self._blob_groups
    # The properties below all lazily trigger _parse_blobs on first access.
    @property
    def blob_nd(self):
        if not hasattr(self, '_blob_nd'):
            self._parse_blobs()
        return self._blob_nd
    @property
    def blob_dims(self):
        if not hasattr(self, '_blob_dims'):
            self._parse_blobs()
        return self._blob_dims
    @property
    def blob_names(self):
        if not hasattr(self, '_blob_names'):
            self._parse_blobs()
        return self._blob_names
    @property
    def blob_ivars(self):
        if not hasattr(self, '_blob_ivars'):
            self._parse_blobs()
        return self._blob_ivars
    @property
    def blob_ivarn(self):
        if not hasattr(self, '_blob_ivarn'):
            self._parse_blobs()
        return self._blob_ivarn
    @property
    def blob_funcs(self):
        if not hasattr(self, '_blob_funcs'):
            self._parse_blobs()
        return self._blob_funcs
    @property
    def blob_kwargs(self):
        if not hasattr(self, '_blob_kwargs'):
            self._parse_blobs()
        return self._blob_kwargs
    @property
    def blobs(self):
        # The blob values themselves: generated in-memory when possible,
        # otherwise loaded from '<prefix>.blobs.pkl'.
        if not hasattr(self, '_blobs'):
            if not self.blob_names:
                self._blobs = []
            else:
                try:
                    self._generate_blobs()
                except AttributeError as e:
                    # Generation needs simulation attributes we may not have
                    # (e.g. when analyzing saved output); fall back to disk.
                    if hasattr(self, 'prefix'):
                        self._blobs =\
                            read_pickle_file('{!s}.blobs.pkl'.format(self.prefix),\
                            nloads=1, verbose=False)
                    else:
                        raise AttributeError(e)
        return self._blobs
    def get_ivars(self, name):
        # Return the independent-variable arrays for blob `name`, searching
        # grouped names first, then derived blobs as a fallback.
        if self.blob_groups is None:
            return self.blob_ivars[self.blob_names.index(name)]
        found_blob = False
        for i in range(self.blob_groups):
            for j, blob in enumerate(self.blob_names[i]):
                if blob == name:
                    found_blob = True
                    break
            if blob == name:
                break
        if not found_blob:
            print("WARNING: ivars for blob {} not found.".format(name))
            if name in self.derived_blob_names:
                print("CORRECTION: found {} in derived blobs!".format(name))
                return self.derived_blob_ivars[name]
            return None
        return self.blob_ivars[i]
    def get_blob(self, name, ivar=None, tol=1e-2):
        """
        This is meant to recover a blob from a single simulation, i.e.,
        NOT a whole slew of them from an MCMC.
        """
        found = True
        #for i in range(self.blob_groups):
        #    for j, blob in enumerate(self.blob_names[i]):
        #        if blob == name:
        #            found = True
        #            break
        #
        #    if blob == name:
        #        break
        try:
            i, j, dims, shape = self.blob_info(name)
        except KeyError:
            found = False
        if not found:
            print("WARNING: blob={} not found. This should NOT happen!".format(name))
            return np.inf
        if self.blob_nd[i] == 0:
            # Scalar blob: single float.
            return float(self.blobs[i][j])
        elif self.blob_nd[i] == 1:
            if ivar is None:
                try:
                    # When would this NOT be the case?
                    return self.blobs[i][j]
                except:
                    # NOTE(review): bare except — presumably guards an index
                    # error for ungrouped blobs; confirm and narrow.
                    return self.blobs[i]
            elif len(self.blob_ivars[i]) == 1:
                # NOTE(review): index [1] on a length-1 list looks like it
                # should be [0] — would raise IndexError; confirm intent.
                iv = self.blob_ivars[i][1]
            else:
                iv = self.blob_ivars[i]
            # This is subject to rounding errors
            if ivar in iv:
                k = list(iv).index(ivar)
            elif np.any(np.abs(iv - ivar) < tol):
                # Nearest match within tolerance `tol`.
                k = np.argmin(np.abs(iv - ivar))
            else:
                raise IndexError("ivar={0:.2g} not in listed ivars!".format(\
                    ivar))
            return float(self.blobs[i][j][k])
        elif self.blob_nd[i] == 2:
            if ivar is None:
                return self.blobs[i][j]
            assert len(ivar) == 2
            # also assert that both values are in self.blob_ivars!
            # Actually, we don't have to abide by that. As long as a function
            # is provided we can evaluate the blob anywhere (with interp)
            kl = []
            for n in range(2):
                #if ivar[n] is None:
                #    kl.append(slice(0,None))
                #    continue
                #
                assert ivar[n] in self.blob_ivars[i][n], \
                    "{} not in ivars for blob={}".format(ivar[n], name)
                #val = list(self.blob_ivars[i][n]).index(ivar[n])
                #
                #kl.append(val)
            k = list(self.blob_ivars[i][0]).index(ivar[0])
            l = list(self.blob_ivars[i][1]).index(ivar[1])
            #k, l = kl
            #print(i,j,k,l)
            return float(self.blobs[i][j][k][l])
    def _generate_blobs(self):
        """
        Create a list of blobs, one per blob group.
        ..note:: This should only be run for individual simulations,
            not in the analysis of MCMC data.
        Returns
        -------
        List, where each element has shape (ivar x blobs). Each element of
        this corresponds to the blobs for one blob group, which is defined by
        either its dimensionality, its independent variables, or both.
        For example, for 1-D blobs, self.blobs[i][j][k] would mean
        i = blob group
        j = index corresponding to elements of self.blob_names
        k = index corresponding to elements of self.blob_ivars[i]
        """
        self._blobs = []
        for i, element in enumerate(self.blob_names):
            this_group = []
            for j, key in enumerate(element):
                # 0-D blobs. Need to know name of attribute where stored!
                if self.blob_nd[i] == 0:
                    if self.blob_funcs[i][j] is None:
                        # Assume blob name is the attribute
                        #blob = self.__getattribute__(key)
                        blob = parse_attribute(key, self)
                    else:
                        fname = self.blob_funcs[i][j]
                        # In this case, the return of parse_attribute is
                        # a value, not a function to be applied to ivars.
                        blob = parse_attribute(fname, self)
                # 1-D blobs. Assume the independent variable is redshift
                # unless a function is provided
                elif self.blob_nd[i] == 1:
                    # The 0 index is because ivars are kept in a list no
                    # matter what
                    x = np.array(self.blob_ivars[i][0]).squeeze()
                    if (self.blob_funcs[i][j] is None) and (key in self.history):
                        # history arrays are reversed so np.interp gets
                        # monotonically increasing redshift.
                        blob = np.interp(x, self.history['z'][-1::-1],
                            self.history[key][-1::-1])
                    elif self.blob_funcs[i][j] is None:
                        raise KeyError('Blob {!s} not in history!'.format(key))
                    else:
                        fname = self.blob_funcs[i][j]
                        # Name of independent variable
                        xn = self.blob_ivarn[i][0]
                        if isinstance(fname, basestring):
                            func = parse_attribute(fname, self)
                        else:
                            print('hey {!s}'.format(fname))
                            raise ValueError('pretty sure this is broken!')
                            # fname is a slice, like ('igm_k_heat', 0)
                            # to retrieve heating rate from H ionizations
                            _xx = self.history['z'][-1::-1]
                            _yy = self.history[fname[0]][-1::-1,fname[1]]
                            func = (_xx, _yy)
                        if ismethod(func) or isinstance(func, interp1d) or \
                            (type(func) == FunctionType) \
                            or hasattr(func, '__call__'):
                            try:
                                if self.blob_kwargs[i] is not None:
                                    kw = self.blob_kwargs[i][j]
                                else:
                                    kw = {}
                                # Wrap so extra kwargs ride along with the
                                # independent variable at each evaluation.
                                def func_kw(xx):
                                    _kw = kw.copy()
                                    _kw.update({xn:xx})
                                    return func(**_kw)
                                blob = np.array([func_kw(xx) for xx in x])
                            except TypeError:
                                # Function doesn't accept kwargs: map directly.
                                blob = np.array(list(map(func, x)))
                        else:
                            # (x, y) table: linear interpolation.
                            blob = np.interp(x, func[0], func[1])
                else:
                    # Must have blob_funcs for this case
                    fname = self.blob_funcs[i][j]
                    tmp_f = parse_attribute(fname, self)
                    xarr, yarr = list(map(np.array, self.blob_ivars[i]))
                    if (type(tmp_f) is FunctionType) or ismethod(tmp_f) \
                        or hasattr(func, '__call__'):
                        func = tmp_f
                    elif type(tmp_f) is tuple:
                        # Tabulated (z, E, flux): build a 2-D spline.
                        z, E, flux = tmp_f
                        func = RectBivariateSpline(z, E, flux)
                    else:
                        raise TypeError('Sorry: don\'t understand blob {!s}'.format(key))
                    xn, yn = self.blob_ivarn[i]
                    blob = []
                    # We're assuming that the functions are vectorized.
                    # Didn't used to, but it speeds things up (a lot).
                    for x in xarr:
                        tmp = []
                        if self.blob_kwargs[i] is not None:
                            kw = self.blob_kwargs[i][j]
                        else:
                            kw = {}
                        kw.update({xn:x, yn:yarr})
                        result = func(**kw)
                        # Happens when we save a blob that isn't actually
                        # a PQ (i.e., just a constant). Need to kludge so it
                        # doesn't crash.
                        if type(result) in [int, float, np.float64]:
                            result = result * np.ones_like(yarr)
                        tmp.extend(result)
                        blob.append(tmp)
                this_group.append(np.array(blob))
            self._blobs.append(np.array(this_group))
    @property
    def blob_data(self):
        # Cache of blob arrays loaded from disk, keyed by blob name.
        if not hasattr(self, '_blob_data'):
            self._blob_data = {}
        return self._blob_data
    @blob_data.setter
    def blob_data(self, value):
        # Merge (not replace) new entries into the cache.
        self._blob_data.update(value)
    def get_blob_from_disk(self, name):
        # Convenience alias for dictionary-style access below.
        return self.__getitem__(name)
    def __getitem__(self, name):
        if name in self.blob_data:
            return self.blob_data[name]
        return self._get_item(name)
    def blob_info(self, name):
        """
        Returns
        -------
        index of blob group, index of element within group, dimensionality,
        and exact dimensions of blob.
        """
        if hasattr(self, 'derived_blob_names'):
            # This is bad practice since this is an attribute of ModelSet,
            # i.e., the child class (sometimes)
            if name in self.derived_blob_names:
                iv = self.derived_blob_ivars[name]
                return None, None, len(iv), tuple([len(element) for element in iv])
        nested = any(isinstance(i, list) for i in self.blob_names)
        if nested:
            found = False
            for i, group in enumerate(self.blob_names):
                for j, element in enumerate(group):
                    if element == name:
                        found = True
                        break
                if element == name:
                    break
            if not found:
                raise KeyError('Blob {!s} not found.'.format(name))
            return i, j, self.blob_nd[i], self.blob_dims[i]
        else:
            i = self.blob_names.index(name)
            return None, None, self.blob_nd[i], self.blob_dims[i]
    def _get_item(self, name):
        # Load blob `name` from pickle files on disk, handling three layouts:
        # a single file, one file per processor, or one file per checkpoint.
        i, j, nd, dims = self.blob_info(name)
        fn = "{0!s}.blob_{1}d.{2!s}.pkl".format(self.prefix, nd, name)
        # Might have data split up among processors or checkpoints
        by_proc = False
        by_dd = False
        if not os.path.exists(fn):
            # First, look for processor-by-processor outputs
            fn = "{0!s}.000.blob_{1}d.{2!s}.pkl".format(self.prefix, nd, name)
            if os.path.exists(fn):
                by_proc = True
                by_dd = False
            # Then, those where each checkpoint has its own file
            else:
                by_proc = False
                by_dd = True
                search_for = "{0!s}.dd????.blob_{1}d.{2!s}.pkl".format(\
                    self.prefix, nd, name)
                _ddf = glob.glob(search_for)
                if self.include_checkpoints is None:
                    ddf = _ddf
                else:
                    # Restrict to the explicitly requested checkpoints.
                    ddf = []
                    for dd in self.include_checkpoints:
                        ddid = str(dd).zfill(4)
                        tmp = "{0!s}.dd{1!s}.blob_{2}d.{3!s}.pkl".format(\
                            self.prefix, ddid, nd, name)
                        ddf.append(tmp)
                # Need to put in order if we want to match up with
                # chain etc.
                ddf = np.sort(ddf)
                # Start with the first
                fn = ddf[0]
        fid = 0
        to_return = []
        while True:
            # Loop over successive files until one is missing (per-proc)
            # or the checkpoint list is exhausted (per-dd).
            if not os.path.exists(fn):
                break
            all_data = []
            data_chunks = read_pickle_file(fn, nloads=None, verbose=False)
            for data_chunk in data_chunks:
                all_data.extend(data_chunk)
            del data_chunks
            print("# Loaded {}".format(fn))
            # Used to have a squeeze() here for no apparent reason...
            # somehow it resolved itself.
            all_data = np.array(all_data, dtype=np.float64)
            to_return.extend(all_data)
            if not (by_proc or by_dd):
                break
            fid += 1
            if by_proc:
                fn = "{0!s}.{1!s}.blob_{2}d.{3!s}.pkl".format(self.prefix,\
                    str(fid).zfill(3), nd, name)
            else:
                if (fid >= len(ddf)):
                    break
                fn = ddf[fid]
        # Mask non-finite entries so downstream statistics ignore them.
        mask = np.logical_not(np.isfinite(to_return))
        masked_data = np.ma.array(to_return, mask=mask)
        # CAN BE VERY CONFUSING
        #if by_proc and rank == 0:
        #    fn = "{0!s}.blob_{1}d.{2!s}.pkl".format(self.prefix, nd, name)
        #    write_pickle_file(masked_data, fn, ndumps=1, open_mode='w',\
        #        safe_mode=False, verbose=False)
        self.blob_data = {name: masked_data}
        return masked_data
|
# Copyright (C) 2018 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from aether.mocker import MockFn, DataMocker, Generic, MockingManager
def pprint(obj):
    """Pretty-print *obj* as 2-space-indented JSON."""
    text = json.dumps(obj, indent=2)
    print(text)
def weird(args=None):
    """Return *args* reversed when truthy, otherwise the string "weird"."""
    if not args:
        return "weird"
    return args[::-1]
def show_mockfn():
    # illustrate how mocker.MockFn can be passed a function and args for later
    deferred_calls = [
        MockFn(sum, [1, 2]),
        MockFn(weird, ["dook", "ip"]),
        MockFn(weird, ["args", "pie"]),
    ]
    for call in deferred_calls:
        print(call())
def main():
    """Wire up the mocking manager, override geo properties, and emit 100 people."""
    person = "org.eha.demo.Person"
    location = "org.eha.demo.GeoLocation"
    manager = MockingManager()
    # Override both coordinates with realistic geo generators.
    for prop, gen_fn in (("latitude", Generic.geo_lat), ("longitude", Generic.geo_lng)):
        manager.types[location].override_property(prop, MockFn(gen_fn))
    for _ in range(100):
        # Since types are linked, we only need to generate one to spawn linked versions of others
        manager.register(person)
    manager.kill()  # we explicitly clean up our threads
if __name__ == "__main__":
    # Run the mocking demo when executed as a script.
    main()
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Calculates confidence scores for all suspected CLs so far and save the scores
to data store.
"""
import argparse
from collections import defaultdict
import datetime
import json
import os
import sys
_FINDIT_DIR = os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir)
sys.path.insert(1, _FINDIT_DIR)
from local_libs import remote_api
from common.waterfall import failure_type
from lib import time_util
from model import analysis_approach_type
from model import suspected_cl_status
from model.suspected_cl_confidence import SuspectedCLConfidence
from model.suspected_cl_confidence import ConfidenceInformation
from model.wf_suspected_cl import WfSuspectedCL
# Suspected-CL statuses that count as "triaged" for confidence calculation;
# fully untriaged CLs are excluded from all queries below.
TRIAGED_STATUS = [
    suspected_cl_status.CORRECT,
    suspected_cl_status.INCORRECT,
    suspected_cl_status.PARTIALLY_CORRECT,
    suspected_cl_status.PARTIALLY_TRIAGED
]
def _CreateConfidenceInformation(result, score=None):
  """Build a ConfidenceInformation from correct/incorrect triage counts.

  Confidence is correct/total, or -1.0 when there are no triaged results.
  """
  correct = result[suspected_cl_status.CORRECT]
  incorrect = result[suspected_cl_status.INCORRECT]
  total = correct + incorrect
  confidence = float(correct) / total if total else -1.0
  return ConfidenceInformation(
      correct=correct, total=total, confidence=confidence, score=score)
def _CalculateConfidenceLevelsForHeuristic(new_results):
  """Convert a {score: triage_counts} dict into ConfidenceInformation list."""
  return [
      _CreateConfidenceInformation(result, score=score)
      for score, result in new_results.iteritems()
  ]
def _SavesNewCLConfidence(
    date_start, date_end, result_heuristic, result_try_job, result_both):
  """Persist freshly computed confidence numbers and return the updated record."""
  heuristic_compile = _CalculateConfidenceLevelsForHeuristic(
      result_heuristic[failure_type.COMPILE])
  heuristic_test = _CalculateConfidenceLevelsForHeuristic(
      result_heuristic[failure_type.TEST])
  try_job_compile = _CreateConfidenceInformation(
      result_try_job[failure_type.COMPILE])
  try_job_test = _CreateConfidenceInformation(
      result_try_job[failure_type.TEST])
  both_compile = _CreateConfidenceInformation(
      result_both[failure_type.COMPILE])
  both_test = _CreateConfidenceInformation(
      result_both[failure_type.TEST])
  # Argument order matters: compile results first, then test results.
  SuspectedCLConfidence.Get().Update(
      date_start, date_end, heuristic_compile, try_job_compile,
      both_compile, heuristic_test, try_job_test, both_test)
  return SuspectedCLConfidence.Get()
def _AddMoreConstrainsToQuery(query, failure_args, date_start, date_end):
  """Narrow *query* by requested failure type and updated_time window.

  When neither 'compile' nor 'test' is requested, no type filter is applied
  (i.e. both types are included). The upper time bound is always applied.
  """
  if 'compile' in failure_args:
    query = query.filter(WfSuspectedCL.failure_type == failure_type.COMPILE)
  elif 'test' in failure_args:
    query = query.filter(WfSuspectedCL.failure_type == failure_type.TEST)
  if date_start:
    query = query.filter(WfSuspectedCL.updated_time >= date_start)
  query = query.filter(WfSuspectedCL.updated_time < date_end)
  return query
def _GetCLDataForHeuristic(failure_args, date_start, date_end):
  """Count triaged heuristic suspected CLs grouped by failure type, top score and status.

  Returns a nested dict: result[failure_type][top_score][status] -> count.
  """
  suspected_cls_query = WfSuspectedCL.query(remote_api.ndb.AND(
      WfSuspectedCL.status.IN(TRIAGED_STATUS),
      WfSuspectedCL.approaches == analysis_approach_type.HEURISTIC))
  suspected_cls_query = _AddMoreConstrainsToQuery(
      suspected_cls_query, failure_args, date_start, date_end)
  suspected_cls = suspected_cls_query.fetch()
  cl_by_top_score_dict = defaultdict(
      lambda: defaultdict(lambda: defaultdict(int)))
  for cl in suspected_cls:
    if not cl.builds:
      continue
    failures = []
    for build in cl.builds.values():
      # Skip duplicate failures (already counted for this CL), builds with
      # no heuristic top score, and untriaged builds.
      if (build['failures'] in failures or not build['top_score'] or
          build['status'] is None):
        continue
      # Skip builds whose failure type was excluded on the command line.
      if (('compile' in failure_args and
           build['failure_type'] == failure_type.TEST) or
          ('test' in failure_args and
           build['failure_type'] == failure_type.COMPILE)):
        continue
      failures.append(build['failures'])
      failure = build['failure_type']
      top_score = build['top_score']
      status = build['status']
      cl_by_top_score_dict[failure][top_score][status] += 1
  return cl_by_top_score_dict
def _GetCLDataForTryJob(failure_args, date_start, date_end):
  """Count triaged try-job suspected CLs by failure type and status.

  Returns two dicts keyed [failure_type][status] -> count: one for all
  try-job finds, one restricted to builds where heuristic analysis also
  found the same CL.
  """
  suspected_cls_query = WfSuspectedCL.query(remote_api.ndb.AND(
      WfSuspectedCL.status.IN(TRIAGED_STATUS),
      WfSuspectedCL.approaches == analysis_approach_type.TRY_JOB))
  suspected_cls_query = _AddMoreConstrainsToQuery(
      suspected_cls_query, failure_args, date_start, date_end)
  suspected_cls = suspected_cls_query.fetch()
  try_job_cls_dict = defaultdict(lambda: defaultdict(int))
  both_cls_dict = defaultdict(lambda: defaultdict(int))
  for cl in suspected_cls:
    if not cl.builds:
      continue
    failures = []
    for build in cl.builds.values():
      # Skip duplicate failures for this CL and untriaged builds.
      if build['failures'] in failures or build['status'] is None:
        continue
      # Skip builds whose failure type was excluded on the command line.
      if (('compile' in failure_args and
           build['failure_type'] == failure_type.TEST) or
          ('test' in failure_args and
           build['failure_type'] == failure_type.COMPILE)):
        continue
      failures.append(build['failures'])
      try_job_cls_dict[build['failure_type']][build['status']] += 1
      if analysis_approach_type.HEURISTIC in build['approaches']:
        # Both heuristic and try job found this CL on this build.
        both_cls_dict[build['failure_type']][build['status']] += 1
  return try_job_cls_dict, both_cls_dict
def _FormatResult(result):
if not result:
return None
new_result = {}
if isinstance(result, list):
for score_result in result:
new_result[score_result.score] = score_result.ToDict()
elif isinstance(result, dict):
new_result = result
else:
new_result = result.ToDict()
return new_result
def _PrintResult(
    date_start, date_end, result_heuristic, result_try_job, result_both):
  """Print the date window and each available result section as indented JSON.

  Sections (heuristic / try job / both) are only printed when present.
  NOTE: this file is Python 2 (print statements, iteritems elsewhere).
  """
  print 'Start Date: ', date_start
  print 'End Date: ', date_end
  print '--------------------------------------------------------------------'
  if result_heuristic:
    print 'compile_heuristic'
    print json.dumps(
        _FormatResult(result_heuristic.get(failure_type.COMPILE)), indent=2)
    print
    print 'test_heuristic'
    print json.dumps(
        _FormatResult(result_heuristic.get(failure_type.TEST)), indent=2)
    print
  if result_try_job:
    print 'compile_try_job'
    print json.dumps(
        _FormatResult(result_try_job.get(failure_type.COMPILE)), indent=2)
    print
    print 'test_try_job'
    print json.dumps(
        _FormatResult(result_try_job.get(failure_type.TEST)), indent=2)
    print
  if result_both:
    print 'compile_heuristic_try_job'
    print json.dumps(
        _FormatResult(result_both.get(failure_type.COMPILE)), indent=2)
    print
    print 'test_heuristic_try_job'
    print json.dumps(
        _FormatResult(result_both.get(failure_type.TEST)), indent=2)
    print
def _ValidDate(date_str):
try:
return datetime.datetime.strptime(date_str, '%Y-%m-%d')
except ValueError:
raise argparse.ArgumentTypeError('Type of date is invalid.')
def _GetArguments():
    """Parses command-line flags and returns a dict of only the truthy values.

    -c/-t select compile vs test failures (mutually exclusive; when both are
    absent everything is queried), -r/-j select heuristic vs try-job
    approaches, and -s/-e bound the date range.  An empty dict therefore
    means "full calculation with defaults".
    """
    parser = argparse.ArgumentParser()
    # Uses group to make -c|-t are exclusive from each other, because if both
    # arguments are there, it means query everything.
    # Same for -r|-j.
    failure_group = parser.add_mutually_exclusive_group()
    failure_group.add_argument('-c', action='store_true', dest='compile',
                               help='get confidence score for compile failures.')
    failure_group.add_argument('-t', action='store_true', dest='test',
                               help='get confidence score for test failures.')
    approach_group = parser.add_mutually_exclusive_group()
    # Uses -r for heuristic failures because -h is already used for help.
    approach_group.add_argument('-r', action='store_true', dest='heuristic',
                                help='get confidence score for heuristic failures.')
    # Uses -j for try job failures because -t is already used for test failures.
    approach_group.add_argument('-j', action='store_true', dest='try_job',
                                help='get confidence score for try job failures.')
    parser.add_argument('-s', type=_ValidDate, dest='start_date',
                        help='The Start Date - format YYYY-MM-DD')
    parser.add_argument('-e', type=_ValidDate, dest='end_date',
                        help='The End Date - format YYYY-MM-DD')
    args_dict = vars(parser.parse_args())
    # Keep only flags that were actually supplied (Python 2 dict.iteritems).
    useful_args = {}
    for arg, value in args_dict.iteritems():
        if value:
            useful_args[arg] = value
    return useful_args
if __name__ == '__main__':
    # Set up the Remote API to use services on the live App Engine.
    remote_api.EnableRemoteApi(app_id='findit-for-me')
    args = _GetArguments()
    # Default window ends at midnight UTC today.
    default_end_date = time_util.GetUTCNow().replace(
        hour=0, minute=0, second=0, microsecond=0)
    end_date = args.get('end_date', default_end_date)
    start_date = args.get('start_date')
    if not args:  # Limits start_date to roughly half years ago.
        start_date = end_date - datetime.timedelta(days=183)
    heuristic_result = None
    try_job_result = None
    both_result = None
    if 'heuristic' in args:  # Only calculates results for heuristic.
        heuristic_result = _GetCLDataForHeuristic(args, start_date, end_date)
    elif 'try_job' in args:  # Only calculates results for try job.
        try_job_result, both_result = _GetCLDataForTryJob(
            args, start_date, end_date)
    else:  # A full calculation for CLs for both failure types.
        heuristic_result = _GetCLDataForHeuristic(args, start_date, end_date)
        try_job_result, both_result = _GetCLDataForTryJob(
            args, start_date, end_date)
    if not args:  # Saves new confidence score for full calculation only.
        cl_confidence = _SavesNewCLConfidence(
            start_date, end_date, heuristic_result, try_job_result, both_result)
        # Report the stored entity's fields rather than the raw query results.
        heuristic_result = {
            failure_type.COMPILE: cl_confidence.compile_heuristic,
            failure_type.TEST: cl_confidence.test_heuristic
        }
        try_job_result = {
            failure_type.COMPILE: cl_confidence.compile_try_job,
            failure_type.TEST: cl_confidence.test_try_job
        }
        both_result = {
            failure_type.COMPILE: cl_confidence.compile_heuristic_try_job,
            failure_type.TEST: cl_confidence.test_heuristic_try_job
        }
        _PrintResult(
            start_date, end_date, heuristic_result, try_job_result, both_result)
    else:
        _PrintResult(
            start_date, end_date, heuristic_result, try_job_result, both_result)
|
class IncludeDependencyAnalyzer(object):
    """No-op include scanner: reports no include dependencies for any file."""

    def listIncludes(self, source_file):
        """Return the includes of `source_file` (always an empty list here)."""
        return list()
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def talk():
    """Publish the string "8" on the 'message' topic once per second."""
    publisher = rospy.Publisher('message', String, queue_size=10)
    rospy.init_node('talk')
    loop_rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        payload = "8"
        rospy.loginfo(payload)
        publisher.publish(payload)
        loop_rate.sleep()
if __name__ == '__main__':
    try:
        talk()
    # Node shutdown (e.g. Ctrl-C) raises ROSInterruptException from
    # rate.sleep(); exit quietly in that case.
    except rospy.ROSInterruptException:
        pass
"""Provides utility programs.
Notes:
The astronomy-type utilities should probably be separated out into
another file.
--schriste
"""
from __future__ import absolute_import
from scipy.constants import constants as con
__all__ = ["toggle_pylab", "degrees_to_hours", "degrees_to_arc",
"kelvin_to_keV", "keV_to_kelvin", "unique", "print_table",
"to_angstrom"]
from matplotlib import pyplot
import numpy as np
from itertools import izip, imap
def to_signed(dtype):
    """ Return dtype that can hold data of passed dtype but is signed.
    Raise ValueError if no such dtype exists.

    Unsigned dtypes are widened to the signed integer type of twice the
    width (capped at 64 bits); uint64 has no lossless signed counterpart.
    Any other dtype is returned unchanged.

    Parameters
    ----------
    dtype : np.dtype
        dtype whose values the new dtype needs to be able to represent.
    """
    if dtype.kind != "u":
        return np.dtype(dtype)
    if dtype.itemsize == 8:
        raise ValueError("Cannot losslessy convert uint64 to int.")
    bits = min(dtype.itemsize * 2 * 8, 64)
    return np.dtype("int%d" % bits)
def toggle_pylab(fn):
    """ A decorator to prevent functions from opening matplotlib windows
    unexpectedly when sunpy is run in interactive shells like ipython
    --pylab.

    Toggles the value of matplotlib.pyplot.isinteractive() to preserve the
    users' expections of pylab's behaviour in general. """
    # NOTE(review): the interactive state is checked once, at decoration
    # time -- if pyplot's interactive mode changes afterwards the wrapper
    # choice is already fixed.  Confirm this is intended.
    if pyplot.isinteractive():
        def fn_itoggle(*args, **kwargs):
            # Suspend interactive drawing for the duration of the call,
            # then restore it.
            pyplot.ioff()
            ret = fn(*args, **kwargs)
            pyplot.ion()
            return ret
        return fn_itoggle
    else:
        return fn
def degrees_to_hours(angle):
    """Converts an angle from the degree notation to the hour, arcmin,
    arcsec notation (note: returned as a list [hour, arcmin, arcsec])."""
    hours_float = angle / 15.0
    hour = int(np.floor(hours_float))
    minutes_float = (hours_float - hour) * 60
    arcminute = int(np.floor(minutes_float))
    arcsecond = (minutes_float - arcminute) * 60.0
    return [hour, arcminute, arcsecond]
def degrees_to_arc(angle):
    """Converts decimal degrees to degree, arcminute, arcsecond
    (note: returned as a list [degree, arcmin, arcsec])."""
    whole_degrees = int(np.floor(angle))
    frac_minutes = (angle - whole_degrees) * 60
    arcminute = int(np.floor(frac_minutes))
    arcsecond = (frac_minutes - arcminute) * 60.0
    return [whole_degrees, arcminute, arcsecond]
# Conversion tables: unit name -> scale factor.  Wavelength factors are in
# meters, energy factors in eV, frequency factors in Hz.
wavelength = [
    ('Angstrom', 1e-10),
    ('nm', 1e-9),
    ('micron', 1e-6),
    ('micrometer', 1e-6),
    ('mm', 1e-3),
    ('cm', 1e-2),
    ('m', 1),  # BUG FIX: was 1e-6 -- one meter is 1 meter, not a micron.
]
energy = [
    ('eV', 1),
    ('keV', 1e3),
    ('MeV', 1e6),
]
frequency = [
    ('Hz', 1),
    ('kHz', 1e3),
    ('MHz', 1e6),
    ('GHz', 1e9),
]
# unit name -> (quantity kind, scale factor)
units = {}
for k, v in wavelength:
    units[k] = ('wavelength', v)
for k, v in energy:
    units[k] = ('energy', v)
for k, v in frequency:
    units[k] = ('frequency', v)
def to_angstrom(value, unit):
    """Convert `value`, expressed in `unit` (any wavelength, frequency or
    energy unit listed in `units`), to the equivalent wavelength in Angstrom.

    Raises ValueError for an unknown unit.
    """
    C = 299792458.  # speed of light [m/s]
    ANGSTROM = units['Angstrom'][1]
    try:
        type_, n = units[unit]
    except KeyError:
        raise ValueError('Cannot convert %s to Angstrom' % unit)
    if type_ == 'wavelength':
        # x = Angstroms per one input unit.
        x = n / ANGSTROM
        # BUG FIX: was `value / x`, which e.g. turned 1 nm into 0.1 Angstrom
        # instead of 10 Angstrom.  The frequency and energy branches below
        # already convert in the unit -> Angstrom direction.
        return value * x
    elif type_ == 'frequency':
        # lambda = c / nu, converted from meters to Angstrom.
        x = 1 / ANGSTROM / n
        return x * (C / value)
    elif type_ == 'energy':
        # 1 eV corresponds to 8065.53 cm^-1; invert to get a wavelength.
        x = 1 / (ANGSTROM / 1e-2) / n
        return x * (1 / (8065.53 * value))
    else:
        raise ValueError('Unable to convert %s to Angstrom' % type_)
def kelvin_to_keV(temperature):
    """Convert from temperature expressed in Kelvin to a
    temperature expressed in keV"""
    kelvin_per_keV = con.e / con.k * 1000.0
    return temperature / kelvin_per_keV
def keV_to_kelvin(temperature):
    """Convert from temperature expressed in keV to a temperature
    expressed in Kelvin"""
    kelvin_per_keV = con.e / con.k * 1000.0
    return temperature * kelvin_per_keV
def unique(itr, key=None):
    """Yield items of `itr` whose value (or key(value), when `key` is
    given) has not been seen before, preserving first-occurrence order."""
    seen = set()
    for elem in itr:
        marker = elem if key is None else key(elem)
        if marker not in seen:
            seen.add(marker)
            yield elem
def print_table(lst, colsep=' ', linesep='\n'):
    """Format a list of string rows as an aligned table.

    Each column is left-justified to its widest cell.  Despite the name this
    returns the table as a string rather than printing it.

    :param lst: list of rows, each a sequence of strings of equal length.
    :param colsep: separator placed between columns.
    :param linesep: separator placed between rows.
    """
    # BUG FIX: the original used itertools.izip/imap, which do not exist on
    # Python 3; the builtin zip/map behave identically here on both versions.
    widths = [max(map(len, col)) for col in zip(*lst)]
    return linesep.join(
        colsep.join(cell.ljust(w) for w, cell in zip(widths, row))
        for row in lst
    )
|
"""Input objects for Queenbee jobs."""
from typing import Dict, List, Union
from pydantic import Field, constr
from ..artifact_source import HTTP, S3, ProjectFolder
from ...base.basemodel import BaseModel
from ...base.parser import parse_file
class JobArgument(BaseModel):
    """Job argument is an argument input for arguments which are not files or folders."""

    # Discriminator used when deserializing the JobArguments Union.
    type: constr(regex='^JobArgument$') = 'JobArgument'

    name: str = Field(
        ...,
        description='Argument name. The name must match one of the input names from '
        'Job\'s DAG template.'
    )

    value: str = Field(
        ...,
        description='The value of the job argument.'
    )

    @property
    def is_artifact(self):
        """Always False: a plain job argument is a parameter, not a file/folder."""
        return False

    @property
    def is_parameter(self):
        """Complement of is_artifact; always True for this class."""
        return not self.is_artifact
class JobPathArgument(BaseModel):
    """Job argument for a file or folder input, sourced from a storage location."""

    # Discriminator used when deserializing the JobArguments Union.
    type: constr(regex='^JobPathArgument$') = 'JobPathArgument'

    name: str = Field(
        ...,
        description='Argument name. The name must match one of the input names from '
        'Job\'s template which can be a function or DAG.'
    )

    source: Union[HTTP, S3, ProjectFolder] = Field(
        ...,
        description='The path to source the file from.'
    )

    @property
    def is_artifact(self):
        """Always True: path arguments refer to files/folders (artifacts)."""
        return True

    @property
    def is_parameter(self):
        """Complement of is_artifact; always False for this class."""
        return not self.is_artifact
# Union of the two argument flavors; used when (de)serializing mixed lists.
JobArguments = Union[JobArgument, JobPathArgument]
def load_job_arguments(fp: str) -> List[JobArguments]:
    """Load Job arguments from a JSON or YAML file.

    Args:
        fp: File path to a JSON or YAML file with a list of JobArguments.

    Returns:
        List - A list of of JobArgument and JobPathArgument objects.
    """
    return load_job_arguments_from_dict(parse_file(fp))
def load_job_arguments_from_dict(data: List[Dict]) -> List[JobArguments]:
    """Load Job arguments from a list of dictionaries.

    Args:
        data: A list of job arguments as dictionaries.

    Returns:
        List - A list of of JobArgument and JobPathArgument objects.

    Raises:
        ValueError: when an entry is missing the "type" key or has an
            unrecognized type.
    """
    # Dispatch table from the "type" discriminator to the model class.
    arg_classes = {
        'JobArgument': JobArgument,
        'JobPathArgument': JobPathArgument,
    }
    args = []
    for d in data:
        try:
            arg_type = d['type']
        except KeyError:
            raise ValueError(
                'Input argument with missing "type" key. Valid types are: '
                f'JobArgument and JobPathArgument:\n{d}'
            )
        try:
            arg_class = arg_classes[arg_type]
        except KeyError:
            raise ValueError(
                # BUG FIX: the two concatenated sentences had no separator,
                # producing "...: X.Valid types are...".
                f'Invalid type for Job argument: {arg_type}. '
                'Valid types are: JobArgument and JobPathArgument.'
            )
        args.append(arg_class.parse_obj(d))
    return args
|
from math import sqrt
from problem import Problem
from utils import path
def load_words():
    """Read p042_words.txt and return its comma-separated words with the
    surrounding double quotes stripped."""
    with path.load_file("p042_words.txt") as handle:
        raw = handle.read()
    return [token.strip('"') for token in raw.split(",")]
class CodedTriangleNumbers(Problem, name="Coded triangle numbers", expected=162):
    """Project Euler 42: count dictionary words whose letter sum is a
    triangle number t_n = n(n+1)/2."""

    words = load_words()

    @classmethod
    def n_from_triangle_num(cls, n: int):
        """Invert t = n(n+1)/2; an integral result means t is triangular."""
        return (sqrt(8 * n + 1) - 1) / 2

    @classmethod
    def is_triangle_number(cls, n: int) -> bool:
        return cls.n_from_triangle_num(n) % 1 == 0

    @classmethod
    def is_triangle_word(cls, word):
        # 'A' is ord 65, so ord(letter) - 64 is its 1-based alphabet index.
        letter_value = sum(ord(ch) - 64 for ch in word)
        return cls.is_triangle_number(letter_value)

    @Problem.solution()
    def solution(self):
        return sum(self.is_triangle_word(w) for w in self.words)
|
#
# E M I T T E R A N D T R E A T M E N T
#
import time
import numpy as np
from abc import ABC, abstractmethod
# from Treatment import Treatment
# from typing import Callable
# import logging
# from time import sleep
# from datetime import datetime
import nidaqmx as ni
import threading, queue
# Pieces for how NI names their ports
NI_PORT = "port"  # port name stem, e.g. "port0"
NI_PORT0 = "port0"
NI_LINE = "line"  # line name stem, e.g. "line3"
NI_SEPARATOR = "/"  # separator in full channel names, e.g. Mod4/port0/line3
MAX_EMITTERS = 6  # number of emitter lines driven per module
#
# W A R N I N G
#
# This is temporary until I can get things integrated. This is just for debugging.
#
#
# T R E A T M E N T
#
class Treatment:
    """Debug-only stand-in treatment plan (see the warning above).

    A plan is a list of rows; each row is a list of six booleans saying
    which emitters fire.
    """

    def __init__(self):
        # BUG FIX: the plan was assigned to a local variable (`plan = []`),
        # so new instances had no _plan attribute and the `plan` property
        # raised AttributeError until generateDummyPlan() ran.
        self._plan = []

    #
    # G E N E R A T E  S A M P L E  P L A N
    #
    # This is something that will not be used in production. Only for testing
    #
    def generateDummyPlan(self):
        """Fill this instance's plan with 15 alternating all-on/all-off rows.

        BUG FIX: this was decorated @classmethod while taking `self` and
        assigning `self._plan`, which silently wrote a class attribute; it
        is now a regular instance method.
        """
        # Even rows all on, odd rows all off; each row is a distinct list.
        self._plan = [[index % 2 == 0] * 6 for index in range(15)]

    # Access the plan
    @property
    def plan(self):
        """The current treatment plan (list of rows of booleans)."""
        return self._plan
# End warning/test code
class Emitter(ABC):
    """Base class for a bank of emitters driven through one NI module."""

    def __init__(self, module: str):
        """
        An emitter that can be controlled from a specific module.
        This will currently not correspond to a specific set of pins within the module
        :param module: A string name of the module -- usually something like Mod4
        """
        # Pending treatment plans, consumed in FIFO order.
        self._treatments = queue.SimpleQueue()
        self._module = module

    @property
    def module(self):
        """Name of the NI module this emitter bank is attached to."""
        return self._module

    @abstractmethod
    def applyTreatment(self, distanceTraveled: int) -> bool:
        """
        Apply the treatment according to the distance traveled.
        :param distanceTraveled: The distance traveled in cm
        """
        raise NotImplementedError

    @abstractmethod
    def diagnostics(self) -> (bool, str):
        """
        Run diagnostics on the emitter array.
        """
        raise NotImplementedError

    #@abstractmethod
    def add(self, plan: Treatment) -> bool:
        """
        Queue a treatment plan for later processing.
        :param plan: the Treatment to enqueue
        :return: True (the queue is unbounded)
        """
        self._treatments.put(plan)
        return True

    @staticmethod
    def channelName(module: str, port: int, line: int) -> str:
        """Build an NI channel name such as Mod4/port0/line3."""
        parts = (module, NI_PORT + str(port), NI_LINE + str(line))
        return NI_SEPARATOR.join(parts)
#
# A Virtual emitter is one that does not correspond to any specific hardware
#
class VirtualEmitter(Emitter):
    """Emitter with no hardware behind it; useful for dry runs."""

    def applyTreatment(self, distanceTraveled: int) -> bool:
        # Nothing to drive -- just note that a treatment was requested.
        print("Apply treatment")
        return True

    def diagnostics(self) -> (bool, str):
        # With no hardware attached there is nothing that can fail.
        return (True, "Emitter passed diagnostics")
#
# A physical emitter expects a NI 9403
#
class PhysicalEmitter(Emitter):
    """Emitter bank driven through a physical NI 9403 digital-output module."""

    #
    # A P P L Y  T R E A T M E N T
    #
    def applyTreatment(self, distanceTravelled: int) -> bool:
        """
        Apply the treatment for the specified distance.

        NOTE(review): this body is debug scaffolding -- it ignores
        distanceTravelled, hardcodes module 'Mod4', and just writes two
        random 6-line patterns separated by sleeps.
        :param distanceTravelled: The distance covered
        """
        sample = [True, False]
        with ni.Task() as task:
            # Register the six emitter lines on the module.
            for line in range(1, MAX_EMITTERS + 1):
                task.do_channels.add_do_chan('Mod4/port0/line{}'.format(line))
            print('1 Channel 6 Sample Write: ')
            emitterValues = np.random.choice(sample, size=6)
            print(task.write(emitterValues))
            time.sleep(10)
            emitterValues = np.random.choice(sample, size=6)
            print(task.write(emitterValues))
            time.sleep(2)
        return True

    def diagnostics(self) -> (bool, str):
        """
        Execute emitter diagnostics: blink all emitters on/off five times.
        :return: (False, message) on failure, (True, message) otherwise
        """
        # Arrays for turning all the lines on and off
        diagnosticsOn = [True] * MAX_EMITTERS
        diagnosticsOff = [False] * MAX_EMITTERS
        with ni.Task() as task:
            for line in range(1, MAX_EMITTERS + 1):
                # Form a channel descriptor like "Mod4/port0/line3"
                channel = self.channelName(self.module, 0, line)
                task.do_channels.add_do_chan(channel)
            # Start from a known all-off state.
            task.write(diagnosticsOff)
            # Not much of a diagnostic here -- just turn the emitters on and off
            diagnosticResult = True
            diagnosticText = "Emitter diagnostics passed"
            try:
                for _ in range(5):
                    task.write(diagnosticsOn)
                    time.sleep(1)
                    task.write(diagnosticsOff)
                    time.sleep(1)
            except ni.errors.DaqError as error:
                diagnosticResult = False
                # BUG FIX: the message previously ended at ": " and never
                # included the underlying NI error.
                diagnosticText = "Error encountered in NI: " + str(error)
        return diagnosticResult, diagnosticText
def checkLineNames(line: str) -> (str, int):
    """
    Check that a line designation is valid and count how many lines it names.

    Accepts a single line (Mod4/port0/line3) or an ascending range
    (Mod4/port0/line0:5), as advertised by this utility's help text.

    BUG FIXES vs the original:
      - malformed input no longer raises UnboundLocalError (`numLines` and
        the message are always defined; callers test for numLines == 0);
      - single-line designations like "line3" no longer crash with
        IndexError (the ":"-split was always indexed at [1]);
      - the builtin `range` is no longer shadowed by a local.

    :param line: The line in NI syntax
    :return: (evaluation message, number of lines -- 0 when invalid)
    """
    elements = line.split("/")
    if len(elements) != 3 or not elements[2].startswith("line"):
        return ("Expected the line in the form ModN/portN/lineN or ModN/portN/lineN:M", 0)
    descriptor = elements[2][len("line"):]
    try:
        if ":" in descriptor:
            low, high = descriptor.split(":")
            numLines = int(high) - int(low) + 1
            if numLines < 1:
                # NI accepts reversed ranges, but this tool only supports low:high.
                return ("Expected an ascending range lineN:M with N <= M", 0)
        else:
            int(descriptor)  # validate the single line number
            numLines = 1
    except ValueError:
        return ("Could not parse the line numbers in '{}'".format(line), 0)
    return ("Line designation OK", numLines)
#
# The emitter class as a utility for turning on and off various emitters
#
if __name__ == "__main__":
    import argparse
    import sys

    parser = argparse.ArgumentParser("RIO Emitter Utility")
    # -on/-off are mutually exclusive; neither is required, so both may be absent.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-on', '--on', action="store_true", required=False, default=False, help="Turn the emitters on")
    group.add_argument('-off', '--off', action="store_true", required=False, default=False, help="Turn the emitters off")
    parser.add_argument('-e', '--emitter', action="store", required=True, help="Emitter in NI syntax, i.e., Mod4/port0/line3 or Mod4/port0/line0:5")
    arguments = parser.parse_args()

    # Check that the format of the lines is what we expect
    # (note: `evalutionText` is a typo for evaluationText; local-only name)
    evalutionText, lines = checkLineNames(arguments.emitter)
    if lines == 0:
        print(evalutionText)
        sys.exit(-1)
    # TODO(review): the -on/-off flags are parsed but never acted upon --
    # the utility currently exits without driving any emitter.
    sys.exit(0)
|
from app import db
from datetime import datetime
class Account(db.Model):
    """SQLAlchemy model for a client's account credentials."""

    __tablename__ = 'account'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(255), nullable=False)
    password = db.Column(db.String(500), nullable=False)
    available = db.Column(db.Boolean, default=True)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime)
    client_id = db.Column(db.Integer, db.ForeignKey('client.id'))
    account_type_id = db.Column(db.Integer, db.ForeignKey('account_type.id'))

    # NOTE(review): `message` is not a mapped column, so this constructor
    # never populates email/password (both NOT NULL) and inserts will fail.
    # This looks copy-pasted from another model -- confirm the intended
    # constructor fields before relying on it.
    def __init__(self, message, client_id):
        self.message = message
        self.client_id = client_id

    def __repr__(self):
        # NOTE(review): relies on the unmapped `message` attribute; rows
        # loaded from the database will not have it and this will raise.
        return '<Account %r>' % self.message
|
"""Basic Functionality for working with Microsoft Fluent Web components
See https://github.com/microsoft/fluentui/tree/master/packages/web-components
"""
class FluentComponent: # pylint: disable=too-few-public-methods
    """The FluentWidget and FluentLayout should inherit from this"""

    # JS module that registers the Fluent web components in the browser.
    __javascript_modules__ = ["https://unpkg.com/@fluentui/web-components@1.6.2"]
"""
This module shows how to ITERATE (i.e. loop) through a SEQUENCE
in ways OTHER than just going thru the sequence from BEGINNING to END.
It also shows how to SELECT items in a sequence, e.g.:
-- the items that are strings
-- the items that are even integers (e.g. 2, 4, 6, ...)
Note that:
-- SELECTING items that ARE even integers
is different from:
-- LOOKING only at items AT even-numbered indices.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Siwei Xu.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# ----------------------------------------------------------------------
# DONE: 2. READ the program below and RUN it.
#
# When you have read it, asking questions as needed,
# and you feel that you understand:
# -- how to loop through a sequence
# in ways OTHER than just from BEGINNING to END,
# -- how to SELECT items in sequence
# -- the distinction between:
# -- SELECTING items that ARE even integers and
# -- LOOKING only at items AT even-numbered indices.
# then:
# change the above TO DO to DONE.
# ----------------------------------------------------------------------
def main():
    """ Calls the TEST functions in this module. """
    for run_test in (run_test_sum_string_lengths,
                     run_test_sum_even_integers,
                     run_test_sum_items_at_even_indices):
        run_test()
# ----------------------------------------------------------------------
# The TEST functions are further down in the file,
# so that you can focus on the following examples.
# ----------------------------------------------------------------------
def sum_string_lengths(sequence, m, n):
    """
    What comes in:
      -- A sequence of strings
      -- Integers m and n, where 0 <= m <= n < length of the sequence
         (which ensures that m and n are safe to use as indices)
    What goes out:
      Returns the sum of the lengths of the strings at indices m to n,
      inclusive, with the restriction that the loop must go thru the
      sequence BACKWARDS.
    Side effects: None.
    Examples:
      For sequence ['five', 'OK', 'songs', 'roxanne', 'the police', '', 'three']:
        sum_string_lengths(sequence, 1, 3) returns 7 + 5 + 2 = 14
        sum_string_lengths(sequence, 2, 6) returns 5 + 0 + 10 + 7 + 5 = 27
    Type hints:
      :type sequence: list or tuple (of strings)
      :type m: int
      :type n: int
    """
    # ------------------------------------------------------------------
    # EXAMPLE 1. Iterates through PART of a sequence, BACKWARDS:
    # the index runs from n down to m (range with a step of -1).
    # ------------------------------------------------------------------
    total = 0
    for index in range(n, m - 1, -1):
        total += len(sequence[index])
    return total
def sum_even_integers(sequence):
    """
    What comes in:
      -- A sequence
    What goes out:
      Returns the sum of the items in the sequence that are integers
      AND are even.  Items of any other type (strings, floats, ...) are
      ignored.
    Side effects: None.
    Examples:
      sum_even_integers([3, 10, 6, 5, 5, 10]) returns 10 + 6 + 10 = 26
      sum_even_integers(['hello', 3, 10, 6, 'bye', 5, 7.33, 5, 10])
        still returns 26
    Type hints:
      :type sequence: list or tuple
    """
    # ------------------------------------------------------------------
    # EXAMPLE 2. SELECTS the items whose TYPE is int and whose value is
    # even (remainder 0 when divided by 2), and sums just those.
    # The `and` short-circuits, so % is only applied to actual ints.
    # ------------------------------------------------------------------
    return sum(item for item in sequence
               if type(item) is int and item % 2 == 0)
def sum_items_at_even_indices(sequence):
    """
    What comes in:
      -- A sequence of numbers.
    What goes out:
      Returns the sum of the numbers that are at EVEN INDICES
      (0, 2, 4, ...) of the sequence.
    Side effects: None.
    Examples:
      sum_items_at_even_indices([3, 10, 6, 5, 5, 10]) returns 3 + 6 + 5 = 14
      sum_items_at_even_indices([5.5, 10, 3, 2, 10, 0, 1])
        returns 5.5 + 3 + 10 + 1 = 19.5
    Type hints:
      :type sequence: list or tuple (of numbers)
    """
    # ------------------------------------------------------------------
    # EXAMPLE 3. Slicing with a step of 2 visits exactly the items at
    # even INDICES.  Contrast with the previous example, which selected
    # even VALUES.
    # ------------------------------------------------------------------
    return sum(sequence[0::2])
# ----------------------------------------------------------------------
# Just TEST functions below here.
# ----------------------------------------------------------------------
def run_test_sum_string_lengths():
    """ Tests the sum_string_lengths function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_string_lengths function:')
    print('--------------------------------------------------')
    seq = ['five', 'OK', 'songs', 'roxanne', 'the police', '', 'three']
    # (arguments, expected answer) pairs:
    for arguments, expected in (((seq, 1, 3), 14), ((seq, 2, 6), 27)):
        print('Returned, expected:', sum_string_lengths(*arguments), expected)
def run_test_sum_even_integers():
    """ Tests the sum_even_integers function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_even_integers function:')
    print('--------------------------------------------------')
    # All three inputs contain the same even integers (10, 6, 10),
    # so every call should return 26.
    test_inputs = ([3, 10, 6, 5, 5, 10],
                   ['hello', 3, 10, 6, 'bye', 5, 7.33, 5, 10],
                   [3, 9, 10, 99, 101, 5, 6, 5, 5, 10])
    for test_input in test_inputs:
        print('Returned, expected:', sum_even_integers(test_input), 26)
def run_test_sum_items_at_even_indices():
    """ Tests the sum_items_at_even_indices function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_items_at_even_indices function:')
    print('--------------------------------------------------')
    cases = (([3, 10, 6, 5, 5, 10], 14),
             ([5.5, 10, 3, 2, 10, 0, 1], 19.5))
    for test_input, expected in cases:
        print('Returned, expected:',
              sum_items_at_even_indices(test_input), expected)
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# NOTE: this runs at import time as well as when executed as a script.
# ----------------------------------------------------------------------
main()
|
#!/bin/env python
"""
Parses sesame analysis
"""
import logging
import pandas as pd
import re
from functools import reduce
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from os import path, listdir
from srl_nlp.framenet.description import EXample
from srl_nlp.framenet.parse_xml import NetXMLParser
logger = logging.getLogger(__name__)
# text = """Sent#1 :
# tokens and depparse:
# your contribution to goodwill will mean more than you may know .
# gold:
# frame:GOAL
# Trajector your contribution
# Landmark goodwill
#
# prediction:Trajector your contribution
# Landmark goodwill
#
# 2.0 / 2.0 / 2.0
# gold:
# frame:GIVING
# Recipient to goodwill
# Donor your
#
# prediction:Recipient to goodwill
# Donor your
#
# 2.0 / 2.0 / 2.0
# gold:
# frame:AWARENESS
# Cognizer you
#
# prediction:Cognizer more than you
#
# 0.0 / 1.0 / 1.0
# gold:
# frame:PURPOSE
# Means your contribution to goodwill
# Value more than you may know
#
# prediction:Agent your contribution to goodwill
# Goal more than you may know
#
# 0.0 / 2.0 / 2.0
# gold:
# frame:INCREMENT
# Class than you may know
#
# prediction:Class than you
#
# 0.0 / 1.0 / 1.0
# gold:
# frame:LIKELIHOOD
# Hypothetical_event you
# Hypothetical_event know
#
# prediction:Hypothetical_event more than you
# Hypothetical_event know
#
# 0.0 / 1.0 / 1.0
# Total: 4.0 / 9.0 / 9.0
# """
sent_pattern = re.compile(r"""Sent#(\d+) :
tokens and depparse:
.*
(gold:
frame:(?:.*)
[\s\S]*?
prediction:[\s\S]*?
(?:\d*\.?\d+) / (?:\d*\.?\d+) / (?:\d*\.?\d))+
\t+Total: (?:\d*\.?\d+) / (?:\d*\.?\d+) / (?:\d*\.?\d)""")
f_pattern = re.compile(r"""gold:
frame:(\w*)
([\s\S]*?)
prediction:([\s\S]*?)
(\d*\.?\d+) / (\d*\.?\d+) / (\d*\.?\d)""")
def parse_analysis(text, suffix=''):
    # type: (str, str) -> pd.DataFrame
    """Parse a sesame prediction-analysis log into a long-format DataFrame.

    One row per (sentence, frame) score entry; the tp/fn/fp columns are
    suffixed with `suffix` so frames from different runs can be merged.
    """
    rows = []
    for sentence in sent_pattern.findall(text):
        sentence_id = sentence[0]
        for score in f_pattern.findall(sentence[1]):
            frame_name = score[0]
            tp, fn, fp = score[3], score[4], score[5]
            logger.debug(",".join([sentence_id, frame_name, tp, fn, fp]))
            rows.append((int(sentence_id), frame_name,
                         float(tp), float(fn), float(fp)))
    return pd.DataFrame({
        'sid': [row[0] for row in rows],
        'frame': [row[1] for row in rows],
        'tp{}'.format(suffix): [row[2] for row in rows],
        'fn{}'.format(suffix): [row[3] for row in rows],
        'fp{}'.format(suffix): [row[4] for row in rows]
    })
def frame_example_count(fn, foo=lambda x: x):
    """Map each frame name (transformed by `foo`) to the number of EXample
    elements found in the definitions of its core and peripheral FEs."""
    def count_ex(frame):
        fes = frame.coreFEs + frame.peripheralFEs
        return sum(len(fe.definition.get_elements(EXample)) for fe in fes)

    return {foo(frame.name): count_ex(frame) for frame in fn}
def plot_frame_example_hist(df, title="Histogram of examples per frame", bins=30, save_fig=None, show_plot=True):
    """Plot a histogram of per-frame example counts found in `df`.

    :param df: DataFrame with 'frame' and 'frame_example_count' columns.
    :param title: plot title.
    :param bins: number of histogram bins.
    :param save_fig: optional path; when given the figure is saved there.
    :param show_plot: when True, display the figure before closing it.
    """
    # BUG FIX: drop_duplicates(inplace=True) was called on a column slice of
    # `df`, which operates on a view/copy (SettingWithCopyWarning) and may
    # not take effect; build an explicit deduplicated copy instead.
    frame_count = df[["frame", "frame_example_count"]].drop_duplicates()  # type: pd.DataFrame
    axis = frame_count.iloc[:, 1].hist(grid=False, bins=bins)  # type: Axes
    axis.set_title(title)
    axis.set_ylabel("Frequency")
    axis.set_xlabel("Examples per Frame")
    fig = axis.figure
    if save_fig is not None:
        fig.savefig(save_fig)
    if show_plot:
        plt.show()
    plt.close(fig)
def plot_fn_frame_example_hist(fn_dict, title="Histogram of examples per frame", bins=30, save_fig=None,
                               show_plot=True):
    """Plot a histogram of example counts taken from a {frame: count} dict."""
    counts = list(fn_dict.values())
    fig, axis = plt.subplots()
    axis.hist(counts, bins=bins)
    axis.set_title(title)
    axis.set_ylabel("Frequency")
    axis.set_xlabel("Examples per Frame")
    if save_fig is not None:
        fig.savefig(save_fig)
    if show_plot:
        plt.show()
    plt.close(fig)
if __name__ == '__main__':
    import argparse
    from sys import argv
    from srl_nlp.logger_config import add_logger_args, config_logger

    def parse_args():
        """Builds and applies the command-line parser for this script."""
        parser = argparse.ArgumentParser(description='Processes sesame output')
        parser.add_argument('root_path', help='path where to write the reports')
        parser.add_argument('--framenet_path', default='srl_nlp/framenet/fndata-1.5', help='Path to FrameNet Folder')
        parser.add_argument('--out_df', default=None, help='Path to store the generated df')
        add_logger_args(parser)
        args = parser.parse_args(argv[1:])
        return args

    def main():
        """Collects per-annotator sesame logs, merges them into one DataFrame,
        annotates it with FrameNet example counts, and optionally pickles it."""
        args = parse_args()
        config_logger(args)
        exp_path = args.root_path
        dfs = []
        logger.info("Reading from '{}'".format(exp_path))
        # One sub-directory per annotation unit; 'test' is excluded.
        for d_id, au_name in enumerate(sorted(listdir(exp_path))):
            au_path = path.join(exp_path, au_name)
            if path.isdir(au_path) and au_name != 'test':
                full_path = path.join(au_path, 'methods', 'sesame', 'logs', 'argid', 'argid-prediction-analysis.log')
                if path.isfile(full_path):
                    with open(full_path, 'r') as f:
                        text = f.read()
                    # Suffix score columns with the annotator name so the
                    # merge below keeps them distinct.
                    df = parse_analysis(text, '_{}'.format(au_name))
                    df = df.groupby(by=['frame', 'sid'], as_index=False) \
                        .sum() \
                        .sort_values(['sid', 'frame'])
                    dfs.append(df)
        df = reduce(lambda x, y: pd.merge(x, y, on=['frame', 'sid']), dfs)  # type: pd.DataFrame
        logger.info("Parsing framenet from '{}'".format(args.framenet_path))
        parser = NetXMLParser()
        fn = parser.parse(args.framenet_path)
        # Frame names are upper-cased to match how the logs spell them.
        frame_ex_count = frame_example_count(fn, str.upper)
        df['frame_example_count'] = df['frame'].map(frame_ex_count)
        if args.out_df is not None:
            logger.info("Storing df as '{}'".format(args.out_df))
            df.to_pickle(args.out_df)

    try:
        main()
    except KeyboardInterrupt:
        logger.error('Halted by the user')
        exit(1)
    except OSError as e:
        logger.error('Problem reading/writing files')
        logger.error(e)
        raise e
|
"""
Classes for computing nucleosome occupancy
@author: Alicia Schep, Greenleaf Lab, Stanford University
"""
from scipy import signal, optimize, stats
import numpy as np
import matplotlib.pyplot as plt
from pyatac.fragmentsizes import FragmentSizes
from pyatac.tracks import Track, CoverageTrack
from pyatac.chunk import Chunk
from pyatac.utils import smooth, call_peaks, read_chrom_sizes_from_fasta
from pyatac.chunkmat2d import FragmentMat2D, BiasMat2D
from pyatac.bias import InsertionBiasTrack, PWM
from scipy.special import gamma
class FragmentMixDistribution:
    """Class for modelling insert size distribution as a mixture of an
    NFR (nucleosome-free) component and a nucleosomal component."""

    def __init__(self, lower = 0, upper = 2000):
        # Inclusive lower / exclusive upper bound on fragment sizes modelled.
        self.lower = lower
        self.upper = upper

    def getFragmentSizes(self, bamfile, chunklist = None):
        """Compute the observed fragment size distribution from a BAM file."""
        self.fragmentsizes = FragmentSizes(self.lower, self.upper)
        self.fragmentsizes.calculateSizes(bamfile, chunks = chunklist)

    def modelNFR(self, boundaries = (35, 115)):
        """Model NFR distribution with gamma distribution.

        Fits a shifted gamma to sizes below boundaries[1]; the nucleosomal
        component is what remains above that boundary after subtracting the
        fitted NFR model.
        """
        # Anchor the fit window at the modal fragment size below boundaries[1].
        b = np.where(self.fragmentsizes.get(self.lower, boundaries[1]) ==
                     max(self.fragmentsizes.get(self.lower, boundaries[1])))[0][0] + self.lower
        boundaries = (min(boundaries[0], b), boundaries[1])
        x = np.arange(boundaries[0], boundaries[1])
        y = self.fragmentsizes.get(boundaries[0], boundaries[1])
        def gamma_fit(X, o, p):
            # Shifted gamma density: o=offset, p=(shape k, scale theta, amplitude a).
            k = p[0]
            theta = p[1]
            a = p[2]
            x_mod = X - o
            res = np.zeros(len(x_mod))
            # For k >= 1 the density is defined at x_mod == 0; otherwise it
            # diverges there, so exclude that point.
            if k >= 1:
                nz = x_mod >= 0
            else:
                nz = x_mod > 0
            res[nz] = a * x_mod[nz]**(k-1) * np.exp(-x_mod[nz]/theta) / (theta**k * gamma(k))
            return res
        # BUG FIX: np.float was removed in NumPy 1.24 (deprecated since
        # 1.20); use np.inf directly for the initial "worst" scores.
        res_score = np.ones(boundaries[0] + 1) * np.inf
        res_param = [0 for i in range(boundaries[0] + 1)]
        pranges = ((0.01, 10), (0.01, 150), (0.01, 1))
        # Brute-force the best least-squares gamma fit over candidate offsets.
        for i in range(15, boundaries[0] + 1):
            f = lambda p: np.sum((gamma_fit(x, i, p) - y)**2)
            tmpres = optimize.brute(f, pranges, full_output=True,
                                    finish=optimize.fmin)
            res_score[i] = tmpres[1]
            res_param[i] = tmpres[0]
        whichres = np.argmin(res_score)
        res = res_param[whichres]
        self.nfr_fit0 = FragmentSizes(self.lower, self.upper,
                                      vals = gamma_fit(np.arange(self.lower, self.upper), whichres, res_param[whichres]))
        # NFR model: observed sizes below the boundary spliced with the fitted tail.
        nfr = np.concatenate((self.fragmentsizes.get(self.lower, boundaries[1]),
                              self.nfr_fit0.get(boundaries[1], self.upper)))
        nfr[nfr == 0] = min(nfr[nfr != 0]) * 0.01  # avoid exact zeros downstream
        self.nfr_fit = FragmentSizes(self.lower, self.upper, vals = nfr)
        # Nucleosomal component: observed minus NFR model above the boundary.
        nuc = np.concatenate((np.zeros(boundaries[1] - self.lower),
                              self.fragmentsizes.get(boundaries[1], self.upper) -
                              self.nfr_fit.get(boundaries[1], self.upper)))
        nuc[nuc <= 0] = min(min(nfr) * 0.1, min(nuc[nuc > 0]) * 0.001)
        self.nuc_fit = FragmentSizes(self.lower, self.upper, vals = nuc)

    def plotFits(self, filename=None):
        """Plot the observed distribution against the fitted components;
        when `filename` is given, also save the curves as a text table."""
        fig = plt.figure()
        plt.plot(list(range(self.lower, self.upper)), self.fragmentsizes.get(),
                 label = "Observed")
        plt.plot(list(range(self.lower, self.upper)), self.nfr_fit0.get(), label = "NFR Fit")
        plt.plot(list(range(self.lower, self.upper)), self.nuc_fit.get(), label = "Nucleosome Model")
        plt.plot(list(range(self.lower, self.upper)), self.nfr_fit.get(), label = "NFR Model")
        plt.legend()
        plt.xlabel("Fragment size")
        plt.ylabel("Relative Frequency")
        if filename:
            fig.savefig(filename)
            plt.close(fig)
            # Also save text output!
            filename2 = ".".join(filename.split(".")[:-1] + ['txt'])
            out = np.vstack((self.fragmentsizes.get(),
                             self.nuc_fit.get(), self.nfr_fit.get()))
            np.savetxt(filename2, out, delimiter="\t")
        else:
            fig.show()
class OccupancyCalcParams:
    """Parameters used when determining nucleosome occupancy.

    Holds the normalized nucleosomal / NFR insert-size probabilities, the
    grid of candidate occupancy values, and the likelihood-ratio cutoff for
    the confidence interval.
    """
    def __init__(self, lower, upper , insert_dist, ci = 0.9):
        self.lower = lower
        self.upper = upper
        raw_nuc = insert_dist.nuc_fit.get(lower, upper)
        raw_nfr = insert_dist.nfr_fit.get(lower, upper)
        # Normalize each component into a proper probability distribution.
        self.nuc_probs = raw_nuc / np.sum(raw_nuc)
        self.nfr_probs = raw_nfr / np.sum(raw_nfr)
        # Candidate occupancy values on a 1% grid.
        self.alphas = np.linspace(0, 1, 101)
        self.l = len(self.alphas)
        # Chi-square quantile (1 dof) bounding the ci confidence interval.
        self.cutoff = stats.chi2.ppf(ci, 1)
def calculateOccupancy(inserts, bias, params):
    """function to calculate occupancy based on insert distribution
    also takes OccupancyCalcParams as input

    Returns (occupancy MLE, CI lower bound, CI upper bound).
    """
    # Bias-adjust both component distributions and renormalize.
    nuc_probs = params.nuc_probs * bias
    nuc_probs = nuc_probs / np.sum(nuc_probs)
    nfr_probs = params.nfr_probs * bias
    nfr_probs = nfr_probs / np.sum(nfr_probs)
    # Log-likelihood of the observed inserts at each candidate occupancy.
    logliks = np.empty(params.l)
    for j in range(params.l):
        alpha = params.alphas[j]
        mixture = np.log(alpha * nuc_probs + (1 - alpha) * nfr_probs)
        logliks[j] = np.sum(mixture * inserts)
    logliks[np.isnan(logliks)] = -float('inf')
    best = np.argmax(logliks)
    occ = params.alphas[best]
    # Profile-likelihood confidence interval: keep alphas whose likelihood
    # ratio statistic stays under the chi-square cutoff.
    ratios = 2 * (logliks[best] - logliks)
    inside = np.where(ratios < params.cutoff)[0]
    lower = params.alphas[inside.min()]
    upper = params.alphas[inside.max()]
    return occ, lower, upper
class OccupancyTrack(Track):
    """Class for computing nucleosome occupancy"""
    def __init__(self, chrom, start, end):
        Track.__init__(self, chrom, start, end, "occupancy")
    def calculateOccupancyMLE(self, mat, bias_mat, params):
        """Calculate Occupancy track.

        mat -- fragment matrix covering at least params.flank bp beyond
               [self.start, self.end) on each side
        bias_mat -- matching bias matrix
        params -- object providing flank/upper/step/halfstep/occ_calc_params
        """
        offset=self.start - mat.start
        if offset<params.flank:
            # BUGFIX: the original "raise Exception(...)(offset)" tried to call
            # the exception instance, producing a TypeError instead of the
            # intended error; report the offset inside the message instead.
            raise Exception("For calculateOccupancyMLE, mat does not have sufficient flanking regions (offset = {})".format(offset))
        # NaN until computed; positions with no inserts stay NaN.
        self.vals=np.ones(self.end - self.start)*float('nan')
        self.lower_bound = np.ones(self.end - self.start)*float('nan')
        self.upper_bound =np.ones(self.end - self.start)*float('nan')
        # Evaluate occupancy every params.step bp using a +/- params.flank bp
        # window, writing the result into the half-step neighbourhood of the
        # window centre.
        for i in range(params.halfstep,len(self.vals),params.step):
            new_inserts = np.sum(mat.get(lower = 0, upper = params.upper,
                                         start = self.start+i-params.flank, end = self.start+i+params.flank+1),
                                 axis = 1)
            new_bias = np.sum(bias_mat.get(lower = 0, upper = params.upper,
                                           start = self.start+i-params.flank, end = self.start+i+params.flank+1),
                              axis = 1)
            if sum(new_inserts)>0:
                left = i - params.halfstep
                right = min(i + params.halfstep + 1, len(self.vals))
                self.vals[left:right],self.lower_bound[left:right],self.upper_bound[left:right] = calculateOccupancy(new_inserts, new_bias, params.occ_calc_params)
    def makeSmoothed(self, window_len = 121, sd = 20):
        """Gaussian-smooth the occupancy values and both CI bounds."""
        self.smoothed_vals = smooth(self.vals, window_len, window = "gaussian", sd = sd,
                                    mode = "same", norm = True)
        self.smoothed_lower = smooth(self.lower_bound, window_len, window = "gaussian", sd = sd,
                                     mode = "same", norm = True)
        self.smoothed_upper = smooth(self.upper_bound, window_len, window = "gaussian", sd = sd,
                                     mode = "same", norm = True)
class OccPeak(Chunk):
    """Single-base occupancy peak with smoothed occupancy stats and coverage."""
    def __init__(self, pos, chunk):
        """Record the peak at position pos using the tracks stored on chunk."""
        self.chrom = chunk.chrom
        self.start = pos
        self.end = pos + 1
        self.strand = "*"
        idx = pos - chunk.occ.start
        self.occ = chunk.occ.smoothed_vals[idx]
        self.occ_lower = chunk.occ.smoothed_lower[idx]
        self.occ_upper = chunk.occ.smoothed_upper[idx]
        self.reads = chunk.cov.get(pos = pos)
    def asBed(self):
        """Return this peak as a tab-delimited BED-style line."""
        fields = [self.chrom, self.start, self.end,
                  self.occ, self.occ_lower, self.occ_upper, self.reads]
        return "\t".join(str(field) for field in fields)
    def write(self, handle):
        """write bed line for peak"""
        handle.write(self.asBed() + "\n")
class OccupancyParameters:
    """Parameters related to occupancy determination."""
    def __init__(self, insert_dist, upper, fasta, pwm, sep = 120, min_occ = 0.1, flank = 60,
                 out = None, bam = None, ci = 0.9, step = 5):
        self.sep = sep
        self.chrs = read_chrom_sizes_from_fasta(fasta)
        self.fasta = fasta
        # Bias PWM is only loaded when a reference fasta is supplied.
        if fasta is not None:
            self.pwm = PWM.open(pwm)
        self.window = flank * 2 + 1
        self.min_occ = min_occ
        self.flank = flank
        self.bam = bam
        self.upper = upper
        self.occ_calc_params = OccupancyCalcParams(0, upper, insert_dist, ci = ci)
        # Step size must be odd so each window is centred on a single base.
        if step % 2 == 0:
            step -= 1
        self.step = step
        self.halfstep = (self.step - 1) // 2
class OccChunk(Chunk):
    """Compute occupancy and call occupancy peaks for one genomic chunk."""
    def __init__(self, chunk):
        self.start = chunk.start
        self.end = chunk.end
        self.chrom = chunk.chrom
        self.peaks = {}
        self.nfrs = []
    def getFragmentMat(self):
        """Build the fragment matrix for this chunk (with flanking regions)."""
        self.mat = FragmentMat2D(self.chrom, self.start - self.params.flank,
                                 self.end + self.params.flank, 0, self.params.upper)
        self.mat.makeFragmentMat(self.params.bam)
    def makeBiasMat(self):
        """Build the bias matrix; computed from the PWM when a fasta is given."""
        self.bias_mat = BiasMat2D(self.chrom, self.start - self.params.flank,
                                  self.end + self.params.flank, 0, self.params.upper)
        if self.params.fasta is not None:
            pad = self.params.window + self.params.upper // 2
            bias_track = InsertionBiasTrack(self.chrom, self.start - pad,
                                            self.end + pad + 1, log = True)
            bias_track.computeBias(self.params.fasta, self.params.chrs, self.params.pwm)
            self.bias_mat.makeBiasMat(bias_track)
    def calculateOcc(self):
        """Calculate the occupancy track for this chunk and smooth it."""
        self.occ = OccupancyTrack(self.chrom, self.start, self.end)
        self.occ.calculateOccupancyMLE(self.mat, self.bias_mat, self.params)
        self.occ.makeSmoothed(window_len = self.params.window, sd = self.params.flank / 3.0)
    def getCov(self):
        """Get read coverage for the chunk."""
        self.cov = CoverageTrack(self.chrom, self.start, self.end)
        self.cov.calculateCoverage(self.mat, 0, self.params.upper, self.params.window)
    def callPeaks(self):
        """Call peaks of the smoothed occupancy profile."""
        candidates = call_peaks(self.occ.smoothed_vals, sep = self.params.sep,
                                min_signal = self.params.min_occ)
        for candidate in candidates:
            peak = OccPeak(candidate + self.start, self)
            # Keep peaks whose CI lower bound clears the threshold and that
            # have at least one supporting read.
            if peak.occ_lower > self.params.min_occ and peak.reads > 0:
                self.peaks[candidate] = peak
    def getNucDist(self):
        """Get the nucleosomal insert-size distribution around called peaks."""
        nuc_dist = np.zeros(self.params.upper)
        for key in list(self.peaks.keys()):
            center = self.peaks[key].start
            sub = self.mat.get(start = center - self.params.flank,
                               end = center + 1 + self.params.flank)
            col_sums = np.sum(sub, axis = 1)
            nuc_dist += col_sums / float(sum(col_sums))
        return nuc_dist
    def process(self, params):
        """Process chunk -- calculate occupancy, get coverage, call peaks."""
        self.params = params
        self.getFragmentMat()
        self.makeBiasMat()
        self.calculateOcc()
        self.getCov()
        self.callPeaks()
    def removeData(self):
        """Remove data from chunk -- deletes all attributes."""
        for name in list(self.__dict__.keys()):
            delattr(self, name)
|
# C# code generator: emits ITest__MinNN__Invoke message classes for
# NN = 01..20 by substituting '##' in the template below.
messageTemplate = r'''
public class ITest__Min##__Invoke : IInterfacedMessage, IAsyncInvokable
{
    public int a;
    public int b;
    public Type GetInterfaceType() { return typeof(ITest); }
    public async Task<IValueGetable> Invoke(object target)
    {
        var __v = await ((ITest)target).Min##(a, b);
        return (IValueGetable)(new Temp__Result { v = __v });
    }
}'''
for i in range(1, 21):
    # BUGFIX: Python 2 print statement -> print() call (valid on both 2 and 3).
    print(messageTemplate.replace('##', '%02d' % i))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 22:34:19 2018
@author: chenxiaoxu
"""
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
import sklearn.metrics as metrics
from datetime import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import glob, os
# NOTE: 'foward' (sic) below matches the existing directory layout on disk.
DirPath = '~/code/preprocessing/result/raw_feature_CV/'
saveDirPath = '~/code/preprocessing/result/processed_feature_CV/'
# Only the 30-minute-ahead horizon is currently processed; previous runs
# used [6, 12, 18, 24, 30].
forecastList = [30]

def main():
    """Concatenate per-file feature CSVs into one DataX/DataY pair per horizon.

    Each CSV's column 0 is treated as an index, the last column as the target,
    and the remaining columns as features.
    """
    # BUGFIX: '~' is not expanded by os.path.isdir/os.makedirs/glob; the
    # original created a literal '~' directory in the CWD and never found
    # the input files. Expand the user home explicitly.
    in_dir = os.path.expanduser(DirPath)
    out_dir = os.path.expanduser(saveDirPath)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
        os.chmod(out_dir, 0o755)
    for forecast in forecastList:
        inputFileList = sorted(glob.glob(in_dir + 'foward' + str(forecast) + '/' + '*csv'))
        x_parts = []
        y_parts = []
        for inputFile in inputFileList:
            print(inputFile)
            OriginData = pd.read_csv(inputFile, header=0)
            Data = np.array(OriginData)
            x_parts.append(Data[:, 1:-1])
            y_parts.append(Data[:, -1])
        # BUGFIX: the original raised NameError on DataX when no CSVs matched;
        # skip horizons with no input instead of crashing.
        if not x_parts:
            continue
        # Concatenate once instead of re-concatenating per file (O(n) vs O(n^2)).
        DataX = np.concatenate(x_parts, axis=0)
        DataY = np.concatenate(y_parts, axis=0)
        np.save(out_dir + 'DataX_forward' + str(forecast) + '.npy', DataX)
        np.save(out_dir + 'DataY_forward' + str(forecast) + '.npy', DataY)

if __name__ == '__main__':
    main()
|
from base64 import b64decode
import pytest
from tests.unit.conftest import CONFIG_PATH, FIXTURE_PATH, config_path, fixture_path
from whispers import core
from whispers.cli import parse_args
from whispers.secrets import WhisperSecrets
@pytest.mark.parametrize("configfile", ["exclude_keys.yml", "exclude_values.yml"])
@pytest.mark.parametrize("src", ["excluded.yml", "excluded.json", "excluded.xml"])
def test_exclude_by_keys_and_values(configfile, src):
    """Only the non-excluded secret should be reported for excluded.* fixtures."""
    args = parse_args([fixture_path(src)])
    args.config = core.load_config(config_path(configfile), FIXTURE_PATH)
    secrets = core.run(args)
    first = next(secrets)
    assert first.key == "hardcoded_password"
    # The generator must be exhausted after the single expected secret.
    with pytest.raises(StopIteration):
        next(secrets)
@pytest.mark.parametrize(
    ("src", "expected"),
    [
        ("privatekeys.yml", ["access", "key", "rsa", "dsa", "ec", "openssh"]),
        ("privatekeys.json", ["access", "key", "rsa", "dsa", "ec", "openssh"]),
        ("privatekeys.xml", ["access", "key", "rsa", "dsa", "ec", "openssh"]),
        ("aws.yml", ["aws_id", "aws_key", "aws_token"]),
        ("aws.json", ["aws_id", "aws_key", "aws_token"]),
        ("aws.xml", ["aws_id", "aws_key", "aws_token"]),
        ("jenkins.xml", ["noncompliantApiToken", "noncompliantPasswordHash"]),
        ("cloudformation.yml", ["NoncompliantDBPassword"]),
        ("cloudformation.json", ["NoncompliantDBPassword"]),
    ],
)
def test_detection_by_key(src, expected):
    """Detected secret keys must match the expected set for each fixture."""
    args = parse_args([fixture_path(src)])
    secrets = core.run(args)
    found = {secret.key for secret in secrets}
    assert found == set(expected)
@pytest.mark.parametrize(
    ("src", "count"),
    [
        ("custom.yml", 0),
        ("custom.json", 0),
        ("custom.xml", 0),
        ("hardcoded.yml", 5),
        ("hardcoded.json", 5),
        ("hardcoded.xml", 5),
        ("passwords.yml", 4),
        ("passwords.json", 4),
        ("passwords.xml", 4),
        ("placeholders.yml", 0),
        ("placeholders.json", 0),
        ("placeholders.xml", 0),
        ("apikeys.yml", 10),
        ("apikeys.json", 10),
        ("apikeys.xml", 10),
        (".npmrc", 3),
        (".pypirc", 1),
        ("pip.conf", 2),
        ("integration.conf", 5),
        ("integration.yml", 5),
        ("integration.json", 5),
        ("integration.xml", 5),
        ("settings.conf", 1),
        ("settings.cfg", 1),
        ("settings.ini", 1),
        ("settings.env", 1),
        ("Dockerfile", 3),
        (".dockercfg", 1),
        ("empty.dockercfg", 0),
        ("beans.xml", 3),
        ("beans.xml.dist", 3),
        ("beans.xml.template", 3),
        ("jdbc.xml", 3),
        (".htpasswd", 2),
        (".aws/credentials", 3),
        ("falsepositive.yml", 4),
        ("language.sh", 14),
        ("language.py", 11),
        ("language.py2", 0),
        ("language.js", 4),
        ("language.java", 3),
        ("language.go", 9),
        ("language.php", 4),
        ("language.html", 3),
        ("plaintext.txt", 2),
        ("uri.yml", 2),
        ("java.properties", 3),
        ("webhooks.yml", 3),
        ("creditcards.yml", 3),
        ("gitkeys.yml", 5),
    ],
)
def test_detection_by_value(src, count):
    """Verify the number of detections and that values look hardcoded.

    BUGFIX: the parametrized `count` was previously unused, so the expected
    number of detections per fixture was never actually asserted.
    """
    args = parse_args([fixture_path(src)])
    args.config = core.load_config(CONFIG_PATH.joinpath("detection_by_value.yml"))
    secrets = core.run(args)
    result = list(map(lambda x: x.value, secrets))
    assert len(result) == count
    for value in result:
        if value.isnumeric():
            continue
        assert "hardcoded" in value.lower() or b"hardcoded" in b64decode(value)
def test_detection_by_filename():
    """Every filename-based fixture should contribute a detected value."""
    special_files = [
        ".aws/credentials",
        ".htpasswd",
        ".npmrc",
        ".pypirc",
        "connection.config",
        "integration.conf",
        "pip.conf",
        "settings.cfg",
        "settings.conf",
        "settings.env",
        "settings.ini",
    ]
    expected = [fixture_path(name) for name in special_files]
    args = parse_args([fixture_path()])
    args.config = core.load_config(CONFIG_PATH.joinpath("detection_by_filename.yml"))
    secrets = core.run(args)
    result = [secret.value for secret in secrets]
    for exp in expected:
        assert exp in result
@pytest.mark.parametrize(
    ("src", "count", "rule_id"),
    [
        ("language.html", 3, "comments"),
    ],
)
def test_detection_by_rule(src, count, rule_id):
    """Verify detections when restricting whispers to a single rule.

    BUGFIX: the parametrized `count` was previously unused; assert it so the
    expected number of detections is actually checked.
    """
    args = parse_args(["-r", rule_id, fixture_path(src)])
    args.config = core.load_config(CONFIG_PATH.joinpath("detection_by_value.yml"))
    secrets = core.run(args)
    result = list(map(lambda x: x.value.lower(), secrets))
    assert len(result) == count
    for value in result:
        if value.isnumeric():
            continue
        assert "hardcoded" in value
@pytest.mark.parametrize(
    ("key", "value", "expectation"),
    [
        (None, None, False),
        ("", "", False),
        ("", "$value", False),
        ("", "{{value}}", False),
        ("", "{value}", False),
        ("", "{whispers~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~}", False),
        ("", "{d2hpc3BlcnN+fn5+fn5+fn5+fn5+fn5+fn5+fn5+fn5+fn5+fn5+fn5+}", True),
        ("", "${value$}", False),
        ("", "<value>", False),
        ("", "{value}", False),
        ("", "null", False),
        ("", "!Ref Value", False),
        ("", "{value}", False),
        ("", "/path/value", False),
        ("whispers", "WHISPERS", False),
        ("label", "WhispersLabel", False),
        ("SECRET_VALUE_KEY", "whispers", False),
        ("whispers", "SECRET_VALUE_PLACEHOLDER", False),
        ("secret", "whispers", True),
    ],
)
def test_is_static(key, value, expectation):
    """is_static() should flag only genuinely hardcoded key/value pairs."""
    args = parse_args([fixture_path()])
    args.config = core.load_config(CONFIG_PATH.joinpath("example.yml"))
    detector = WhisperSecrets(args)
    assert detector.is_static(key, value) == expectation
|
# -*- coding: utf-8 -*-
# Copyright 2020- Datastax, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import uuid
import datetime
import traceback
import medusa.config
import medusa.utils
from medusa.orchestration import Orchestration
from medusa.monitoring import Monitoring
from medusa.cassandra_utils import CqlSessionProvider, Cassandra
from medusa.storage import Storage
from medusa.network.hostname_resolver import HostnameResolver
def orchestrate(config, backup_name_arg, seed_target, stagger, enable_md5_checks, mode, temp_dir,
                parallel_snapshots, parallel_uploads):
    """Run a cluster-wide backup and emit duration/error metrics.

    Creates and executes a BackupJob, then reports the backup duration and a
    zero error metric. On any failure, sends an error metric, attempts a
    best-effort cleanup of the snapshots taken on all nodes, and exits the
    process with status 1.
    """
    backup = None
    # Default the backup name to a minute-resolution timestamp.
    backup_name = backup_name_arg or datetime.datetime.now().strftime('%Y%m%d%H%M')
    monitoring = Monitoring(config=config.monitoring)
    try:
        backup_start_time = datetime.datetime.now()
        if not config.storage.fqdn:
            err_msg = "The fqdn was not provided nor calculated properly."
            logging.error(err_msg)
            raise Exception(err_msg)
        if not temp_dir.is_dir():
            err_msg = '{} is not a directory'.format(temp_dir)
            logging.error(err_msg)
            raise Exception(err_msg)
        try:
            # Try to get a backup with backup_name. If it exists then we cannot take another backup with that name
            storage = Storage(config=config.storage)
            cluster_backup = storage.get_cluster_backup(backup_name)
            if cluster_backup:
                err_msg = 'Backup named {} already exists.'.format(backup_name)
                logging.error(err_msg)
                raise Exception(err_msg)
        except KeyError:
            # KeyError means no backup with this name exists yet -- proceed.
            info_msg = 'Starting backup {}'.format(backup_name)
            logging.info(info_msg)
        backup = BackupJob(config, backup_name, seed_target, stagger, enable_md5_checks, mode, temp_dir,
                           parallel_snapshots, parallel_uploads)
        backup.execute()
        backup_end_time = datetime.datetime.now()
        backup_duration = backup_end_time - backup_start_time
        logging.debug('Emitting metrics')
        logging.info('Backup duration: {}'.format(backup_duration.seconds))
        # Duration metric first, then an explicit zero on the error metric.
        tags = ['medusa-cluster-backup', 'cluster-backup-duration', backup_name]
        monitoring.send(tags, backup_duration.seconds)
        tags = ['medusa-cluster-backup', 'cluster-backup-error', backup_name]
        monitoring.send(tags, 0)
        logging.debug('Done emitting metrics.')
        logging.info('Backup of the cluster done.')
    except Exception as e:
        # Any failure: record the error metric, then try to clean up.
        tags = ['medusa-cluster-backup', 'cluster-backup-error', backup_name]
        monitoring.send(tags, 1)
        logging.error('This error happened during the cluster backup: {}'.format(str(e)))
        traceback.print_exc()
        if backup is not None:
            # Best-effort: ask every node to drop the snapshot taken for
            # this failed backup.
            err_msg = 'Something went wrong! Attempting to clean snapshots and exit.'
            logging.error(err_msg)
            delete_snapshot_command = ' '.join(backup.cassandra.delete_snapshot_command(backup.snapshot_tag))
            pssh_run_success_cleanup = backup.orchestration_uploads\
                .pssh_run(backup.hosts,
                          delete_snapshot_command,
                          hosts_variables={})
            if pssh_run_success_cleanup:
                info_msg = 'All nodes successfully cleared their snapshot.'
                logging.info(info_msg)
            else:
                err_msg_cleanup = 'Some nodes failed to clear the snapshot. Cleaning snapshots manually is recommended'
                logging.error(err_msg_cleanup)
        sys.exit(1)
class BackupJob(object):
    """Coordinates a cluster-wide backup: snapshot every node, then upload."""
    def __init__(self, config, backup_name, seed_target, stagger, enable_md5_checks, mode, temp_dir,
                 parallel_snapshots, parallel_uploads):
        self.id = uuid.uuid4()
        # TODO expose the argument below (Note that min(1000, <number_of_hosts>) will be used)
        self.orchestration_snapshots = Orchestration(config, parallel_snapshots)
        self.orchestration_uploads = Orchestration(config, parallel_uploads)
        self.config = config
        self.backup_name = backup_name
        self.stagger = stagger
        self.seed_target = seed_target
        self.enable_md5_checks = enable_md5_checks
        self.mode = mode
        self.temp_dir = temp_dir
        # Per-job scratch directory, unique via the job's uuid.
        self.work_dir = self.temp_dir / 'medusa-job-{id}'.format(id=self.id)
        self.hosts = {}
        self.cassandra = Cassandra(config)
        self.snapshot_tag = '{}{}'.format(self.cassandra.SNAPSHOT_PREFIX, self.backup_name)
        resolve_ips = medusa.config.evaluate_boolean(self.config.cassandra.resolve_ip_addresses)
        self.fqdn_resolver = HostnameResolver(resolve_ips)
    def execute(self):
        """Two steps: snapshot everywhere, then upload to external storage."""
        # Discover the Cassandra nodes via a CQL session against the seed.
        seed = self.seed_target if self.seed_target is not None else self.config.storage.fqdn
        session_provider = CqlSessionProvider([seed],
                                              self.config.cassandra)
        with session_provider.new_session() as session:
            tokenmap = session.tokenmap()
            self.hosts = [host for host in tokenmap.keys()]
            # Step 1: snapshot on all nodes at once
            # (parallelism = min(number of nodes, parallel_snapshots)).
            logging.info('Creating snapshots on all nodes')
            self._create_snapshots()
            # Step 2: upload the snapshots.
            logging.info('Uploading snapshots from nodes to external storage')
            self._upload_backup()
    def _create_snapshots(self):
        """Run the snapshot command in parallel on every node via pssh."""
        snapshot_cmd = ' '.join(self.cassandra.create_snapshot_command(self.backup_name))
        succeeded = self.orchestration_snapshots.pssh_run(self.hosts,
                                                          snapshot_cmd,
                                                          hosts_variables={})
        if not succeeded:
            # we could implement a retry.
            err_msg = 'Some nodes failed to create the snapshot.'
            logging.error(err_msg)
            raise Exception(err_msg)
        logging.info('A snapshot {} was created on all nodes.'.format(self.snapshot_tag))
    def _upload_backup(self):
        """Upload each node's snapshot, honouring the parallel_uploads setting."""
        backup_command = self._build_backup_cmd()
        succeeded = self.orchestration_uploads.pssh_run(self.hosts,
                                                        backup_command,
                                                        hosts_variables={})
        if not succeeded:
            # we could implement a retry.
            err_msg = 'Some nodes failed to upload the backup.'
            logging.error(err_msg)
            raise Exception(err_msg)
        logging.info('A new backup {} was created on all nodes.'.format(self.backup_name))
    def _build_backup_cmd(self):
        """Assemble the per-node medusa backup-node command line."""
        stagger_option = '--in-stagger {}'.format(self.stagger) if self.stagger else ''
        enable_md5_checks_option = '--enable-md5-checks' if self.enable_md5_checks else ''
        config_option = f'--config-file {self.config.file_path}' if self.config.file_path else ''
        # Use %s placeholders in the below command to have them replaced by pssh using per host command substitution
        command = 'mkdir -p {work}; cd {work} && medusa-wrapper sudo medusa {config} -vvv backup-node ' \
                  '--backup-name {backup_name} {stagger} {enable_md5_checks} --mode {mode}' \
            .format(work=self.work_dir,
                    config=config_option,
                    backup_name=self.backup_name,
                    stagger=stagger_option,
                    enable_md5_checks=enable_md5_checks_option,
                    mode=self.mode)
        logging.debug('Running backup on all nodes with the following command {}'.format(command))
        return command
|
from analysis import data_gen_all
import os
# Run the CRF/CRF+LDA path comparison over the type78 results.
basepath = os.environ['BASEPATH']
path = os.path.join(basepath, 'results/CRF_log/type78/CRF_path')
pathL = os.path.join(basepath, 'results/CRF_log/type78/CRF+LDA_pathL')
# BUGFIX: the second argument was misspelled 'path_L' (NameError at runtime);
# the variable defined above is 'pathL'.
data_gen_all(path, pathL, 'multi-col', './output')
import theano.tensor as tt
class Activation:
    """
    Callable wrapper pairing an activation function with a printable name.

    Lets activations be specified and displayed as strings
    (see activation_by_name).
    """
    def __init__(self, fn, name):
        self.fn = fn
        self.name = name

    def __call__(self, *args):
        # Delegate straight to the wrapped function.
        return self.fn(*args)

    def __str__(self):
        return self.name
# Registry of available activations. The raw theano ops keep their own
# printable names; the rest are wrapped so str() yields a stable lookup key.
activation_list = [
    tt.nnet.sigmoid,
    tt.nnet.softplus,
    tt.nnet.softmax,
    Activation(lambda x: x, 'linear'),
    # Scaled tanh (cf. the classic 1.7159*tanh(2x/3) recommendation).
    Activation(lambda x: 1.7*tt.tanh(2 * x / 3), 'scaled_tanh'),
    Activation(lambda x: tt.maximum(0, x), 'relu'),
    Activation(lambda x: tt.tanh(x), 'tanh'),
] + [
    # Leaky ReLUs with negative slopes 0.00..0.99. The `i=i` default binds
    # the loop variable at definition time (avoids the late-binding pitfall).
    Activation(lambda x, i=i: tt.maximum(0, x) + tt.minimum(0, x) * i/100,
               'relu{:02d}'.format(i))
    for i in range(100)
]
def activation_by_name(name):
    """
    Get an activation function or callabe-class from its name.
    Activation Names
        sigmoid, softplus, softmax, linear, scaled_tanh, tanh,
        relu, relu00, relu01, ..., relu99
    :param string name:
    :return: callable activation
    """
    matches = (act for act in activation_list if str(act) == name)
    try:
        return next(matches)
    except StopIteration:
        raise NotImplementedError('Unknown Activation Specified: ' + name)
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import imutils
import time
import math
import numpy as np
"""
"""
def calculate_distance(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return np.sqrt(dx ** 2 + dy ** 2)
def main():
    """Detect the meter pointer in a fixed test image via HSV thresholding
    and a probabilistic Hough transform, then display the result.

    NOTE(review): the image path and HSV thresholds are hardcoded for one
    specific test setup -- confirm before reuse.
    """
    image = cv.imread("G:\\Project\\opencv-ascs-resources\\meter_pointer_roi\\2020-03-05_22-18-30.jpeg")
    # image = cv.imread("G:\\Project\\opencv-ascs-resources\\save\\box.bmp")
    start = time.time()
    image = imutils.resize(image, width=300)
    # HSV segmentation: keep only very dark pixels (value <= 50), which is
    # presumably the dark pointer needle -- verify against the meter images.
    hsv = cv.cvtColor(image.copy(), cv.COLOR_BGR2HSV)
    hsv_min = (0, 0, 0)
    hsv_max = (180, 255, 50)
    mask = cv.inRange(hsv, hsv_min, hsv_max)
    # Probabilistic Hough transform (min line length 30, max gap 10).
    lines = cv.HoughLinesP(mask, 1, np.pi / 180, 10, None, 30, 10)
    if None is lines:
        print("未检测到直线")
        return
    line = lines[0][0]  # endpoints (x1, y1, x2, y2) of the first detected line
    # Locate and draw the pointer line.
    cv.line(image, (line[0], line[1]), (line[2], line[3]), (0, 255, 255), 1, cv.LINE_AA)
    # kernel = cv.getStructuringElement(cv.MORPH_RECT, (15, 15))
    # edged = cv.morphologyEx(mask, cv.MORPH_DILATE, kernel)
    # contours, _ = cv.findContours(edged, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    # contours = sorted(contours, key=cv.contourArea, reverse=True)
    # # locate the pointer
    # x, y, w, h = cv.boundingRect(contours[0])
    # cv.circle(image, (line[0], line[1]), int(calculate_distance(line[0], line[1], line[2], line[3])), (255, 0, 0), 2, cv.LINE_AA)
    # cv.line(image.copy(), (x, y), (x + w, y + h), (255, 255, 0), 2, cv.LINE_AA)
    # Second, looser HSV pass (value <= 150), shown for inspection only.
    hsv = cv.cvtColor(image.copy(), cv.COLOR_BGR2HSV)
    hsv_min = (0, 0, 0)
    hsv_max = (180, 255, 150)
    mask = cv.inRange(hsv, hsv_min, hsv_max)
    cv.imshow("mask", mask)
    # cnts = cv.findContours(threshed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    # cnts = imutils.grab_contours(cnts)
    # dig_cnts = []
    #
    # for cnt in cnts:
    #     x, y, w, h = cv.boundingRect(cnt)
    #     # print(x, y, w, h)
    #     # print("---------------------------")
    #     if 3 < h < 20 and 1 < w < 75:
    #         rect = cv.minAreaRect(cnt)
    #         cx, cy = rect[0]
    #         dig_cnts.append((x, y, w, h, cx, cy))  # extract digit candidates
    # for cnt in dig_cnts:
    #     x, y, w, h, cx, cy = cnt
    #     cv.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2, cv.LINE_8)
    print("[INFO] tried_unknow detection took {:.6f} seconds".format(time.time() - start))
    cv.imshow("image", image)
    # cv.imshow("threshed", threshed)
    # cv.imshow("edged", edged)
    # cv.imshow("dst", dst)
    cv.waitKey(0)
    pass
if __name__ == "__main__":
    # Run the detection, then tear down any OpenCV windows.
    main()
    cv.destroyAllWindows()
|
import os
from flask import Flask, render_template, send_from_directory, request, flash
from flask_wtf import Form
from wtforms import TextField, BooleanField, TextAreaField, SubmitField, validators, ValidationError
from dotenv import load_dotenv
# Load environment variables (e.g. URL) from a local .env file.
load_dotenv()
app = Flask(__name__)
# NOTE(review): hardcoded secret key is acceptable for development only;
# load it from the environment / a secret store in production.
app.secret_key = 'development identification key'
@app.route('/')
def index():
    """Render the home page."""
    context = {"title": "MLH Fellow", "url": os.getenv("URL")}
    return render_template('home.html', **context)
@app.route('/projects')
def projects():
    """Render the projects page."""
    context = {"title": "Projects", "url": os.getenv("URL")}
    return render_template('projects.html', **context)
@app.route('/about')
def about():
    """Render the about page."""
    context = {"title": "About", "url": os.getenv("URL")}
    return render_template('about.html', **context)
@app.route('/contact', methods=['GET', 'POST'])
def contact():
    """Contact page: render the form on GET; validate and accept on POST."""
    form = ContactForm()
    if request.method == 'POST':
        # Idiomatic truthiness check instead of `== False`.
        if not form.validate():
            flash('All fields are required.')
        else:
            return 'Form submitted.'
    # GET request, or a POST that failed validation: (re-)render the form.
    # (Also removes the implicit `return None` the original had for any
    # other method, though Flask restricts this route to GET/POST anyway.)
    return render_template('contact.html', form=form)
class ContactForm(Form):
    # Contact form: every field is required; email must also be well-formed.
    # NOTE(review): TextField and validators.Required are deprecated and were
    # removed in WTForms 3 (StringField / DataRequired are the replacements);
    # upgrading requires changing the imports at the top of this file --
    # confirm the pinned wtforms version before doing so.
    name = TextField("Name", [validators.Required("Please enter your name.")])
    email = TextField("Email", [validators.Required("Please enter your email address."), validators.Email("Please enter a valid email address")])
    subject = TextField("Subject", [validators.Required("Please enter a subject.")])
    message = TextAreaField("Message", [validators.Required("Please enter a message.")])
    submit = SubmitField("Send")
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
from __future__ import annotations
from aws_cdk import (
core,
aws_iam as iam
)
import typing
from aws_metrics.aws_metrics_stack import AWSMetricsStack
from aws_metrics.real_time_data_processing import RealTimeDataProcessing
from aws_metrics.data_ingestion import DataIngestion
from aws_metrics.dashboard import Dashboard
from aws_metrics.data_lake_integration import DataLakeIntegration
from aws_metrics.batch_processing import BatchProcessing
from aws_metrics.batch_analytics import BatchAnalytics
import aws_metrics.aws_metrics_constants as constants
from aws_metrics.policy_statements_builder.user_policy_statements_builder import UserPolicyStatementsBuilder
class AdminPolicyStatementsBuilder(UserPolicyStatementsBuilder):
"""
Build the admin user policy statement list for the AWSMetrics gem
"""
    def __init__(self):
        """Start from the base (read-only) user policy statements."""
        super().__init__()
    def add_aws_metrics_stack_policy_statements(self, component: AWSMetricsStack) -> AdminPolicyStatementsBuilder:
        """
        Add the additional policy statements to update the CloudFormation stack for admin.
        :param component: CloudFormation stack created by the metrics gem.
        :return: The policy statement builder itself.
        """
        # CDK deployments stage assets in the bootstrap ("CDKToolkit") bucket;
        # admins need read/write access to it to run `cdk deploy`.
        self._policy_statement_mapping['bootstrap'] = iam.PolicyStatement(
            actions=[
                's3:GetBucketLocation',
                's3:GetObject',
                's3:ListBucket',
                's3:PutObject'
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                core.Fn.sub('arn:${AWS::Partition}:s3:::cdktoolkit-stagingbucket-*'),
                core.Fn.sub('arn:${AWS::Partition}:s3:::cdktoolkit-stagingbucket-*/*'),
            ],
            sid='UpdateBootstrapBucket'
        )
        # Change-set level permissions scoped to this gem's stack plus the
        # shared CDKToolkit stack.
        self._policy_statement_mapping['cloudformation'] = iam.PolicyStatement(
            actions=[
                "cloudformation:DescribeStacks",
                "cloudformation:GetTemplate",
                'cloudformation:CreateChangeSet',
                'cloudformation:DescribeChangeSet',
                'cloudformation:ExecuteChangeSet',
                'cloudformation:DeleteChangeSet',
                'cloudformation:DescribeStackEvents'
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                core.Fn.sub(
                    body='arn:${AWS::Partition}:cloudformation:${AWS::Region}:${AWS::AccountId}:stack/${StackName}/*',
                    variables={
                        'StackName': component.stack_name
                    }
                ),
                core.Fn.sub('arn:${AWS::Partition}:cloudformation:${AWS::Region}:${AWS::AccountId}:stack/CDKToolkit/*')
            ],
            sid='UpdateResourcesStacks'
        )
        return self
    def add_data_ingestion_policy_statements(self, component: DataIngestion) -> AdminPolicyStatementsBuilder:
        """
        Add the additional policy statement to check service APIs log and the input data stream for admin.
        :param component: Data ingestion component created by the metrics gem.
        :return: The policy statement builder itself.
        """
        # Base user permissions first; admin-only statements are added below.
        super().add_data_ingestion_policy_statements(component)
        self._policy_statement_mapping['kinesis_stream'] = iam.PolicyStatement(
            actions=[
                'kinesis:DescribeStreamSummary'
            ],
            effect=iam.Effect.ALLOW,
            resources=[component.input_stream_arn],
            sid='DescribeKinesisStream'
        )
        # Allow reading the API Gateway execution logs for the ingestion API.
        self._add_to_logs_policy_statement(
            [
                core.Fn.sub(
                    body='arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:'
                         'API-Gateway-Execution-Logs_${RestApiId}/${Stage}:log-stream:*',
                    variables={
                        'RestApiId': component.rest_api_id,
                        'Stage': component.deployment_stage
                    }
                )
            ]
        )
        return self
    def add_real_time_data_processing_policy_statements(self, component: RealTimeDataProcessing) -> AdminPolicyStatementsBuilder:
        """
        Add the additional policy statements to update the Kinesis Data Analytics application
        and analytics processing Lambda for admin.
        :param component: Real-time data processing component created by the metrics gem.
        :return: The policy statement builder itself.
        """
        # Full lifecycle control of the Kinesis Analytics application.
        self._policy_statement_mapping['kinesis_analytics'] = iam.PolicyStatement(
            actions=[
                'kinesisanalytics:AddApplicationOutput',
                'kinesisanalytics:DeleteApplicationOutput',
                'kinesisanalytics:DescribeApplication',
                'kinesisanalytics:StartApplication',
                'kinesisanalytics:StopApplication',
                'kinesisanalytics:UpdateApplication'
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                core.Fn.sub(
                    body='arn:${AWS::Partition}:kinesisanalytics:${AWS::Region}:${AWS::AccountId}:application/'
                         '${AnalyticsApplicationName}',
                    variables={
                        'AnalyticsApplicationName': component.analytics_application_name
                    }
                )
            ],
            sid='UpdateAnalyticsApplication'
        )
        # IAM access for the application and Lambda execution roles.
        self._add_to_iam_policy_statement(
            [
                component.analytics_application_role_arn,
                component.analytics_application_lambda_role_arn
            ]
        )
        self._add_to_lambda_policy_statement([component.analytics_processing_lambda_arn])
        # Allow reading the processing Lambda's CloudWatch logs.
        self._add_to_logs_policy_statement(
            [
                core.Fn.sub(
                    body='arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:'
                         '/aws/lambda/${AnalyticsProcessingLambdaName}:log-stream:*',
                    variables={
                        'AnalyticsProcessingLambdaName': component.analytics_processing_lambda_name
                    }
                )
            ]
        )
        return self
    def add_dashboard_policy_statements(self, component: Dashboard) -> AdminPolicyStatementsBuilder:
        """
        Add the additional policy statements to update the CloudWatch dashboard for admin.
        :param component: CloudWatch dashboard component created by the metrics gem.
        :return: The policy statement builder itself.
        """
        # CloudWatch dashboard ARNs have no region component, hence the
        # double colon in the pattern below.
        self._policy_statement_mapping['dashboard'] = iam.PolicyStatement(
            actions=[
                'cloudwatch:GetDashboard',
                'cloudwatch:PutDashboard'
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                core.Fn.sub(
                    body='arn:${AWS::Partition}:cloudwatch::${AWS::AccountId}:dashboard/${DashboardName}',
                    variables={
                        'DashboardName': component.dashboard_name
                    }
                )
            ],
            sid='UpdateDashboard'
        )
        return self
def add_data_lake_integration_policy_statements(
        self,
        component: DataLakeIntegration) -> AdminPolicyStatementsBuilder:
    """
    Add the policy statements to retrieve the analytics bucket content and
    update Glue database, table and crawler for admin.

    :param component: Data lake integration component created by the metrics gem.
    :return: The policy statement builder itself.
    """
    # No data lake integration deployed: nothing to grant.
    if not component:
        return self
    # Read/update the Glue events database (catalog access is also required).
    self._policy_statement_mapping['glue_database'] = iam.PolicyStatement(
        actions=[
            'glue:GetDatabase',
            'glue:UpdateDatabase'
        ],
        effect=iam.Effect.ALLOW,
        resources=[
            core.Fn.sub('arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog'),
            core.Fn.sub(
                body='arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:database/${EventsDatabaseName}',
                variables={
                    'EventsDatabaseName': component.events_database_name
                }
            )
        ],
        sid='UpdateEventsDatabase'
    )
    # Read/update the Glue events table; Glue requires catalog and database
    # ARNs alongside the table ARN for table operations.
    self._policy_statement_mapping['glue_table'] = iam.PolicyStatement(
        actions=[
            'glue:GetTable',
            'glue:GetTableVersion',
            'glue:GetTableVersions',
            'glue:UpdateTable',
            'glue:GetPartitions'
        ],
        effect=iam.Effect.ALLOW,
        resources=[
            core.Fn.sub('arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog'),
            core.Fn.sub(
                body='arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:table/'
                     '${EventsDatabaseName}/${EventsTableName}',
                variables={
                    'EventsDatabaseName': component.events_database_name,
                    'EventsTableName': component.events_table_name
                }
            ),
            core.Fn.sub(
                body='arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:'
                     'database/${EventsDatabaseName}',
                variables={
                    'EventsDatabaseName': component.events_database_name
                }
            )
        ],
        sid='UpdateEventsTable'
    )
    # Manage the Glue crawler that discovers event partitions.
    self._policy_statement_mapping['glue_crawler'] = iam.PolicyStatement(
        actions=[
            'glue:GetCrawler',
            'glue:StartCrawler',
            'glue:StopCrawler',
            'glue:UpdateCrawler'
        ],
        effect=iam.Effect.ALLOW,
        resources=[core.Fn.sub(
            body='arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:crawler/${EventsCrawlerName}',
            variables={
                'EventsCrawlerName': component.events_crawler_name
            }
        )],
        sid='UpdateEventsCrawler'
    )
    # Read access to the analytics bucket (bucket ARN for ListBucket,
    # object ARN for GetObject).
    self._policy_statement_mapping['s3_read'] = iam.PolicyStatement(
        actions=[
            's3:GetObject',
            's3:ListBucket'
        ],
        effect=iam.Effect.ALLOW,
        resources=[
            core.Fn.sub(
                body='arn:${AWS::Partition}:s3:::${AnalyticsBucketName}',
                variables={
                    'AnalyticsBucketName': component.analytics_bucket_name
                }
            ),
            core.Fn.sub(
                body='arn:${AWS::Partition}:s3:::${AnalyticsBucketName}/*',
                variables={
                    'AnalyticsBucketName': component.analytics_bucket_name
                }
            )
        ],
        sid='GetAnalyticsBucketObjects'
    )
    # Write access is restricted to the Athena query-results directory.
    self._policy_statement_mapping['s3_write'] = iam.PolicyStatement(
        actions=[
            's3:PutObject'
        ],
        effect=iam.Effect.ALLOW,
        resources=[
            core.Fn.sub(
                body='arn:${AWS::Partition}:s3:::${AnalyticsBucketName}/${AthenaOutputDirectory}',
                variables={
                    'AnalyticsBucketName': component.analytics_bucket_name,
                    'AthenaOutputDirectory': constants.ATHENA_OUTPUT_DIRECTORY
                }
            ),
            core.Fn.sub(
                body='arn:${AWS::Partition}:s3:::${AnalyticsBucketName}/${AthenaOutputDirectory}/*',
                variables={
                    'AnalyticsBucketName': component.analytics_bucket_name,
                    'AthenaOutputDirectory': constants.ATHENA_OUTPUT_DIRECTORY
                }
            )
        ],
        sid='PutQueryResults'
    )
    # Admin must be able to pass the crawler role and read the crawler logs.
    self._add_to_iam_policy_statement([component.events_crawler_role_arn])
    self._add_to_logs_policy_statement(
        [
            core.Fn.sub('arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:'
                        '/aws-glue/crawlers:log-stream:*')
        ]
    )
    return self
def add_batch_processing_policy_statements(self, component: BatchProcessing) -> AdminPolicyStatementsBuilder:
    """
    Add the policy statements to update the Kinesis Data Firehose delivery stream
    and events processing Lambda for admin.

    :param component: Batch processing component created by the metrics gem.
    :return: The policy statement builder itself.
    """
    if not component:
        return self
    # Allow describing and re-pointing the Firehose delivery stream destination.
    delivery_stream_arn = core.Fn.sub(
        body='arn:${AWS::Partition}:firehose:${AWS::Region}:${AWS::AccountId}:'
             'deliverystream/${DeliveryStreamName}',
        variables={'DeliveryStreamName': component.delivery_stream_name}
    )
    self._policy_statement_mapping['firehose'] = iam.PolicyStatement(
        sid='UpdateDeliveryStreamDestination',
        effect=iam.Effect.ALLOW,
        actions=[
            'firehose:DescribeDeliveryStream',
            'firehose:UpdateDestination'
        ],
        resources=[delivery_stream_arn]
    )
    # Manage the events processing Lambda and pass the roles it and the
    # delivery stream run under.
    self._add_to_lambda_policy_statement([component.events_processing_lambda_arn])
    self._add_to_iam_policy_statement(
        [
            component.delivery_stream_role_arn,
            component.events_processing_lambda_role_arn
        ]
    )
    # Read access to the Lambda's and the delivery stream's log streams.
    lambda_log_stream_arn = core.Fn.sub(
        body='arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:'
             '/aws/lambda/${EventsProcessingLambdaName}:log-stream:*',
        variables={'EventsProcessingLambdaName': component.events_processing_lambda_name}
    )
    delivery_log_stream_arn = core.Fn.sub(
        body='arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:'
             '${DeliveryStreamLogGroupName}:log-stream:*',
        variables={'DeliveryStreamLogGroupName': component.delivery_stream_log_group_name}
    )
    self._add_to_logs_policy_statement([lambda_log_stream_arn, delivery_log_stream_arn])
    return self
def add_batch_analytics_policy_statements(self, component: BatchAnalytics) -> AdminPolicyStatementsBuilder:
    """
    Add the policy statements to get named queries/executions and update the work group for admin.

    :param component: Batch analytics component created by the metrics gem.
    :return: The policy statement builder itself.
    """
    # No batch analytics deployed: nothing to grant.
    if not component:
        return self
    # Athena query and work-group management, scoped to the metrics work group.
    self._policy_statement_mapping['athena'] = iam.PolicyStatement(
        actions=[
            'athena:BatchGetNamedQuery',
            'athena:BatchGetQueryExecution',
            'athena:GetNamedQuery',
            'athena:GetQueryExecution',
            'athena:GetQueryResults',
            'athena:StartQueryExecution',
            'athena:StopQueryExecution',
            'athena:ListNamedQueries',
            'athena:ListQueryExecutions',
            'athena:GetWorkGroup',
            'athena:UpdateWorkGroup'
        ],
        effect=iam.Effect.ALLOW,
        resources=[
            core.Fn.sub(
                body='arn:${AWS::Partition}:athena:${AWS::Region}:${AWS::AccountId}:workgroup/${WorkGroupName}',
                variables={
                    'WorkGroupName': component.athena_work_group_name
                }
            )
        ],
        sid='UpdateAthenaWorkGroupAndRunQuery'
    )
    return self
def _add_to_logs_policy_statement(self, resource_list: typing.List[str]) -> None:
    """
    Extend the CloudWatch Logs read statement with extra log resources,
    creating the statement on first use.

    :param resource_list: Log resource ARNs to add.
    """
    existing = self._policy_statement_mapping.get('logs')
    if existing:
        existing.add_resources(*resource_list)
        return
    self._policy_statement_mapping['logs'] = iam.PolicyStatement(
        sid='AccessLogs',
        effect=iam.Effect.ALLOW,
        actions=[
            'logs:DescribeLogStreams',
            'logs:GetLogEvents'
        ],
        resources=resource_list
    )
def _add_to_lambda_policy_statement(self, resource_list: typing.List[str]) -> None:
    """
    Extend the Lambda management statement with extra function ARNs,
    creating the statement on first use.

    :param resource_list: Lambda function ARNs to add.
    """
    existing = self._policy_statement_mapping.get('lambda')
    if existing:
        existing.add_resources(*resource_list)
        return
    self._policy_statement_mapping['lambda'] = iam.PolicyStatement(
        sid='UpdateLambda',
        effect=iam.Effect.ALLOW,
        actions=[
            'lambda:GetFunction',
            'lambda:GetFunctionConfiguration',
            'lambda:UpdateFunctionCode',
            'lambda:UpdateFunctionConfiguration',
            'lambda:ListTags',
            'lambda:TagResource',
            'lambda:UntagResource'
        ],
        resources=resource_list
    )
def _add_to_iam_policy_statement(self, resource_list: typing.List[str]) -> None:
    """
    Extend the IAM pass-role statement with extra role ARNs,
    creating the statement on first use.

    :param resource_list: IAM role ARNs to add.
    """
    existing = self._policy_statement_mapping.get('iam')
    if existing:
        existing.add_resources(*resource_list)
        return
    self._policy_statement_mapping['iam'] = iam.PolicyStatement(
        sid='PassRole',
        effect=iam.Effect.ALLOW,
        actions=[
            'iam:PassRole',
            'iam:GetRole'
        ],
        resources=resource_list
    )
|
import argparse
import json
import logging
import os
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from prometheus_client.twisted import MetricsResource
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site
logger = logging.getLogger(__name__)
# Manifest link files store digests as "sha256:<hex>"; this is the length of
# the "sha256:" prefix to strip when extracting the bare hex digest.
SHA256_PREFIX_LENGTH = len('sha256:')
class RegistryCollector:
def __init__(self, base_path):
self._base_path = base_path
def _find_repositories(self):
repositories = []
for dirname, dirnames, filenames in os.walk(os.path.join(self._base_path, 'repositories')):
if '_manifests' in dirnames:
repositories.append(dirname.replace(os.path.join(self._base_path, 'repositories'), '')[1:])
# Don't need to recurse any further
for terminaldirname in ['_manifests', '_layers', '_uploads']:
if terminaldirname in dirnames:
dirnames.remove(terminaldirname)
return repositories
def _scrape_tags(self, repository):
tags_path = os.path.join(self._base_path, 'repositories', repository, '_manifests', 'tags')
return os.listdir(tags_path)
def _scrape_revisions(self, repository):
tags_path = os.path.join(self._base_path, 'repositories', repository, '_manifests', 'revisions', 'sha256')
return os.listdir(tags_path)
def _scrape_manifest(self, repository, tag):
tags_path = os.path.join(self._base_path, 'repositories', repository, '_manifests', 'tags')
with open(os.path.join(tags_path, tag, 'current', 'link'), 'r') as link_file:
manifest_id = link_file.readline()[SHA256_PREFIX_LENGTH:].replace('\n', '')
with open(os.path.join(self._base_path, 'blobs', 'sha256', manifest_id[0:2], manifest_id, 'data'),
'r') as manifest_file:
return json.load(manifest_file)
def collect(self):
repository_tags_total = GaugeMetricFamily('repository_tags_total', 'Number of tags for each repo',
labels=['repository'])
repository_revisions_total = GaugeMetricFamily('repository_revisions_total',
'Number of revisions for each repo', labels=['repository'])
repository_tag_layers_total = GaugeMetricFamily('repository_tag_layers_total', 'Number of layers in each tag',
labels=['repository', 'tag'])
repository_tag_size_bytes = GaugeMetricFamily('repository_tag_size_bytes', 'Size of each tag',
labels=['repository', 'tag'])
repositories = self._find_repositories()
logger.debug('Found %s repositories: %s', len(repositories), repositories)
for repository in repositories:
logger.debug('Scanning %s for tags', repository)
tags = self._scrape_tags(repository)
repository_tags_total.add_metric([repository], len(tags))
revisions = self._scrape_revisions(repository)
repository_revisions_total.add_metric([repository], len(revisions))
for tag in tags:
manifest = self._scrape_manifest(repository, tag)
repository_tag_layers_total.add_metric([repository, tag], len(manifest['layers']))
size = 0
for layer in manifest['layers']:
size += layer['size'] if 'size' in layer else 0
repository_tag_size_bytes.add_metric([repository, tag], size)
yield repository_tags_total
yield repository_revisions_total
yield repository_tag_layers_total
yield repository_tag_size_bytes
def run_metrics_server():
    """Serve the Prometheus registry at /metrics on port 8080.

    Blocks forever: reactor.run() only returns when the process is stopped.
    """
    root = Resource()
    root.putChild(b'metrics', MetricsResource())
    factory = Site(root)
    reactor.listenTCP(8080, factory)
    reactor.run()
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
    parser = argparse.ArgumentParser(description='Exports statistics from a private Docker registry')
    parser.add_argument('path', help='File path to root of registry disk (eg. /var/lib/registry/docker/registry/v2/).' +
                        ' This directory should contain subdirectories "repositories" and "blobs"')
    args = parser.parse_args()
    # Register the collector with the default Prometheus registry so every
    # /metrics request triggers a fresh filesystem scrape, then block serving.
    collector = RegistryCollector(args.path)
    REGISTRY.register(collector)
    run_metrics_server()
|
#!/usr/bin/env python
"""
WorkQueueManager test
"""
from __future__ import print_function, division
from builtins import next
import unittest
from WMCore_t.WorkQueue_t.WorkQueueTestCase import WorkQueueTestCase
from WMCore.WorkQueue.WorkQueue import globalQueue
def getFirstTask(wmspec):
    """Return the first top-level task yielded by *wmspec*'s task iterator."""
    task_iterator = wmspec.taskIterator()
    return next(task_iterator)
class WorkQueueManagerTest(WorkQueueTestCase):
    """
    TestCase for the WorkQueueManager module.
    """
    _maxMessage = 10

    def setSchema(self):
        """No SQL schema needed; only the WorkQueue couch app."""
        self.schema = []
        self.couchApps = ["WorkQueue"]

    def getConfig(self):
        """
        _createConfig_

        Build a general configuration for a global WorkQueueManager component.
        """
        # configPath=os.path.join(WMCore.WMInit.getWMBASE(), \
        #                  'src/python/WMComponent/WorkQueueManager/DefaultConfig.py')):
        config = self.testInit.getConfiguration()
        # http://www.logilab.org/ticket/8961
        # pylint: disable=E1101, E1103
        config.component_("WorkQueueManager")
        config.section_("General")
        config.General.workDir = "."
        manager_settings = (
            ('team', 'team_usa'),
            ('requestMgrHost', 'cmssrv49.fnal.gov:8585'),
            ('serviceUrl', "http://cmssrv18.fnal.gov:6660"),
            ('logLevel', 'INFO'),
            ('pollInterval', 10),
            ('level', "GlobalQueue"),
        )
        for attribute, value in manager_settings:
            setattr(config.WorkQueueManager, attribute, value)
        return config

    def setupGlobalWorkqueue(self):
        """Return a global workqueue instance."""
        return globalQueue(
            CacheDir=self.workDir,
            QueueURL='global.example.com',
            Teams=["The A-Team", "some other bloke"],
            DbName='workqueue_t_global',
        )

    def testComponentBasic(self):
        """
        Tests the components, as in sees if they load.
        Otherwise does nothing.
        """
        return
        # TODO: What used to be here, stopping pylint from complaining about unreachable code
        # myThread = threading.currentThread()
        #
        # config = self.getConfig()
        #
        # testWorkQueueManager = WorkQueueManager(config)
        # testWorkQueueManager.prepareToStart()
        #
        # time.sleep(30)
        # print("Killing")
        # myThread.workerThreadManager.terminateWorkers()
        #
        # return
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.