content stringlengths 5 1.05M |
|---|
from gclang.gen.GuardedParser import GuardedParser
from gclang.gen.GuardedVisitor import GuardedVisitor
from dataclasses import InitVar, dataclass
@dataclass
class MacroOperator:
    """A named macro whose body is a list of operators (statements)."""
    # Macro identifier as written in the source program.
    name: str
    # Formal parameter names, in declaration order.
    parameters: tuple[str, ...]
    # Unexpanded parse subtree; re-visited at each call site.
    body: GuardedParser.OperatorListContext
@dataclass
class MacroExpression:
    """A named macro whose body is a single expression."""
    # Macro identifier as written in the source program.
    name: str
    # Formal parameter names, in declaration order.
    parameters: tuple[str, ...]
    # Unexpanded parse subtree; re-visited at each call site.
    body: GuardedParser.ExpressionContext
class MacroVisitor(GuardedVisitor):
    """Visitor that records macro definitions and expands macro calls.

    NOTE(review): `_get_vars`/`_set_vars` are no-op stubs here, yet
    `visitExprMacroCall` and `visitMacroExpressionDefinition` index and
    mutate their result — subclasses presumably override them to expose
    the active variable scope; confirm.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reserved for nested-expansion state; unused in the code visible here.
        self._replacement_stack = []

    def _get_vars(self):
        # Stub: expected to return the current name -> value/macro mapping.
        pass

    def _set_vars(self, vars):
        # Stub: expected to replace the current variable mapping wholesale.
        pass

    def visitActualParameters(self, ctx: GuardedParser.ActualParametersContext):
        """Evaluate every actual-parameter expression, left to right."""
        return [self.visit(node) for node in ctx.getTypedRuleContexts(GuardedParser.ExpressionContext)]

    def visitExprMacroCall(self, ctx: GuardedParser.ExprMacroCallContext):
        """Expand a macro call: bind actuals to formals, then visit the body."""
        macro_name = ctx.getToken(GuardedParser.ID, 0).getText()
        macro = self._get_vars()[macro_name]
        assert isinstance(macro, MacroExpression)
        parameters = self.visitActualParameters(ctx.getChild(0, GuardedParser.ActualParametersContext))
        assert len(macro.parameters) == len(parameters)
        vars = self._get_vars()
        # Snapshot the scope so parameter bindings do not leak out of the call.
        snap = dict(vars)
        vars |= dict(zip(macro.parameters, parameters))
        result = self.visit(macro.body)
        self._set_vars(snap)
        return result

    def visitMacroExpressionDefinition(self, ctx: GuardedParser.MacroExpressionDefinitionContext):
        """Record an expression-macro definition in the current scope."""
        vars = self._get_vars()
        # NOTE(review): getChild(0) is assumed to be the macro's ID node — confirm
        # against the Guarded grammar.
        macro_name = ctx.getChild(0).getText()
        parameters_ctx = ctx.getChild(0, GuardedParser.FormalParametersContext)
        parameters = tuple(map(str, parameters_ctx.getTokens(GuardedParser.ID)))
        body = ctx.getChild(0, GuardedParser.ExpressionContext)
        vars[macro_name] = MacroExpression(macro_name, parameters, body)
import re
from thefuck.utils import for_app
@for_app('heroku')
def match(command):
    """Trigger when heroku suggests an alternative command on stderr."""
    needle = 'Run heroku _ to run'
    return needle in command.stderr
def get_new_command(command):
    """Extract the command heroku itself suggested in its stderr output."""
    pattern = 'Run heroku _ to run ([^.]*)'
    suggestions = re.findall(pattern, command.stderr)
    return suggestions[0]
|
import json
import torch
from base.base_dataset import BaseADDataset
from networks.main import build_network
from optim import ClassifierTrainer
class Classifier(object):
    """A class for an anomaly detection classifier model.

    Attributes:
        objective: Hypersphere ('hsc'), binary cross-entropy ('bce'), or focal loss ('focal') classifier.
        hsc_norm: Set specific norm to use with HSC ('l1', 'l2', 'l2_squared', 'l2_squared_linear').
        focal_gamma: Gamma parameter of the focal loss.
        net_name: A string indicating the name of the neural network to use.
        net: The neural network.
        trainer: ClassifierTrainer to train the classifier model.
        optimizer_name: A string indicating the optimizer to use for training.
        results: A dictionary to save the results.
    """

    def __init__(self, objective: str, hsc_norm: str = 'l2_squared_linear', focal_gamma: float = 2.0):
        """Inits Classifier."""
        self.objective = objective
        self.hsc_norm = hsc_norm
        self.focal_gamma = focal_gamma
        # Network/trainer state is populated later by set_network()/train().
        self.net_name = None
        self.net = None
        self.trainer = None
        self.optimizer_name = None
        self.results = {
            'train_time': None,
            'train_scores': None,
            'test_time': None,
            'test_scores': None,
            'test_auc': None
        }

    def set_network(self, net_name, rep_dim=64, bias_terms=False):
        """Builds the neural network. Must be called before train()/test()."""
        self.net_name = net_name
        self.net = build_network(net_name, rep_dim=rep_dim, bias_terms=bias_terms)

    def train(self, dataset: BaseADDataset, oe_dataset: BaseADDataset = None, optimizer_name: str = 'adam',
              lr: float = 0.001, n_epochs: int = 50, lr_milestones: tuple = (), batch_size: int = 128,
              weight_decay: float = 1e-6, device: str = 'cuda', n_jobs_dataloader: int = 0):
        """Trains the classifier on the training data.

        oe_dataset is an optional second dataset passed through to the
        trainer (presumably outlier exposure — confirm in ClassifierTrainer).
        """
        self.optimizer_name = optimizer_name
        self.trainer = ClassifierTrainer(self.objective, self.hsc_norm, self.focal_gamma, optimizer_name=optimizer_name,
                                         lr=lr, n_epochs=n_epochs, lr_milestones=lr_milestones, batch_size=batch_size,
                                         weight_decay=weight_decay, device=device, n_jobs_dataloader=n_jobs_dataloader)
        # Get results
        self.net = self.trainer.train(dataset=dataset, oe_dataset=oe_dataset, net=self.net)
        self.results['train_time'] = self.trainer.train_time
        self.results['train_scores'] = self.trainer.train_scores

    def test(self, dataset: BaseADDataset, device: str = 'cuda', n_jobs_dataloader: int = 0):
        """Tests the Classifier on the test data."""
        # A fresh trainer is created when testing a loaded, never-trained model.
        if self.trainer is None:
            self.trainer = ClassifierTrainer(self.objective, self.hsc_norm, self.focal_gamma, device=device,
                                             n_jobs_dataloader=n_jobs_dataloader)
        self.trainer.test(dataset, self.net)
        # Get results
        self.results['test_time'] = self.trainer.test_time
        self.results['test_scores'] = self.trainer.test_scores
        self.results['test_auc'] = self.trainer.test_auc

    def save_model(self, export_model):
        """Save the classifier model (network weights only) to export_model."""
        net_dict = self.net.state_dict()
        torch.save({'net_dict': net_dict}, export_model)

    def load_model(self, model_path, map_location='cpu'):
        """Load the classifier model from model_path."""
        model_dict = torch.load(model_path, map_location=map_location)
        self.net.load_state_dict(model_dict['net_dict'])

    def save_results(self, export_json):
        """Save results dict to a JSON-file."""
        with open(export_json, 'w') as fp:
            json.dump(self.results, fp)
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is adapted from https://github.com/PyTorchLightning
# /pytorch-lightning/blob/master/pytorch_lightning/plugins/training_type/ddp_spawn.py
#
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, List, Optional, Callable
import multiprocessing
from bigdl.nano.pytorch.utils import TORCH_VERSION_LESS_1_10
import torch
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.multiprocessing.spawn import _wrap, ProcessContext
import pytorch_lightning as pl
from pytorch_lightning.overrides import LightningDistributedModule
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.environments import LightningEnvironment
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.seed import reset_seed
from bigdl.nano.common.cpu_schedule import schedule_workers
from bigdl.nano.deps.ipex.ipex_api import ipex_device, ipex_optimize
import logging
import warnings
log = logging.getLogger(__name__)
def _restore_env(key, initial_value):
    """Restore an environment variable to its pre-spawn value.

    A value of None means the variable was not set before, so it is removed
    rather than assigned (os.environ values must be strings).
    """
    if initial_value is None:
        os.environ.pop(key, None)
    else:
        os.environ[key] = initial_value


def start_processes_new(fn, args=(), nprocs=1, join=True, daemon=False,
                        start_method='spawn', cpu_procs=None):
    """Start processes with optimized environment variables.

    Each worker i is pinned (via KMP_AFFINITY) to its slice of cpu_procs and
    gets a matching OMP_NUM_THREADS before it is spawned; spawned children
    inherit the environment active at Process.start().
    """
    mp = multiprocessing.get_context(start_method)
    error_queues = []
    processes = []
    if cpu_procs is None:
        cpu_procs = schedule_workers(nprocs)
    # Remember the original values (None when unset) so they can be restored.
    init_KMP_AFFINITY = os.environ.get("KMP_AFFINITY")
    init_OMP_NUM_THREADS = os.environ.get("OMP_NUM_THREADS")
    for i in range(nprocs):
        # Renamed the comprehension variable: the original reused `i`, which
        # shadowed the loop index and read confusingly (though it worked).
        proclist = ','.join(str(core) for core in cpu_procs[i])
        os.environ["KMP_AFFINITY"] = f"granularity=fine,proclist"\
            f"=[{proclist}],explicit"
        os.environ["OMP_NUM_THREADS"] = str(len(cpu_procs[i]))
        log.debug(f"[Process {i}]: using KMP_AFFINITY: {os.environ['KMP_AFFINITY']}")
        log.debug(f"[Process {i}]: using OMP_NUM_THREADS: {os.environ['OMP_NUM_THREADS']}")
        error_queue = mp.SimpleQueue()
        process = mp.Process(
            target=_wrap,
            args=(fn, i, args, error_queue),
            daemon=daemon,
        )
        process.start()
        error_queues.append(error_queue)
        processes.append(process)
    context = ProcessContext(processes, error_queues)
    if not join:
        # Caller takes ownership; the env stays as set for the last worker
        # (matches the original behavior).
        return context
    # Loop on join until it returns True or throw an exception.
    while not context.join():
        pass
    # BUGFIX: if a variable was unset before, the original assigned None into
    # os.environ, which raises TypeError. Remove the key instead.
    _restore_env("KMP_AFFINITY", init_KMP_AFFINITY)
    _restore_env("OMP_NUM_THREADS", init_OMP_NUM_THREADS)
class DDPSpawnPlugin(pl.plugins.DDPSpawnPlugin):
    """Extending DDPSpawnPlugin to support launch subprocesses with optimized env variables."""

    distributed_backend = "ddp_spawn"

    def __init__(
        self,
        num_processes: int = 1,
        cpu_for_each_process: Optional[List[List[int]]] = None,
        use_ipex=False,
        enable_bf16=False,
    ):
        """Create a DDPSpawnPlugin, adding a cpu_for_each_process parameter."""
        # Before torch 1.10 IPEX exposes its own device type; with 1.10+ the
        # optimized model still lives on 'cpu'.
        device = ipex_device() if use_ipex and TORCH_VERSION_LESS_1_10 else 'cpu'
        parallel_devices = [torch.device(device) for _ in range(num_processes)]
        cluster_environment = LightningEnvironment()
        super().__init__(parallel_devices,
                         cluster_environment=cluster_environment)
        self.cpu_for_each_process = cpu_for_each_process
        self.is_distributed = True
        self.use_ipex = use_ipex
        self.enable_bf16 = enable_bf16

    @property
    def mp_spawn_kwargs(self):
        """Return the kwargs that will be passed to spawn to start a new process."""
        return {
            "args": (self.lightning_module.trainer, self.mp_queue),
            "nprocs": self.num_processes,
            # Forwarded to start_processes_new for per-process CPU pinning.
            "cpu_procs": self.cpu_for_each_process
        }

    def start_training(self, trainer):
        """Setup start_training hook for the plugin."""
        # reset ortsess, since InferenceSession can not be pickled
        self.model._ortsess = None
        start_processes_new(self.new_process, **self.mp_spawn_kwargs)
        # reset optimizers, since main process is never used for training
        # and thus does not have a valid optim state
        trainer.optimizers = []

    def start_evaluating(self, trainer):
        """Setup start_evaluating hook for the plugin."""
        print("evaluate")
        start_processes_new(self.new_process, **self.mp_spawn_kwargs)

    def start_predicting(self, trainer):
        """Setup start_predicting hook for the plugin."""
        print("predict")
        start_processes_new(self.new_process, **self.mp_spawn_kwargs)

    def new_process(self, process_idx, trainer, mp_queue):
        """The function to run in each newly spawned worker process."""
        self.mp_queue = mp_queue
        reset_seed()
        self.set_world_ranks(process_idx)
        # set warning rank
        rank_zero_only.rank = self.global_rank
        # set up server using proc 0's ip address
        # try to init for 20 times at max in case ports are taken
        # where to store ip_table
        self.init_ddp_connection(self.global_rank, self.world_size)
        # TODO: we moved it to the trainer.fit after calling pre_dispatch
        # ... need to double check that it is the correct place
        # self.trainer.call_setup_hook(self.model)
        # on world_size=0 let everyone know training is starting
        if self.is_global_zero and not torch.distributed.is_initialized():
            log.info("-" * 100)
            log.info(f"distributed_backend={self.distributed_backend}")
            log.info(f"All DDP processes registered. Starting ddp with {self.world_size} processes")
            log.info("-" * 100)
        # set the ranks and devices
        self.dist.rank = self.global_rank
        self.dist.device = self.root_device
        if self.use_ipex and not TORCH_VERSION_LESS_1_10:
            # IPEX 1.10+ path: optimize model (and single optimizer) in place.
            dtype = torch.bfloat16 if self.enable_bf16 else None
            num_optimizers = len(self.lightning_module.trainer.accelerator.optimizers)
            if num_optimizers == 1:
                optimizer = self.lightning_module.trainer.accelerator.optimizers[0]
                ipex_optimize(self.model, optimizer=optimizer,
                              inplace=True, dtype=dtype)
            elif num_optimizers == 0:
                ipex_optimize(self.model, inplace=True, dtype=dtype)
            else:
                warnings.warn(f"IPEX currently only support single optimizers, "
                              f"but got {num_optimizers}. Skip IPEX")
        if self.sync_batchnorm:
            self.model = self.configure_sync_batchnorm(self.model)
        self.configure_ddp()
        # Move this line here so that we can temporarily use cpu while configuring ddp
        # and use ipex.DEVICE later on
        # move the model to the correct device
        self.model_to_device()
        self.barrier()
        results = trainer.run_stage()
        # persist info in ddp_spawn
        self.transfer_distrib_spawn_state_on_fit_end(results)

    def configure_ddp(self):
        """Setup the configuration for pytorch ddp."""
        self.pre_configure_ddp()
        self._model = DistributedDataParallel(
            LightningDistributedModule(self.model),
            **self._ddp_kwargs,
        )
        self._register_ddp_hooks()
|
import unittest
from check_seventeen import check_seventeen_
class TestCheckSeventeen(unittest.TestCase):
    """Checks check_seventeen_ against exhaustive small inputs and big ints."""

    def test_obvious(self):
        # Exhaustively verify every integer below 25000 against y % 17.
        for y in range(25000):
            if y % 17 == 0:
                self.assertTrue(check_seventeen_(str(y)))
            else:
                self.assertFalse(check_seventeen_(str(y)))

    def test_long(self):
        # Large values that are divisible by 17.
        for digits in ('3665133073327088571',
                       '14295447902675205',
                       '398144434437141421'):
            self.assertTrue(check_seventeen_(digits))
        # Large values that are not divisible by 17.
        for digits in ('123456789987654321',
                       '1234567891011121314151617',
                       '16049382583144577083971021'):
            self.assertFalse(check_seventeen_(digits))
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
"""A histoprogramming utility to go from some input lines to something plottable.
Licensed under the 3-clause BSD License:
Copyright (c) 2011-2014, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys
from utils import specialize
def optimalBinSize(vals):
    """Returns "optimal" bin size for a histogram using Scott's Rule:
        b = 3.49 * stdev * N^(-1/3),
    stdev = standard deviation of the N descriptor values.
    From D. Scott - On optimal and data-based histograms.
    """
    from utils import getStdDev
    n = len(vals)
    sd = getStdDev(vals)
    # BUGFIX: this module is Python 2 (see the print statement in __main__),
    # where -1/3 floor-divides to -1, so the original computed 3.49*sd/n
    # instead of Scott's rule. A float exponent is correct on both 2 and 3.
    return 3.49 * sd * (n ** (-1.0 / 3))
def makehist(vals, incr=1, normalize=0):
    """Makes a histogram for the given vals and returns a dict.

    incr is how much each item counts for.
    If normalize is truthy, the returned counts are rescaled so that they
    sum to `normalize`.
    """
    ret = {}
    total = 0  # renamed from `sum`, which shadowed the builtin
    for v in vals:
        if v not in ret:
            ret[v] = 0
        ret[v] += incr
        total += incr
    if normalize:
        for k in ret:
            # BUGFIX: under Python 2 (this module uses print statements),
            # normalize/total with ints truncates to 0; force float division.
            ret[k] *= float(normalize) / total
    return ret
def centers2edges(centers):
    """Converts a set of bin centers into edges.

    Edges are the midpoints between consecutive distinct centers, bracketed
    by +/-1e99 sentinels so every value falls into some bin.
    """
    ordered = sorted(set(centers))
    edges = [-1e99]
    for lo, hi in zip(ordered, ordered[1:]):
        edges.append((lo + hi) / 2.0)
    edges.append(1e99)
    return edges
def histfromcenters(vals, centers, incr=1, normalize=0):
    """Makes a histogram from a set of numbers, with bin centers given.

    incr is how much to increment each bin per item.
    If normalize is true, then output has sum=normalize.
    """
    from bisect import bisect
    edges = centers2edges(centers)
    binned = []
    for v in vals:
        i = bisect(edges, v)
        # Every value exceeds the -1e99 sentinel edge, so i >= 1.
        assert i > 0
        binned.append(centers[i-1])
    # isinstance replaces the type(vals[0]) == type(1.3) check; float incr
    # avoids integer truncation when normalizing (Python 2 module).
    if isinstance(vals[0], float) or normalize:
        incr = float(incr)
    return makehist(binned, incr, normalize)
def histogram(vals, binwidth=1, incr=1, normalize=0):
    """Makes a histogram from a set of values of arbitrary type.

    binwidth determines which values all map to the same value.
    incr is how much to increment each bin per item.
    If normalize is true, then output has sum=normalize.
    """
    try:
        binned = [(v // binwidth) * binwidth for v in vals]
    except TypeError:
        # Non-numeric values: every distinct value becomes its own bin.
        binned = vals
    # isinstance replaces the type(vals[0]) == type(1.3) check; float incr
    # avoids integer truncation when normalizing (Python 2 module).
    if isinstance(vals[0], float) or normalize:
        incr = float(incr)
    return makehist(binned, incr, normalize)
def cumhist(hist):
    """Takes a histogram and makes a cumulative histogram out of it."""
    running = 0
    out = {}
    for key in sorted(hist):
        running += hist[key]
        out[key] = running
    return out
def multhist(hists, asone=1):
    """Takes a set of histograms and combines them.

    If asone is true, then returns one histogram of key->[val1, val2, ...].
    Otherwise, returns one histogram per input.
    """
    ret = {}
    num = len(hists)
    for i, h in enumerate(hists):
        for k in sorted(h):
            if k not in ret:
                # Missing keys default to 0 for the histograms lacking them.
                ret[k] = [0]*num
            ret[k][i] = h[k]
    if asone: return ret
    # otherwise, convert to separate ones
    toret = [{} for _ in hists]
    # FIX: dict.iteritems() only exists on Python 2; .items() behaves the
    # same there and also works on Python 3.
    for k, vals in ret.items():
        for i, v in enumerate(vals):
            toret[i][k] = v
    return toret
def collapsebins(hist, bin, func=lambda b: b>bin):
"""Collapses bins of a histogram into one, based on the given function.
The function is given all bins (keys) and for every bin that tests positive,
it will collapse it to the chosen bin.
This function copies the given histogram, rather than modifying it directly.
"""
hist = dict(**hist)
todel = []
for b in hist:
if func(b):
hist[bin] += hist[b]
todel.append(b)
for b in todel:
del hist[b]
return hist
if __name__ == '__main__':
    # Read values from stdin (one per line), histogram them with any
    # command-line args forwarded to histogram(), and print "key count".
    lines = [specialize(l.strip()) for l in sys.stdin]
    if not lines: sys.exit()
    args = [specialize(a) for a in sys.argv[1:]]
    hist = histogram(lines, *args)
    for k in sorted(hist):
        # NOTE: Python 2 print statement — this module predates Python 3.
        print '%s %s' % (k, hist[k])
|
# -*- coding: utf-8 -*-
'''
Created by 15 cm on 11/22/15 6:33 PM
Copyright © 2015 15cm. All rights reserved.
'''
__author__ = '15cm'
from data.data_handler import DataHandler
from feature.superpixel import SuperPixel
from mynp import np
import os
CURPATH = os.path.split(os.path.realpath(__file__))[0]
class Bow:
    """Bag-of-words feature builder on top of a fitted kmeans model.

    kmeans must expose `.n_cluster` and `.predict(X)`.
    """

    def __init__(self, kmeans):
        self.kmeans = kmeans
        # Directory where BoW matrices are persisted.
        self.bow_path = os.path.join(CURPATH, 'bow')

    def train_sift(self, X_list):
        """Compute a BoW row per descriptor set and persist it with labels."""
        bow_list = []
        for X in X_list:
            bow_list.append(self.compute(X))
        self.bow_matrix = reduce(np.vstack, bow_list)
        dh = DataHandler()
        dh.load()
        # First column holds the label of each sample.
        sample_y = np.empty((len(X_list), 1))
        for i in range(len(sample_y)):
            sample_y[i][0] = dh.get_lables(id=i)
        # BUGFIX: np.hstack takes a single sequence of arrays; the original
        # passed two positional arguments, which raises a TypeError.
        sample_data = np.hstack((sample_y, self.bow_matrix))
        # save sample data
        np.savetxt(os.path.join(self.bow_path, 'bow_sift.txt'), sample_data)

    def train_pixel(self, image_list):
        """Segment each image into superpixels and count their descriptors."""
        superpixel_list = [SuperPixel(x) for x in image_list]
        for sp in superpixel_list:
            sp.segment()
            sp.count_descriptors()
        # for

    def compute(self, X):
        """Return the cluster-occurrence histogram (bag of words) for X."""
        bow = [0 for x in range(self.kmeans.n_cluster)]
        clusters = self.kmeans.predict(X)
        for i in clusters:
            bow[i] += 1
        return bow

    def load(self, bow_name):
        """Load a previously saved BoW matrix by file name.

        BUGFIX: the original ignored bow_name and tried to np.loadtxt the
        directory path itself.
        """
        self.bow_matrix = np.loadtxt(os.path.join(self.bow_path, bow_name))
|
"""Script for running Pokemon Simulation."""
from threading import Thread
from queue import Queue
from time import time
from agent.basic_pokemon_agent import PokemonAgent
from agent.basic_planning_pokemon_agent import BasicPlanningPokemonAgent
from battle_engine.pokemon_engine import PokemonEngine
from file_manager.log_writer import LogWriter
from file_manager.team_reader import TeamReader
from simulation.base_type_logging_simulation import BaseLoggingSimulation
from simulation.base_simulation import load_config
from stats.calc import calculate_avg_elo
class PokemonSimulation(BaseLoggingSimulation):
    """Class for Pokemon Simulation."""

    def __init__(self, **kwargs):
        """
        Initialize this simulation.

        Args:
            config (str): Filename for the population configs.
            data_delay (int): Number of matches between gathering type data.
            multithread (bool): Whether or not to run this simulation multithreaded.
        """
        pkmn_kwargs = kwargs
        pkmn_kwargs["game"] = PokemonEngine()
        pkmn_kwargs["prefix"] = "PKMN"
        self.config = load_config(kwargs["config"])
        # Created lazily by init_type_log_writer().
        self.type_log_writer = None
        self.data_delay = kwargs["data_delay"]
        self.multithread = kwargs.get("multithread", False)
        # NOTE(review): the kwargs dict is passed positionally — presumably
        # the base __init__ accepts a dict argument; confirm.
        super().__init__(pkmn_kwargs)

    def add_agents(self):
        """Add the agents to this model."""
        for conf in self.config:
            # One team file per population entry; the first parsed team is used.
            conf_tr = TeamReader(prefix=conf["team_file"])
            conf_tr.process_files()
            conf_team = conf_tr.teams[0]
            for _ in range(int(self.num_players * conf["proportion"])):
                pkmn_agent = None
                if conf["agent_class"] == "basic":
                    pkmn_agent = PokemonAgent(
                        team=conf_team
                    )
                    pkmn_agent.type = conf["agent_type"]
                elif conf["agent_class"] == "basicplanning":
                    pkmn_agent = BasicPlanningPokemonAgent(
                        tier=conf["agent_tier"],
                        team=conf_team
                    )
                    pkmn_agent.type = conf["agent_type"]
                else:
                    raise RuntimeError("Invalid agent_class: {}".format(conf["agent_class"]))
                self.ladder.add_player(pkmn_agent)

    def init_type_log_writer(self):
        """Initialize Type Average Elo LogWriter."""
        header = []
        for conf in self.config:
            header.append(conf["agent_type"])
        self.type_log_writer = LogWriter(header, prefix="PKMNTypes", directory=self.directory)

    def run(self):
        """Run this simulation (multithreaded when configured)."""
        if not self.multithread:
            super().run()
            return
        battle_queue = Queue()
        battle_results_queue = Queue()
        type_results_queue = Queue()
        # One placeholder per game; worker threads pull until the queue drains.
        for num in range(self.num_games):
            battle_queue.put(num)
        start_time = time()
        # Threads to run the battles
        for _ in range(4):
            battle_thread = Thread(target=battle, args=(self,
                                                        battle_queue,
                                                        battle_results_queue,
                                                        type_results_queue,
                                                        start_time))
            battle_thread.start()
        battle_queue.join()
        # Drain the result queues on the main thread (the log writers are not
        # assumed to be thread-safe).
        while not battle_results_queue.empty():
            output, player1, player2 = battle_results_queue.get()
            self.write_player_log(output, player1, player2)
            battle_results_queue.task_done()
        while not type_results_queue.empty():
            data_line = type_results_queue.get()
            self.type_log_writer.write_line(data_line)
            type_results_queue.task_done()
def battle(main_sim, battle_queue, output_queue, type_queue, start_time):
    """
    Code for a single battle thread to run.

    Args:
        main_sim (BaseSimulation): Simulation that is spawning this thread.
        battle_queue (Queue): Queue with placeholders to count number of
            battles remaining.
        output_queue (Queue): Queue to hold the results of the battles.
        type_queue (Queue): Queue to hold the rating data broken down by agent type.
        start_time (time): Time object to hold simulation starting time.
    """
    while not battle_queue.empty():
        battle_queue.get()
        results = None
        # Retry until the ladder yields a game (RuntimeError presumably
        # signals a transient matchmaking/lock conflict — confirm).
        while results is None:
            try:
                results = main_sim.ladder.run_game()
            except RuntimeError as rte:
                print(rte, main_sim.ladder.thread_lock.locked())
        output_queue.put(results)
        # Sample per-type average Elo every data_delay battles.
        if battle_queue.qsize() % main_sim.data_delay == 0:
            type_queue.put(calculate_avg_elo(main_sim.ladder))
        main_sim.print_progress_bar(main_sim.num_games - battle_queue.qsize(), start_time)
        battle_queue.task_done()
|
from .category import Category
class Insect(Category):
    """Word category listing common insects (and similar small creatures)."""
    _name = "insect"
    _values = [
        "ant",
        "bee",
        "beetle",
        "butterfly",
        "cicada",
        "cockroach",
        "cricket",
        "dragonfly",
        "earwig",
        "firefly",
        "fly",
        "grasshopper",
        "hornet",
        "ladybird",
        "locust",
        "maggot",
        "mantis",
        "mosquito",
        "moth",
        "scarab",
        "silkworm",
        "wasp",
        "woodlouse",
        "worm",
    ]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config_source import ConfigResult, ConfigSource, ObjectType
from .errors import ConfigLoadError
# Public API of this package, re-exported from the submodules imported above.
__all__ = [
    "ConfigSource",
    "ConfigLoadError",
    "ObjectType",
    "ConfigResult",
]
|
#!/usr/bin/env python3
# Copyright 2020 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
from typing import Any
import gear # type: ignore
def connect(host : str) -> Any:
    """Return a gear client connected (over TLS) to the given gearman host.

    Blocks up to 10 seconds waiting for the server to become reachable.
    """
    client = gear.Client()
    # 4730 is the standard gearman port; the TLS key/cert/CA files are
    # resolved relative to the current working directory.
    client.addServer(host, 4730, 'tls.key', 'tls.crt', 'ca.crt')
    client.waitForServer(timeout=10)
    return client
def run(client : Any, job_name : str, args : Any = None) -> Any:
    """Submit a gearman job and block until it completes.

    Args:
        client: a connected gear.Client.
        job_name: name of the registered gearman function.
        args: JSON-serializable payload; defaults to an empty dict.

    Returns the job's first data packet decoded from JSON.
    """
    # BUGFIX: the original used a mutable default (`dict()`), which is shared
    # across all calls; use None as the sentinel instead.
    if args is None:
        args = {}
    job = gear.Job(job_name.encode('utf-8'), json.dumps(args).encode('utf-8'))
    client.submitJob(job, timeout=300)
    # Poll until the server reports completion.
    while not job.complete:
        time.sleep(0.1)
    return json.loads(job.data[0])
if __name__ == '__main__':
    # Smoke test: query the scheduler's "status" job over gearman.
    print(run(connect("scheduler"), "status"))
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 tianyou pan <sherry0429 at SOAPython>
"""
import falcon
class BaseFalcon(object):
    """Base class exposing the falcon HTTP status codes used by handlers."""
    OK = falcon.HTTP_200
    # NOTE(review): ERROR maps to 404 (not found) rather than a 5xx —
    # presumably intentional; confirm against the API's error convention.
    ERROR = falcon.HTTP_404
|
import socket
from .task import Task
from . import helpers
from .app_settings import TASKS_HOST,TASKS_PORT
def push_task_to_queue(a_callable, *args, **kwargs):
    """Persist a new Task and hand it to the task server over TCP.

    Returns the server's acknowledgement payload (at most 1024 bytes).
    """
    new_task = Task(a_callable, *args, **kwargs)
    new_task = helpers.save_task_to_db(new_task)  # returns with db_id
    # BUGFIX: use the socket as a context manager so it is closed even when
    # connect/send raises (the original leaked the socket on error).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((TASKS_HOST, TASKS_PORT))
        # BUGFIX: sendall retries until the whole payload is written;
        # plain send() may transmit only a prefix.
        sock.sendall(helpers.serielize(new_task))
        received = sock.recv(1024)
    return received
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.from datetime import datetime
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
# Single shared SQLAlchemy instance; bound to the Flask app elsewhere.
db = SQLAlchemy()


class TimestampMixin(object):
    """Adds created/updated audit columns to a model."""
    # Set once at INSERT time.
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # Refreshed automatically on every UPDATE; NULL until the first update.
    updated = db.Column(db.DateTime, onupdate=datetime.utcnow)
class Model(TimestampMixin, db.Model):
    """A stored metabolic model, serialized as JSONB."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(256), nullable=False)
    # Full model document, stored as PostgreSQL JSONB.
    model_serialized = db.Column(postgresql.JSONB, nullable=False)
    organism_id = db.Column(db.Integer, nullable=False)
    # Optional: models may exist outside any project.
    project_id = db.Column(db.Integer)
    default_biomass_reaction = db.Column(db.String(256), nullable=False)
    preferred_map_id = db.Column(db.Integer, nullable=True)
    # Whether this is an enzyme-constrained (EC) model.
    ec_model = db.Column(db.Boolean, nullable=False)

    def __repr__(self):
        """Return a printable representation."""
        return f"<{self.__class__.__name__} {self.id}: {self.name}>"
|
# -*- coding: utf-8 -*-
from flask_restx._http import HTTPStatus as flaskplus_HTTPStatus
# Re-export flask_restx's HTTPStatus under a stable local name so the rest
# of the codebase does not import from an underscore-prefixed module.
HTTPStatus = flaskplus_HTTPStatus
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch.nn as nn
def plot_filters_single_channel_big(t, place_to_store):
    """Render every kernel of a 4D weight tensor as one big grayscale heatmap.

    t: weight tensor of shape (out_channels, in_channels, kH, kW).
    place_to_store: path the combined figure is saved to.
    """
    # setting the rows and columns
    nrows = t.shape[0] * t.shape[2]
    ncols = t.shape[1] * t.shape[3]
    npimg = np.array(t.numpy(), np.float32)
    npimg = npimg.transpose((0, 2, 1, 3))
    npimg = npimg.ravel().reshape(nrows, ncols)
    npimg = npimg.T
    fig, ax = plt.subplots(figsize=(ncols / 10, nrows / 200))
    sns.heatmap(npimg, xticklabels=False, yticklabels=False, cmap='gray', ax=ax, cbar=False)
    # BUGFIX: the original never used place_to_store — the figure was neither
    # saved nor closed, leaking one matplotlib figure per call.
    fig.savefig(place_to_store)
    plt.close(fig)
def plot_filters_single_channel(t, place_to_store):
    """Save one normalized grayscale image per (output, input) kernel pair."""
    out_channels = t.shape[0]
    in_channels = t.shape[1]
    for out_idx in range(out_channels):
        for in_idx in range(in_channels):
            plt.figure(figsize=(7, 7))
            kernel = np.array(t[out_idx, in_idx].numpy(), np.float32)
            # Standardize, shift by 0.5, and clamp into [0, 1] for display.
            kernel = (kernel - np.mean(kernel)) / np.std(kernel)
            kernel = np.minimum(1, np.maximum(0, (kernel + 0.5)))
            plt.imshow(kernel)
            name = str(out_idx) + '-' + str(in_idx)
            plt.axis('off')
            plt.savefig(f'{place_to_store}/{name}.png')
            plt.close()
def plot_filters_multi_channel(t, place_to_store):
    """Save one normalized RGB image per output-channel kernel."""
    for out_idx in range(t.shape[0]):
        plt.figure(figsize=(7, 7))
        kernel = np.array(t[out_idx].numpy(), np.float32)
        # Standardize, shift by 0.5, and clamp into [0, 1] for display.
        kernel = (kernel - np.mean(kernel)) / np.std(kernel)
        kernel = np.minimum(1, np.maximum(0, (kernel + 0.5)))
        # Channels-last layout expected by imshow.
        kernel = kernel.transpose((1, 2, 0))
        plt.imshow(kernel)
        plt.axis('off')
        name = str(out_idx)
        plt.savefig(f'{place_to_store}/{name}.png')
        plt.close()
# place_to_store example: './multi' - you have to make such a folder before running.
# layer_num: index of the convolutional layer you are interested in.
# single_channel = True works only in the case of 3 channels (so it gives out the RGB pictures).
def plot_weights(model, layer_num, place_to_store, single_channel=True, collated=False):
    """Visualize the kernels of the convolutional layer at `layer_num`.

    single_channel selects per-kernel grayscale plots (optionally collated
    into one big heatmap); otherwise RGB plots are produced for 3-channel
    weights only.
    """
    # Extract the layer of interest and bail out unless it is a convolution.
    layer = model.features[layer_num]
    if not isinstance(layer, nn.Conv2d):
        print("Can only visualize layers which are convolutional")
        return
    # Raw weight tensor of the convolution.
    weight_tensor = layer.weight.data
    if single_channel:
        plotter = plot_filters_single_channel_big if collated else plot_filters_single_channel
        plotter(weight_tensor, place_to_store)
        return
    if weight_tensor.shape[1] == 3:
        plot_filters_multi_channel(weight_tensor, place_to_store)
    else:
        print("Can only plot weights with three channels with single channel = False")
def extract_filters(generator, layer_num):
    """Return the list of per-output-channel kernel tensors of a conv layer.

    Returns None (implicitly) when the layer at `layer_num` is not an
    nn.Conv2d, mirroring the original behavior.
    """
    layer = generator.features[layer_num]
    if isinstance(layer, nn.Conv2d):
        weights = layer.weight.data
        # One tensor per output channel; the comprehension also avoids the
        # original's shadowing of the builtin `filter`.
        return [weights[i] for i in range(weights.shape[0])]
|
# -*- coding: utf-8 -*-
import json
import os
import sys
import time
from datetime import datetime, timedelta
from config import checkin_map, get_checkin_info, get_notice_info, env2config
from motto import Motto
from utils.message import push_message
def main_handler(event, context):
    """Entry point for both GitHub Actions and serverless (cloud function) runs.

    event: dict carrying a "Message" key (serverless trigger), or None (CLI).
    context: serverless runtime context; unused.
    """
    start_time = time.time()
    # NOTE(review): utcnow()+8h targets UTC+8 (China Standard Time) — confirm.
    utc_time = datetime.utcnow() + timedelta(hours=8)
    if "IS_GITHUB_ACTION" in os.environ:
        # GitHub Actions: configuration comes from environment variables.
        message = os.getenv("ONLY_MESSAGE")
        data = env2config()
    else:
        # Serverless/CLI: configuration comes from the bundled JSON file.
        if isinstance(event, dict):
            message = event.get("Message")
        else:
            message = None
        with open(os.path.join(os.path.dirname(__file__), "config/config.json"), "r", encoding="utf-8") as f:
            data = json.loads(f.read())
    try:
        motto = data.get("MOTTO")
        notice_info = get_notice_info(data=data)
        check_info = get_checkin_info(data=data)
    except Exception as e:
        # Configuration errors are fatal; surface them unchanged.
        raise e
    content_list = [f"当前时间: {utc_time}"]
    if message == "xmly":
        # "xmly" runs execute only the Ximalaya cookie check-ins.
        if check_info.get("xmly_cookie_list"):
            check_name, check_func = checkin_map.get("XMLY_COOKIE_LIST")
            for check_item in check_info.get("xmly_cookie_list", []):
                # Skip entries still containing template placeholders.
                if "xxxxxx" not in str(check_item) and "多账号" not in str(check_item):
                    try:
                        msg = check_func(check_item).main()
                        content_list.append(f"【{check_name}】\n{msg}")
                    except Exception as e:
                        # One failing account never aborts the whole run.
                        content_list.append(f"【{check_name}】\n{e}")
                        print(check_name, e)
                else:
                    print(f"检测【{check_name}】脚本到配置文件包含模板配置,进行跳过")
    else:
        # Normal runs iterate every registered check-in except the Ximalaya
        # one, which is handled by the dedicated "xmly" trigger above.
        for one_check, check_tuple in checkin_map.items():
            check_name, check_func = check_tuple
            if one_check not in ["XMLY_COOKIE_LIST"]:
                if check_info.get(one_check.lower()):
                    print(f"----------已检测到正确的配置,并开始执行 {one_check} 签到----------")
                    for check_item in check_info.get(one_check.lower(), []):
                        # Skip entries still containing template placeholders.
                        if "xxxxxx" not in str(check_item) and "多账号" not in str(check_item):
                            try:
                                msg = check_func(check_item).main()
                                content_list.append(f"【{check_name}】\n{msg}")
                            except Exception as e:
                                # One failing account never aborts the run.
                                content_list.append(f"【{check_name}】\n{e}")
                                print(check_name, e)
                        else:
                            print(f"检测【{check_name}】脚本到配置文件包含模板配置,进行跳过")
                else:
                    print(f"----------未检测到正确的配置,并跳过执行 {one_check} 签到----------")
    if motto:
        # Best-effort: a failing motto fetch never blocks the report.
        try:
            msg_list = Motto().main()
        except Exception as e:
            print(e)
            msg_list = []
        content_list += msg_list
    content_list.append(f"任务使用时间: {int(time.time() - start_time)} 秒")
    if message == "xmly":
        # For xmly runs, only push notifications at 09:00 and 18:00 sharp.
        if utc_time.hour in [9, 18] and utc_time.minute == 0:
            flag = True
        else:
            flag = False
    else:
        flag = True
    if flag:
        push_message(content_list=content_list, notice_info=notice_info)
    return
if __name__ == "__main__":
    # CLI entry: the optional first argument becomes the event "Message"
    # (e.g. `python main.py xmly`).
    args = sys.argv
    if len(args) > 1:
        event = {"Message": args[1]}
    else:
        event = None
    main_handler(event=event, context=None)
|
__author__ = 'HaoBin'


class Logger(object):
    """Tee-style writer that mirrors everything written to it into both the
    real stdout and a log file (intended to be assigned to ``sys.stdout``).
    """

    def __init__(self, filename=None):
        """Open the log file.

        :param filename: target log file path. Bug fix: the original accepted
            a filename but unconditionally replaced it with a timestamped
            ``permute_output_*.txt`` name, so the parameter was dead. An
            explicit filename is now honoured; ``None`` keeps the original
            timestamped default behaviour.
        """
        # Keep a handle on the real stdout so output still reaches the terminal.
        self.terminal = sys.stdout
        if filename is None:
            filename = "permute_output_" + str(time.time()) + ".txt"
        self.log = open(filename, "w")

    def write(self, message):
        # Duplicate every write to the terminal and the log file.
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # Required so this object can stand in for sys.stdout; buffering is
        # left to the underlying streams.
        pass
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
Contains Canvas DAO implementations.
"""
from restclients_core.dao import DAO, LiveDAO
from commonconf import settings
from urllib3 import PoolManager
from urllib3.util.retry import Retry
from os.path import abspath, dirname
import os
import re
class Canvas_DAO(DAO):
    """DAO for the Canvas REST API, authenticated with an OAuth bearer token."""

    def service_name(self):
        return "canvas"

    def service_mock_paths(self):
        # Mock response files live alongside this module under ./resources.
        return [abspath(os.path.join(dirname(__file__), "resources"))]

    def _custom_headers(self, method, url, headers, body):
        # Every request carries the configured OAuth bearer token.
        bearer_key = self.get_service_setting("OAUTH_BEARER", "")
        return {"Authorization": "Bearer {}".format(bearer_key)}

    def _edit_mock_response(self, method, url, headers, body, response):
        """For mocked POST/PUT requests, substitute a canned response body
        read from disk; 404 if no canned file exists for the URL/method.
        """
        if "POST" == method or "PUT" == method:
            if response.status != 400:
                path = "{path}/resources/canvas/file{url}.{method}".format(
                    path=abspath(dirname(__file__)), url=url, method=method)
                try:
                    # Bug fix: the original opened the file without ever
                    # closing it; the with-block releases the handle.
                    with open(path) as handle:
                        response.data = handle.read()
                    response.status = 200
                except IOError:
                    response.status = 404
class CanvasFileDownload_DAO(DAO):
    """DAO variant used to fetch Canvas file-attachment content."""

    def service_name(self):
        # Shares the "canvas" service configuration with Canvas_DAO.
        return "canvas"

    def _get_live_implementation(self):
        # Live traffic is handled by the redirect-following implementation.
        service = self.service_name()
        return CanvasFileDownloadLiveDAO(service, self)
class CanvasFileDownloadLiveDAO(LiveDAO):
    """Live DAO for Canvas file downloads that may redirect across hosts."""

    def _fix_url_host(self, url):
        """Rewrite the URL's scheme+host to the configured service HOST,
        so Canvas prod/test/beta hosts are never mixed."""
        configured_host = self.dao.get_service_setting("HOST")
        return re.sub(r'^https://[^/]+', configured_host, url)

    def load(self, method, url, headers, body):
        fixed_url = self._fix_url_host(url)
        return self.get_pool().urlopen(method, fixed_url, headers=headers)

    def get_pool(self):
        return self.create_pool()

    def create_pool(self):
        """Build a PoolManager (rather than a per-host pool) so redirects to
        other hosts can be followed."""
        ca_bundle = self.dao.get_setting("CA_BUNDLE",
                                         "/etc/ssl/certs/ca-bundle.crt")
        return PoolManager(
            cert_reqs="CERT_REQUIRED",
            ca_certs=ca_bundle,
            timeout=self._get_timeout(),
            maxsize=self._get_max_pool_size(),
            block=True,
            retries=Retry(total=1, connect=0, read=0, redirect=1))
|
# coding: utf-8
# Entry-point script: delegate to the command-line manager object exposed by
# the findaconf package (presumably a Flask-Script style manager -- confirm
# in findaconf's package __init__).
from findaconf import manager

manager.run()
|
from PiControl.models import Pin, Schedule
import rollbar
import sys
import RPi.GPIO as RPIO
class PinController(object):
    """Wrapper around the Raspberry Pi GPIO pins stored in the Pin model."""

    def __init__(self):
        # BCM numbering matches the Pin.pin_number values in the database.
        RPIO.setmode(RPIO.BCM)
        self.my_pins = None
        self.set_all_pins()

    def get_thermometers(self):
        """Return the queryset of pins flagged as thermometers."""
        return self.my_pins.filter(is_thermometer=True)

    def set_all_pins(self):
        """(Re)configure every pin from the database; pins whose GPIO setup
        fails are reported to rollbar and removed from the working set."""
        RPIO.cleanup()
        self.my_pins = Pin.objects.all()
        for pin in self.my_pins:
            try:
                RPIO.setup(pin.pin_number, pin.get_direction())
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not swallowed; still best-effort per pin.
                rollbar.report_exc_info(sys.exc_info())
                # Bug fix: QuerySet.exclude() returns a NEW queryset -- the
                # original discarded it, so broken pins were never removed.
                self.my_pins = self.my_pins.exclude(id=pin.id)

    def get_next_schedules(self, amount=3):
        """Return the next *amount* schedules.

        TODO(review): unfinished stub -- it always returns None (the original
        also issued an unused ``Schedule.objects.all()`` query, removed here).
        """
        return None

    def get_dashboard_data(self):
        """Bundle pins, thermometers and upcoming schedules for the dashboard."""
        return {
            'pins': self.my_pins,
            'thermometers': self.get_thermometers(),
            'schedules': self.get_next_schedules()
        }

    def get_all_pins(self):
        return {'pins': self.my_pins}
|
import json
def normalization(artwork):
    """Normalize the 'artist' field of a Met artwork record to a canonical name.

    Returns the (mutated) record; falsy input (None, {}) is returned unchanged.
    """
    # Ordered substring replacements. Bug fix: longer variants of the same name
    # must precede their shorter substrings -- the original applied
    # "El Greco (Domenikos Theotokopoulos)" BEFORE the "... and Workshop"
    # variant, so the longer pattern could never match and records were left
    # as "El Greco and Workshop".
    # NOTE(review): substring replace can still compound on repeated runs
    # (e.g. "Camille Corot" inside an already-normalized name -- the
    # quadrupled "Pierre-" entry below suggests that happened); each record
    # is assumed to be normalized only once.
    replacements = [
        ("Attributed to Goya (Francisco de Goya y Lucientes)", "Francisco Goya"),
        ("Goya (Francisco de Goya y Lucientes)", "Francisco Goya"),
        ("El Greco (Domenikos Theotokopoulos) and Workshop", "El Greco"),
        ("El Greco (Domenikos Theotokopoulos)", "El Greco"),
        ("Jacques Louis David", "Jacques-Louis David"),
        ("Vel\u00e1zquez (Diego Rodr\u00edguez de Silva y Vel\u00e1zquez)", "Diego Vel\u00e1zquez"),
        ("Raphael (Raffaello Sanzio or Santi)", "Raphael"),
        ("Titian (Tiziano Vecellio)", "Titian"),
        ("Caravaggio (Michelangelo Merisi)", "Caravaggio"),
        ("Rembrandt (Rembrandt van Rijn)", "Rembrandt"),
        ("Fra Angelico (Guido di Pietro)", "Fra Angelico"),
        ("Botticelli (Alessandro di Mariano Filipepi)", "Botticelli"),
        ("Paolo Uccello (Paolo di Dono)", "Paolo Uccello"),
        ("Giotto di Bondone", "Giotto"),
        ("Duccio di Buoninsegna", "Duccio"),
        ("Pierre-Pierre-Pierre-Pierre-Auguste Renoir", "Pierre-Auguste Renoir"),
        ("Camille Corot", "Jean-Baptiste-Camille Corot"),
        ("Jacopo Tintoretto (Jacopo Robusti)", "Tintoretto"),
    ]
    if artwork:
        artist = artwork['artist']
        for old, new in replacements:
            artist = artist.replace(old, new)
        artwork['artist'] = artist
    return artwork
# Load the dump, normalize every record, then write the result back in place.
with open("met_painting_dump.json", encoding="utf-8") as fp:
    met = json.load(fp)
# Bug fix: under Python 3, map() returns a lazy iterator that json.dump
# cannot serialize -- materialize a list instead.
met = [normalization(artwork) for artwork in met]
# Bug fix: json.dump writes text, so the file must be opened in text mode
# ('w'), not binary ('wb'); both files are now closed deterministically.
with open("met_painting_dump.json", "w", encoding="utf-8") as fp:
    json.dump(met, fp)
|
class Solution:
    def solve(self, s):
        """Sum every maximal run of non-letter characters in *s* as an integer.

        Example: "ab12cd3" -> 12 + 3 = 15. Runs are delimited by ASCII
        letters; the characters between letters are assumed to form valid
        integers (non-digit punctuation would make int() raise, as in the
        original).
        """
        import string
        letters = string.ascii_letters
        total = 0
        digits_run = ''
        for ch in s:
            if ch not in letters:
                digits_run += ch
            else:
                # A letter terminates the current numeric run. (Removed the
                # leftover debug print that was here.)
                if digits_run:
                    total += int(digits_run)
                    digits_run = ''
        # Flush a trailing run that was not followed by a letter.
        if digits_run:
            total += int(digits_run)
        return total
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StorageCredentialArgs', 'StorageCredential']
@pulumi.input_type
class StorageCredentialArgs:
    def __init__(__self__, *,
                 aws_iam_role: Optional[pulumi.Input['StorageCredentialAwsIamRoleArgs']] = None,
                 azure_service_principal: Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 metastore_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a StorageCredential resource.
        :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource.
        """
        # Only register inputs that were explicitly supplied, so unset values
        # stay absent rather than becoming explicit None inputs.
        if aws_iam_role is not None:
            pulumi.set(__self__, "aws_iam_role", aws_iam_role)
        if azure_service_principal is not None:
            pulumi.set(__self__, "azure_service_principal", azure_service_principal)
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if metastore_id is not None:
            pulumi.set(__self__, "metastore_id", metastore_id)
        if name is not None:
            pulumi.set(__self__, "name", name)

    # AWS IAM role configuration block (see StorageCredentialAwsIamRoleArgs).
    @property
    @pulumi.getter(name="awsIamRole")
    def aws_iam_role(self) -> Optional[pulumi.Input['StorageCredentialAwsIamRoleArgs']]:
        return pulumi.get(self, "aws_iam_role")

    @aws_iam_role.setter
    def aws_iam_role(self, value: Optional[pulumi.Input['StorageCredentialAwsIamRoleArgs']]):
        pulumi.set(self, "aws_iam_role", value)

    # Azure service-principal configuration block
    # (see StorageCredentialAzureServicePrincipalArgs).
    @property
    @pulumi.getter(name="azureServicePrincipal")
    def azure_service_principal(self) -> Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']]:
        return pulumi.get(self, "azure_service_principal")

    @azure_service_principal.setter
    def azure_service_principal(self, value: Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']]):
        pulumi.set(self, "azure_service_principal", value)

    # Free-form comment attached to the credential.
    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    # Identifier of the metastore the credential belongs to.
    @property
    @pulumi.getter(name="metastoreId")
    def metastore_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "metastore_id")

    @metastore_id.setter
    def metastore_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "metastore_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _StorageCredentialState:
    def __init__(__self__, *,
                 aws_iam_role: Optional[pulumi.Input['StorageCredentialAwsIamRoleArgs']] = None,
                 azure_service_principal: Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 metastore_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering StorageCredential resources.
        :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource.
        """
        # Only register state fields that were explicitly supplied, so unset
        # values stay absent rather than becoming explicit None inputs.
        if aws_iam_role is not None:
            pulumi.set(__self__, "aws_iam_role", aws_iam_role)
        if azure_service_principal is not None:
            pulumi.set(__self__, "azure_service_principal", azure_service_principal)
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if metastore_id is not None:
            pulumi.set(__self__, "metastore_id", metastore_id)
        if name is not None:
            pulumi.set(__self__, "name", name)

    # AWS IAM role configuration block (see StorageCredentialAwsIamRoleArgs).
    @property
    @pulumi.getter(name="awsIamRole")
    def aws_iam_role(self) -> Optional[pulumi.Input['StorageCredentialAwsIamRoleArgs']]:
        return pulumi.get(self, "aws_iam_role")

    @aws_iam_role.setter
    def aws_iam_role(self, value: Optional[pulumi.Input['StorageCredentialAwsIamRoleArgs']]):
        pulumi.set(self, "aws_iam_role", value)

    # Azure service-principal configuration block
    # (see StorageCredentialAzureServicePrincipalArgs).
    @property
    @pulumi.getter(name="azureServicePrincipal")
    def azure_service_principal(self) -> Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']]:
        return pulumi.get(self, "azure_service_principal")

    @azure_service_principal.setter
    def azure_service_principal(self, value: Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']]):
        pulumi.set(self, "azure_service_principal", value)

    # Free-form comment attached to the credential.
    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    # Identifier of the metastore the credential belongs to.
    @property
    @pulumi.getter(name="metastoreId")
    def metastore_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "metastore_id")

    @metastore_id.setter
    def metastore_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "metastore_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
class StorageCredential(pulumi.CustomResource):
    # NOTE: tfgen-generated resource class; keep edits in sync with the
    # generator rather than hand-modifying behaviour.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 aws_iam_role: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAwsIamRoleArgs']]] = None,
                 azure_service_principal: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAzureServicePrincipalArgs']]] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 metastore_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        > **Private Preview** This feature is in [Private Preview](https://docs.databricks.com/release-notes/release-types.html). Contact your Databricks representative to request access.
        To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:
        - `StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential.
        - ExternalLocation are objects that combine a cloud storage path with a Storage Credential that can be used to access the location.
        ## Example Usage
        For AWS
        ```python
        import pulumi
        import pulumi_databricks as databricks
        external = databricks.StorageCredential("external",
            aws_iam_role=databricks.StorageCredentialAwsIamRoleArgs(
                role_arn=aws_iam_role["external_data_access"]["arn"],
            ),
            comment="Managed by TF")
        external_creds = databricks.Grants("externalCreds",
            storage_credential=external.id,
            grants=[databricks.GrantsGrantArgs(
                principal="Data Engineers",
                privileges=["CREATE TABLE"],
            )])
        ```
        For Azure
        ```python
        import pulumi
        import pulumi_databricks as databricks
        external = databricks.StorageCredential("external",
            azure_service_principal=databricks.StorageCredentialAzureServicePrincipalArgs(
                directory_id=var["tenant_id"],
                application_id=azuread_application["ext_cred"]["application_id"],
                client_secret=azuread_application_password["ext_cred"]["value"],
            ),
            comment="Managed by TF")
        external_creds = databricks.Grants("externalCreds",
            storage_credential=external.id,
            grants=[databricks.GrantsGrantArgs(
                principal="Data Engineers",
                privileges=["CREATE TABLE"],
            )])
        ```
        ## Import
        This resource can be imported by name
        ```sh
        $ pulumi import databricks:index/storageCredential:StorageCredential this <name>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[StorageCredentialArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        > **Private Preview** This feature is in [Private Preview](https://docs.databricks.com/release-notes/release-types.html). Contact your Databricks representative to request access.
        To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:
        - `StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential.
        - ExternalLocation are objects that combine a cloud storage path with a Storage Credential that can be used to access the location.
        ## Example Usage
        For AWS
        ```python
        import pulumi
        import pulumi_databricks as databricks
        external = databricks.StorageCredential("external",
            aws_iam_role=databricks.StorageCredentialAwsIamRoleArgs(
                role_arn=aws_iam_role["external_data_access"]["arn"],
            ),
            comment="Managed by TF")
        external_creds = databricks.Grants("externalCreds",
            storage_credential=external.id,
            grants=[databricks.GrantsGrantArgs(
                principal="Data Engineers",
                privileges=["CREATE TABLE"],
            )])
        ```
        For Azure
        ```python
        import pulumi
        import pulumi_databricks as databricks
        external = databricks.StorageCredential("external",
            azure_service_principal=databricks.StorageCredentialAzureServicePrincipalArgs(
                directory_id=var["tenant_id"],
                application_id=azuread_application["ext_cred"]["application_id"],
                client_secret=azuread_application_password["ext_cred"]["value"],
            ),
            comment="Managed by TF")
        external_creds = databricks.Grants("externalCreds",
            storage_credential=external.id,
            grants=[databricks.GrantsGrantArgs(
                principal="Data Engineers",
                privileges=["CREATE TABLE"],
            )])
        ```
        ## Import
        This resource can be imported by name
        ```sh
        $ pulumi import databricks:index/storageCredential:StorageCredential this <name>
        ```
        :param str resource_name: The name of the resource.
        :param StorageCredentialArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: an args-object call versus
        # individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(StorageCredentialArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       aws_iam_role: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAwsIamRoleArgs']]] = None,
                       azure_service_principal: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAzureServicePrincipalArgs']]] = None,
                       comment: Optional[pulumi.Input[str]] = None,
                       metastore_id: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Normalize and validate resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ is reserved for the get() lookup path (opts.id set).
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = StorageCredentialArgs.__new__(StorageCredentialArgs)
            __props__.__dict__["aws_iam_role"] = aws_iam_role
            __props__.__dict__["azure_service_principal"] = azure_service_principal
            __props__.__dict__["comment"] = comment
            __props__.__dict__["metastore_id"] = metastore_id
            __props__.__dict__["name"] = name
        super(StorageCredential, __self__).__init__(
            'databricks:index/storageCredential:StorageCredential',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            aws_iam_role: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAwsIamRoleArgs']]] = None,
            azure_service_principal: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAzureServicePrincipalArgs']]] = None,
            comment: Optional[pulumi.Input[str]] = None,
            metastore_id: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None) -> 'StorageCredential':
        """
        Get an existing StorageCredential resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _StorageCredentialState.__new__(_StorageCredentialState)
        __props__.__dict__["aws_iam_role"] = aws_iam_role
        __props__.__dict__["azure_service_principal"] = azure_service_principal
        __props__.__dict__["comment"] = comment
        __props__.__dict__["metastore_id"] = metastore_id
        __props__.__dict__["name"] = name
        return StorageCredential(resource_name, opts=opts, __props__=__props__)

    # AWS IAM role output block, when configured.
    @property
    @pulumi.getter(name="awsIamRole")
    def aws_iam_role(self) -> pulumi.Output[Optional['outputs.StorageCredentialAwsIamRole']]:
        return pulumi.get(self, "aws_iam_role")

    # Azure service-principal output block, when configured.
    @property
    @pulumi.getter(name="azureServicePrincipal")
    def azure_service_principal(self) -> pulumi.Output[Optional['outputs.StorageCredentialAzureServicePrincipal']]:
        return pulumi.get(self, "azure_service_principal")

    # Free-form comment attached to the credential.
    @property
    @pulumi.getter
    def comment(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "comment")

    # Identifier of the metastore the credential belongs to.
    @property
    @pulumi.getter(name="metastoreId")
    def metastore_id(self) -> pulumi.Output[str]:
        return pulumi.get(self, "metastore_id")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource.
        """
        return pulumi.get(self, "name")
|
def min2Broja(a, b):
    """Return the smaller of two values."""
    return a if a < b else b
def min3Broja(a, b, c):
    """Return the smallest of three values via pairwise comparison."""
    smallest_of_first_two = min2Broja(a, b)
    return min2Broja(smallest_of_first_two, c)
# Read three numbers from the user and print the smallest one.
prviBroj = float(input("Unesite 1. broj: "))
drugiBroj = float(input("Unesite 2. broj: "))
treciBroj = float(input("Unesite 3. broj: "))
# manjiOdPrvaDva = min2Broja(prviBroj, drugiBroj)
# manjiOdPrvaDvaITreceg = min2Broja(manjiOdPrvaDva, treciBroj)
manjiOdPrvaDvaITreceg = min3Broja(prviBroj, drugiBroj, treciBroj)
print("Manji od ta 3 je: " + str(manjiOdPrvaDvaITreceg))
# The four kinds of functions (teaching note):
# 1. takes NO arguments and returns NO value
# 2. takes NO arguments and RETURNS a value
# 3. TAKES arguments and RETURNS a value
# 4. TAKES arguments and returns NO value
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Autocommit pg database wrapper
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# Third-party modules
import psycopg2
from django.db.backends.postgresql_psycopg2.base import DatabaseWrapper as PGDatabaseWrapper
# NOC modules
from .monitor import SpanCursor
class DatabaseWrapper(PGDatabaseWrapper):
    # Django database wrapper that keeps every connection in autocommit mode,
    # so Django's transaction/savepoint machinery is bypassed entirely.

    def _savepoint_allowed(self):
        # Savepoints are meaningless in autocommit mode.
        return False

    def get_new_connection(self, conn_params):
        """
        Return raw psycopg connection. Do not mess with django setup phase
        :param conn_params: psycopg2 connection parameters
        :return: raw psycopg2 connection using SpanCursor
        """
        # SpanCursor comes from .monitor -- presumably instruments queries
        # for tracing; confirm in the monitor module.
        return psycopg2.connect(cursor_factory=SpanCursor, **conn_params)

    def init_connection_state(self):
        """
        Put the freshly created connection into autocommit mode with a
        UTF-8 client encoding.
        :return: None
        """
        self.connection.autocommit = True
        self.connection.set_client_encoding("UTF8")

    def _set_isolation_level(self, level):
        # Intentional no-op: isolation is fixed by autocommit.
        pass

    def _set_autocommit(self, state):
        # Intentional no-op: autocommit is always on (see init_connection_state).
        pass
|
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from baskerville.features.updateable_features import UpdaterRate
from pyspark.sql import functions as F
from baskerville.features.base_feature import TimeBasedFeature
from baskerville.features.feature_minutes_total import FeatureMinutesTotal
from baskerville.features.feature_unique_query_total import \
FeatureUniqueQueryTotal
from baskerville.features.helpers import update_rate
class FeatureUniqueQueryRate(TimeBasedFeature, UpdaterRate):
    """
    For each IP compute the total number of unique queries per minute.
    """
    # Value used when the elapsed-time denominator ('dt') is zero or missing.
    DEFAULT_VALUE = 1.
    # Raw log columns this feature reads.
    COLUMNS = ['querystring', '@timestamp']
    # Features whose running totals are needed to update this rate incrementally.
    DEPENDENCIES = [FeatureMinutesTotal, FeatureUniqueQueryTotal]

    def __init__(self):
        super(FeatureUniqueQueryRate, self).__init__()
        # Per-group aggregation consumed by compute(): count of distinct
        # query strings within the group.
        self.group_by_aggs.update({
            'num_unique_queries': (F.countDistinct(F.col('querystring'))),
        })

    def compute(self, df):
        # Feature value = distinct queries / elapsed minutes ('dt'); when dt
        # is zero (single-request subset) or the division yields null, fall
        # back to the feature default.
        df = df.withColumn(
            self.feature_name,
            F.when(F.col('dt') != 0.,
                   (F.col('num_unique_queries').cast('float') /
                    F.col('dt').cast('float')).cast('float')
                   ).otherwise(F.lit(self.feature_default).cast('float'))
        ).fillna({self.feature_name: self.feature_default})
        return df

    @classmethod
    def update_row(cls, current, past, *args, **kwargs):
        # Recompute the rate from the past and current unique-query totals
        # over the current total-minutes denominator.
        return update_rate(
            past.get(FeatureUniqueQueryTotal.feature_name_from_class()),
            current[FeatureUniqueQueryTotal.feature_name_from_class()],
            current[FeatureMinutesTotal.feature_name_from_class()]
        )

    def update(self, df):
        # Delegate to UpdaterRate.update: minutes-total is the denominator,
        # unique-query-total the numerator.
        return super().update(
            df,
            FeatureMinutesTotal.feature_name_from_class(),
            numerator=FeatureUniqueQueryTotal.feature_name_from_class()
        )
|
import data
import matplotlib.pyplot as plt
import numpy as np
import datetime
def time_graph():
    """Plot the cumulative response count over time and save it to
    Response_times/graph.jpg.

    NOTE(review): data.get_data() is assumed to return
    (dataframe, emails, timestamps); only the timestamp column is used
    here -- confirm against the data module.
    """
    _df, _emails, timestamps = data.get_data()
    stamps = timestamps.to_numpy()
    counts = np.arange(0, len(stamps))
    table = np.column_stack((stamps, counts))
    # Parse "M/D/Y H:M:S" strings into datetime objects for plotting.
    # Bug fix: iterate over the actual number of rows instead of the
    # hard-coded 378, which broke for any other response count.
    for i in range(len(table)):
        date_part, time_part = table[i, 0].split()
        month, day, year = map(int, date_part.split("/"))
        hour, minute, second = map(int, time_part.split(":"))
        table[i, 0] = datetime.datetime(
            year=year, month=month, day=day, hour=hour, minute=minute, second=second
        )
    fig, ax = plt.subplots()
    ax.plot(table[:, 0], table[:, 1])
    ax.set_title("Number of Responses")
    fig.savefig("Response_times/graph.jpg")
    plt.close()
|
from orphics.theory.cosmology import Cosmology
import orphics.tools.stats as stats
import numpy as np
import orphics.tools.io as io
from enlib import enmap
# let's define the bin edges for this test
ellmin = 2
ellmax = 4000
bin_width = 200
bin_edges = np.arange(ellmin,ellmax,bin_width)

# a typical map might be 400sq.deg. with 0.5 arcmin pixels
shape, wcs = enmap.get_enmap_patch(width_arcmin=20*60.,px_res_arcmin=0.5)

# let's get the "mod ell" or |ell| map, which tells us the magnitude of
# the angular wavenumbers in each fourier pixel
modlmap = enmap.modlmap(shape,wcs)

# this let's us create a 2D fourier space binner
binner2d = stats.bin2D(modlmap,bin_edges)
# the 1d binner just needs to know about the bin edges
binner1d = stats.bin1D(bin_edges)

# initialize a cosmology; make sure you have an "output" directory
# for pickling to work
cc = Cosmology(lmax=6000,pickling=True)
theory = cc.theory

# the fine ells we will use
fine1d_ells = np.arange(ellmin,ellmax,1)

# let's test on TT and kk, lCl for TT and gCl for kk
for spec in ['TT','kk']:
    try:
        cl1d = theory.lCl(spec,fine1d_ells) # 1d power spectrum, as obtained from CAMB
        cl2d = theory.lCl(spec,modlmap) # power spectrum on 2d as done with data
    except Exception:
        # narrowed from a bare except so Ctrl-C is not swallowed; 'kk' is a
        # lensing spectrum served by gCl rather than lCl
        cl1d = theory.gCl(spec,fine1d_ells)
        cl2d = theory.gCl(spec,modlmap)

    # the mean binning method
    cents1d, bin1d = binner1d.binned(fine1d_ells,cl1d)

    # isotropic mean intended to mimic isotropic 2d spectra
    # this is the most correct way to do it if you don't want to
    # deal with 2d matrices
    cents1d, bin1d_iso = binner1d.binned(fine1d_ells,cl1d*fine1d_ells)
    cents1d, bin1d_iso_norm = binner1d.binned(fine1d_ells,fine1d_ells)
    bin1d_iso /= bin1d_iso_norm

    # the most correctest way to do it if you can work with 2d matrices
    cents2d, bin2d = binner2d.bin(cl2d)
    assert np.all(np.isclose(cents1d,cents2d))

    # the not so great way to do it, especially if your spectrum is very
    # red like with TT, or has peaks and troughs like with TT
    try:
        interped1d = theory.lCl(spec,cents1d)
    except Exception:
        interped1d = theory.gCl(spec,cents1d)

    # a percentage difference function
    pdiff = lambda x,y: (x-y)*100./y

    # define 2d binning as the truth
    truth = bin2d
    cents = cents2d

    # raw string fixes the invalid "\e" escape in the original label
    pl = io.Plotter(labelX=r"$\ell$",labelY="% diff")
    pl.add(cents,pdiff(bin1d,truth),label="1d mean")
    pl.add(cents,pdiff(bin1d_iso,truth),label="1d iso mean")
    pl.add(cents,pdiff(interped1d,truth),label="interpolated")
    pl.legendOn(loc="upper right")
    pl._ax.set_ylim(-5,30)
    pl.done(spec+"_bin.png")
|
from setuptools import setup

# Packaging metadata for worin, a Korean part-of-speech tagger built on a
# neural network; ships bundled *.hdf5 model weights inside the package.
setup(name='worin',
      version='0.1.0',
      description='A Korean POS Tagger using Neural Network',
      author='YU Jaemyoung',
      author_email='yu@mindscale.kr',
      url='https://github.com/mindscale/worin',
      packages=['worin'],
      package_data={
          '': ['*.txt', '*.md'],
          'worin': ['*.hdf5'],
      },
      install_requires=[
          'numpy',
          'tensorflow',
      ],
      entry_points={
      },
      )
|
#!/usr/bin/env python
# Copyright 2017 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import collections
import uuid
from rdkit import rdBase
from . import cluster_butina
from pipelines_utils import parameter_utils, utils
from pipelines_utils_rdkit import rdkit_utils
### start field name definitions #########################################
# Column name used for the similarity value in each output record.
field_Similarity = "Similarity"
### functions #########################################
def MapClusterToMols(clusters, mols):
    """Annotate each molecule with its cluster index and ordinal position.

    *clusters* is a sequence of index tuples (as produced by Butina
    clustering); every molecule referenced by cluster *i* receives an integer
    "Cluster" property, and every molecule receives a sequential "MolNum"
    property.

    Bug fix: the original file defined this function twice, byte-for-byte
    identically; the redundant duplicate has been removed.
    """
    for cluster_idx, cluster in enumerate(clusters):
        print("Cluster:", cluster_idx, cluster)
        for mol_idx in cluster:
            mols[mol_idx].SetIntProp("Cluster", cluster_idx)
    for mol_num, mol in enumerate(mols):
        mol.SetIntProp("MolNum", mol_num)
def GetDistance(x, y, matrix):
    """Look up the similarity between items *x* and *y* in a lower-triangular
    matrix that omits the diagonal (row ``k`` holds pairs with the items
    ``0..k``). Identical indices return 1.0; otherwise the pair is ordered so
    the larger index selects the row.
    """
    if x == y:
        return 1.0
    lo, hi = (x, y) if x < y else (y, x)
    return matrix[hi - 1][lo]
def GenerateId(cluster, structure):
    """Format a cluster/structure pair as a zero-padded "CCC.SSSS" id."""
    return "%03i.%04i" % (cluster, structure)
### start main execution #########################################
def main():
    """Cluster input molecules with the Butina algorithm and emit the pairwise
    similarity matrix (above a threshold) as tabular records."""
    ### command line args defintions #########################################
    parser = argparse.ArgumentParser(description='RDKit Butina Cluster Matrix')
    parameter_utils.add_default_input_args(parser)
    parser.add_argument('-o', '--output', help="Base name for output file (no extension). If not defined then SDTOUT is used for the structures and output is used as base name of the other files.")
    parser.add_argument('-of', '--outformat', choices=['tsv', 'json'], default='tsv', help="Output format. Defaults to 'tsv'.")
    parser.add_argument('--meta', action='store_true', help='Write metadata and metrics files')
    parser.add_argument('-t', '--threshold', type=float, default=0.7, help='Similarity clustering threshold (1.0 means identical)')
    parser.add_argument('-mt', '--matrixThreshold', type=float, default=0.5, help='Threshold for outputting values (1.0 means identical)')
    parser.add_argument('-d', '--descriptor', type=str.lower, choices=list(cluster_butina.descriptors.keys()), default='rdkit', help='descriptor or fingerprint type (default rdkit)')
    parser.add_argument('-m', '--metric', type=str.lower, choices=list(cluster_butina.metrics.keys()), default='tanimoto', help='similarity metric (default tanimoto)')
    parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode')
    args = parser.parse_args()
    utils.log("Cluster Matrix Args: ", args)
    descriptor = cluster_butina.descriptors[args.descriptor]
    if descriptor is None:
        raise ValueError('Invalid descriptor name ' + args.descriptor)
    input,suppl = rdkit_utils.default_open_input(args.input, args.informat)
    # handle metadata: field names, Java types and provenance for the writer
    source = "cluster_butina_matrix.py"
    datasetMetaProps = {"source":source, "description": "Butina clustering using RDKit " + rdBase.rdkitVersion}
    clsMappings = {
        "Cluster1": "java.lang.Integer",
        "Cluster2": "java.lang.Integer",
        "ID1": "java.lang.String",
        "ID2": "java.lang.String",
        "M1": "java.lang.String",
        "M2": "java.lang.String",
        "Similarity": "java.lang.Float"
    }
    fieldMetaProps = [{"fieldName":"Cluster", "values": {"source":source, "description":"Cluster number"}}]
    fieldNames = collections.OrderedDict()
    fieldNames['ID1'] = 'ID1'
    fieldNames['ID2'] ='ID2'
    fieldNames['Cluster1'] = 'Cluster1'
    fieldNames['Cluster2'] = 'Cluster2'
    fieldNames['Similarity'] = 'Similarity'
    fieldNames['M1'] = 'M1'
    fieldNames['M2'] = 'M2'
    writer,output_base = utils.\
        create_simple_writer(args.output, 'cluster_butina_matrix',
                             args.outformat, fieldNames,
                             valueClassMappings=clsMappings,
                             datasetMetaProps=datasetMetaProps,
                             fieldMetaProps=fieldMetaProps)
    ### generate fingerprints
    mols = [x for x in suppl if x is not None]
    fps = [descriptor(x) for x in mols]
    input.close()
    ### do clustering
    # ClusterFps takes a distance threshold, hence 1.0 - similarity threshold.
    utils.log("Clustering with descriptor", args.descriptor, "metric", args.metric, "and threshold", args.threshold)
    clusters, dists, matrix, = cluster_butina.ClusterFps(fps, args.metric, 1.0 - args.threshold)
    utils.log("Found", len(clusters), "clusters")
    MapClusterToMols(clusters, mols)
    if not args.quiet:
        utils.log("Clusters:", clusters)
    writer.writeHeader()
    # matrix is the lower-left triangle without the diagonal, so it has one
    # row fewer than there are molecules.
    size = len(matrix)
    #utils.log("len(matrix):", size)
    count = 0
    for i in range(size ):
        #utils.log("element",i, "has length", len(matrix[i]))
        # diagonal entry: each molecule is identical to itself
        writer.write(create_values(mols, i, i, 1.0))
        count += 1
        for j in range(len(matrix[i])):
            #utils.log("writing",i,j)
            dist = matrix[i][j]
            if dist > args.matrixThreshold:
                # the matrix is the lower left segment without the diagonal
                x = j
                y = i + 1
                # emit both (x, y) and (y, x) so the output matrix is symmetric
                writer.write(create_values(mols, x, y, dist))
                writer.write(create_values(mols, y, x, dist))
                count += 2
    # final diagonal entry for the last molecule (index == len(matrix))
    writer.write(create_values(mols, size, size, 1.0))
    writer.writeFooter()
    writer.close()
    if args.meta:
        # NOTE(review): 'i' here is the last loop index, not the number of
        # input molecules -- looks like it should be len(mols); confirm.
        utils.write_metrics(output_base, {'__InputCount__':i, '__OutputCount__':count, 'RDKitCluster':i})
def create_values(mols, x, y, dist):
    """Build one output row describing the similarity between two molecules.

    Parameters
    ----------
    mols : list
        RDKit molecules; each must carry an int "Cluster" property
        (set earlier by MapClusterToMols).
    x, y : int
        Indexes into mols of the two molecules being related.
    dist : float
        Similarity value to record for the pair.

    Returns
    -------
    dict
        Values keyed by the field names declared in the writer metadata.
    """
    c1 = mols[x].GetIntProp("Cluster")
    c2 = mols[y].GetIntProp("Cluster")
    # NOTE: the original built an unused OrderedDict with a fresh uuid here;
    # that dead local has been removed — only `props` was ever returned.
    props = {}
    # Clusters and IDs are reported 1-based for readability.
    props["Cluster1"] = c1 + 1
    props["Cluster2"] = c2 + 1
    props["ID1"] = GenerateId(c1 + 1, x + 1)
    props["ID2"] = GenerateId(c2 + 1, y + 1)
    props[field_Similarity] = dist
    # Propagate molecule UUIDs when present so rows can be traced back.
    if mols[x].HasProp("uuid"):
        props["M1"] = mols[x].GetProp("uuid")
    if mols[y].HasProp("uuid"):
        props["M2"] = mols[y].GetProp("uuid")
    return props
# Script entry point: delegate to main() when run directly.
if __name__ == "__main__":
    main()
|
import pytest
from api_test_utils.api_test_session_config import APITestSessionConfig
from api_test_utils import poll_until, PollTimeoutError
from api_test_utils.api_session_client import APISessionClient
@pytest.fixture
def api_test_config() -> APITestSessionConfig:
    """Session config fixture pointed at the public httpbin service."""
    config = APITestSessionConfig(base_uri="https://httpbin.org")
    yield config
@pytest.mark.asyncio
async def test_wait_for_poll_does_timeout(api_client: APISessionClient):
    """Polling an endpoint that always 404s should raise PollTimeoutError
    and record the attempted responses."""
    with pytest.raises(PollTimeoutError) as excinfo:
        await poll_until(lambda: api_client.get('status/404'), timeout=1, sleep_for=0.3)

    timeout_error = excinfo.value  # type: PollTimeoutError
    assert len(timeout_error.responses) > 0
    first_status = timeout_error.responses[0][0]
    assert first_status == 404
@pytest.mark.asyncio
async def test_wait_for_200_bytes(api_client: APISessionClient):
    """A binary endpoint should yield a single 200 response with a bytes body."""
    responses = await poll_until(lambda: api_client.get('bytes/100'), timeout=5)
    assert len(responses) == 1

    status, headers, body = responses[0]
    media_type = headers.get('Content-Type').split(';')[0]
    assert status == 200
    assert media_type == 'application/octet-stream'
    assert isinstance(body, bytes)
@pytest.mark.asyncio
async def test_wait_for_200_json(api_client: APISessionClient):
    """The json endpoint should decode to httpbin's sample slideshow document."""
    responses = await poll_until(lambda: api_client.get('json'), timeout=5)
    assert len(responses) == 1

    status, headers, body = responses[0]
    media_type = headers.get('Content-Type').split(';')[0]
    assert status == 200
    assert media_type == 'application/json'
    assert body['slideshow']['title'] == 'Sample Slide Show'
@pytest.mark.asyncio
async def test_wait_for_200_html(api_client: APISessionClient):
    """The html endpoint should decode to an HTML document string."""
    responses = await poll_until(lambda: api_client.get('html'), timeout=5)
    assert len(responses) == 1

    status, headers, body = responses[0]
    media_type = headers.get('Content-Type').split(';')[0]
    assert status == 200
    assert media_type == 'text/html'
    assert isinstance(body, str)
    assert body.startswith('<!DOCTYPE html>')
@pytest.mark.asyncio
async def test_wait_for_200_json_gzip(api_client: APISessionClient):
    """gzip-compressed responses should be transparently decompressed and decoded."""
    responses = await poll_until(lambda: api_client.get('gzip'), timeout=5)
    assert len(responses) == 1

    status, headers, body = responses[0]
    media_type = headers.get('Content-Type').split(';')[0]
    assert status == 200
    assert media_type == 'application/json'
    assert body['gzipped'] is True
@pytest.mark.asyncio
async def test_wait_for_200_json_deflate(api_client: APISessionClient):
    """deflate-compressed responses should be transparently decompressed and decoded."""
    responses = await poll_until(lambda: api_client.get('deflate'), timeout=5)
    assert len(responses) == 1

    status, headers, body = responses[0]
    media_type = headers.get('Content-Type').split(';')[0]
    assert status == 200
    assert media_type == 'application/json'
    assert body['deflated'] is True
@pytest.mark.skip('we probably do not need brotli support just yet, but if we do .. add brotlipy')
@pytest.mark.asyncio
async def test_wait_for_200_json_brotli(api_client: APISessionClient):
    """Skipped: brotli decoding requires an extra dependency (brotlipy)."""
    responses = await poll_until(lambda: api_client.get('brotli'), timeout=5)
    assert len(responses) == 1

    status, headers, body = responses[0]
    media_type = headers.get('Content-Type').split(';')[0]
    assert status == 200
    assert media_type == 'application/json'
    assert body['brotli'] is True
|
from model import Test
from repo import TestSetsRepo
|
from server.app.tests import registration, login, shared, combined_utils, user_requ
from graphene_django.utils.testing import GraphQLTestCase
from graphql_jwt.testcases import JSONWebTokenTestCase
from django.contrib.auth import get_user_model
from copy import deepcopy
class UserRegistrationTest(GraphQLTestCase, registration.RegistrationTestMixin,
                           shared.UsersMixin, shared.VerificationTestMixin):
    """Registration flow: happy path, duplicate user, and missing-field cases.

    `register_and_verify_user` comes from the mixins and is called with an
    explicit extra `self` argument — presumably the mixin expects the test
    instance as a parameter. TODO(review): confirm against the mixin API.
    """

    def test_single_user(self):
        # Registering a brand-new user should succeed end to end.
        self.register_and_verify_user(self, self.user_info)

    @shared.ExceptionsTestUtils.true_if_exception_cls
    def test_same_user_caught(self):
        # Second registration of the same user must raise.
        self.register_and_verify_user(self, self.user_info)
        # NOTE(review): "self. user_info" (stray space) is valid Python but
        # should be normalised to "self.user_info".
        self.register_and_verify_user(self, self. user_info)

    def test_single_user_no_email(self):
        # NOTE(review): unlike the username/password variants below this has
        # no exception decorator — registration without an email is
        # presumably allowed; confirm intent.
        self.generic_user_no_attr(self.user_info, 'email')

    @shared.ExceptionsTestUtils.true_if_exception_cls
    def test_single_user_no_username(self):
        self.generic_user_no_attr(self.user_info, 'username')

    @shared.ExceptionsTestUtils.true_if_exception_cls
    def test_single_user_no_password(self):
        self.generic_user_no_attr(self.user_info, 'password')

    def generic_user_no_attr(self, user_info, attr):
        # Register with a deep copy of user_info that lacks `attr`.
        user_info_copy = deepcopy(user_info)
        del user_info_copy[attr]
        self.register_and_verify_user(self, user_info_copy)
class UserLoginTest(GraphQLTestCase, registration.RegistrationTestMixin,
                    login.LoginTestMixin, shared.UsersMixin,
                    shared.VerificationTestMixin):
    """Login flow: happy path, unknown user, and missing-credential cases."""

    def test_single_user(self):
        # Register first, then log the same user in; the mixin helpers do
        # their own verification. (Unused result bindings removed.)
        self.register_and_verify_user(self, self.user_info)
        self.login_verify_and_get_info(self, self.user_info)

    @shared.ExceptionsTestUtils.true_if_exception_cls
    def test_single_nonexistent_user(self):
        # Logging in a user that was never registered must raise.
        self.login_verify_and_get_info(self, self.user_info)

    @shared.ExceptionsTestUtils.true_if_exception_cls
    def test_single_user_no_username(self):
        self.generic_user_no_attr(self.user_info, 'username')

    @shared.ExceptionsTestUtils.true_if_exception_cls
    def test_single_user_no_password(self):
        self.generic_user_no_attr(self.user_info, 'password')

    def generic_user_no_attr(self, user_info, attr):
        """Attempt login with *attr* removed from a deep copy of user_info."""
        user_info_copy = deepcopy(user_info)
        del user_info_copy[attr]
        self.login_verify_and_get_info(self, user_info_copy)
class UserInfoTest(shared.JWTAuthAndUsersMixin, user_requ.UserRequTestMixin,
                   shared.VerificationTestMixin):
    """Single-user and all-users info queries, with and without auth."""

    def test_query_single_user_with_perm(self):
        self.authenticate()
        resp = self.get_user_info(self.client, self.user_info)
        # Walk userInfo -> profile -> owner to reach the user record.
        user_obj = resp['userInfo']['profile']['owner']
        res_username, res_email = user_obj['username'], user_obj['email']
        self.assertEqual([self.user_info['username'], self.user_info['email']],
                         [res_username, res_email])

    @shared.ExceptionsTestUtils.true_if_exception_cls
    def test_query_single_user_no_perm(self):
        # Unauthenticated request must raise; the return value was unused.
        self.get_user_info(self.client, self.user_info)

    def test_query_all_users_with_perm(self):
        self.authenticate()
        test_users = self.create_test_users()
        resp = self.get_users_info(self.client)
        success = self.validate_users_info(self.user, test_users,
                                           resp['usersInfo'])
        self.assertTrue(success)

    def test_query_all_users_no_perm(self):
        self.create_test_users()
        # No assertion in the original either; the call itself is the check.
        self.get_users_info(self.client)

    def validate_users_info(self, owner, created_users, received_users):
        """Return True iff every created user other than *owner* appears in
        *received_users* by both username and email."""
        received_usernames = [u['username'] for u in received_users]
        received_emails = [u['email'] for u in received_users]
        for created in created_users:
            is_owner = (created.username == owner.username
                        and created.email == owner.email)
            was_received = (created.username in received_usernames
                            and created.email in received_emails)
            if not is_owner and not was_received:
                return False
        return True
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models import Count, F, Q
from django.shortcuts import get_object_or_404, redirect, render

from .models import Post
from .forms import CommentForm
from marketing.models import Signup
def search(request):
    """Render posts whose title or overview matches the ?q= query string."""
    posts = Post.objects.all()
    query = request.GET.get('q')
    if query:
        # Match either field; distinct() guards against duplicate rows
        # from the joined filter.
        matches_query = Q(title__icontains=query) | Q(overview__icontains=query)
        posts = posts.filter(matches_query).distinct()
    return render(request, 'search_results.html', {'queryset': posts})
def get_category_count():
    """Return a values queryset counting posts per category title."""
    return (Post.objects
            .values('categories__title')
            .annotate(Count('categories__title')))
def index(request):
    """Render the home page; a POST records a newsletter signup email."""
    featured = Post.objects.filter(featured=True)
    latest = Post.objects.order_by('-timestamp')[0:3]

    if request.method == "POST":
        signup = Signup()
        signup.email = request.POST["email"]
        signup.save()

    return render(request, 'index.html', {
        'object_list': featured,
        'latest': latest,
    })
def blog(request):
    """Render the paginated blog listing (4 posts per page)."""
    page_request_var = 'page'
    paginator = Paginator(Post.objects.all(), 4)
    requested_page = request.GET.get(page_request_var)
    # Fall back to the first page for junk input and clamp to the last page
    # when the requested page is past the end.
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        page_obj = paginator.page(1)
    except EmptyPage:
        page_obj = paginator.page(paginator.num_pages)

    return render(request, 'blog.html', {
        'queryset': page_obj,
        'most_recent': Post.objects.order_by('-timestamp')[:3],
        'page_request_var': page_request_var,
        'category_count': get_category_count(),
    })
def post(request, id):
    """Render a single post page, bump its view counter, and accept comments.

    A POST carrying a valid CommentForm attaches the comment to this post and
    redirects back to the same page (post/redirect/get).
    """
    category_count = get_category_count()
    most_recent = Post.objects.order_by('-timestamp')[:3]
    post = get_object_or_404(Post, id=id)
    # F() makes the increment atomic at the database level, avoiding lost
    # updates when two requests view the post concurrently (the original
    # read-modify-write raced). update_fields limits the write to the counter.
    post.view_count = F('view_count') + 1
    post.save(update_fields=['view_count'])
    post.refresh_from_db()
    form = CommentForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            form.instance.post = post
            form.save()
            return redirect(f"/post/{post.id}/")
    context = {
        'form': form,
        'post': post,
        'most_recent': most_recent,
        'category_count': category_count
    }
    return render(request, 'post.html', context)
|
"""Read the package version out of _version.py and hand it to setuptools."""
import os
import re

from setuptools import setup

HERE = os.path.dirname(__file__)

# Extract __version__ = "..." from the version module without importing it.
with open(os.path.join(HERE, "src", "JupyterLibrary", "_version.py")) as stream:
    version = re.findall(r"""__version__ = "([^"]+)""", stream.read())[0]

setup(version=version)
|
"""Get the code for the Travis Badge."""
from write_me.list_files import get_yml_files
from write_me.project_data import get_project_url
# All YAML files discovered in the project tree.
YAMALS = get_yml_files()
# Markdown badge templates; format with (github user, repo name).
BADGE = "[](https://travis-ci.org/{0}/{1})"
COVERALLS = "[](https://coveralls.io/github/{0}/{1})"
def get_travis_file():
    """Return the first yml path containing 'travis.yml', or None if absent."""
    candidates = (path for path in YAMALS if 'travis.yml' in path)
    return next(candidates, None)
def get_travis_badge():
    """Build the Travis badge markdown for this project.

    Appends a Coveralls badge when the travis config invokes coveralls.
    Returns None when no travis.yml file is present.
    """
    travis_file = get_travis_file()
    if not travis_file:
        return None

    with open(travis_file, 'r') as tf:
        uses_coveralls = any("- coveralls" in line for line in tf)

    project_info = get_project_url()
    user = project_info['project_user']
    name = project_info['project_name']

    badge = BADGE.format(user, name)
    if uses_coveralls:
        badge = badge + " " + COVERALLS.format(user, name)
    return badge
if __name__ == '__main__':  # pragma no cover
    # Manual smoke test: print the generated badge markdown.
    res = get_travis_badge()
    print(res)
|
from bs4 import BeautifulSoup
import datetime
import pytz
import re
from utils import unprocessed_archive_urls, process_crawled_archive_response_chunk
import logging
# Publisher label attached to every scraped article row.
PUBLISHER = "Bild"
@unprocessed_archive_urls(PUBLISHER)
def archive_urls():
    """Yield one Bild archive index URL per day of the crawled date range
    (2006-01-05 through 2020-12-28, inclusive)."""
    first_day = datetime.datetime(2006, 1, 5)
    last_day = datetime.datetime(2020, 12, 28)
    day = first_day
    while day <= last_day:
        yield f"https://www.bild.de/archive/{day.year}/{day.month}/{day.day}/index.html"
        day += datetime.timedelta(days=1)
@process_crawled_archive_response_chunk(PUBLISHER, write_to_db=False)
def scrape_articles(resp):
    """Parse one Bild archive index page into article rows.

    Yields (url, publisher, title, published_datetime_utc, None, None)
    tuples. Individual malformed entries are skipped; a page-level parse
    failure is logged.
    """
    # The publication date comes from the URL:
    # .../archive/<year>/<month>/<day>/index.html
    published_date = tuple(map(int, [re.search(r"archive/(.+?)/", resp.url).group(1),
                                     re.search(r"archive/[0-9]{4}/(.+?)/", resp.url).group(1),
                                     re.search(r"archive/[0-9]{4}/[0-9]{1,2}/(.+?)/", resp.url).group(1)]))
    soup = BeautifulSoup(resp.text, "lxml")
    try:
        for el in soup.select("article .txt > h2.crossheading")[0].next_siblings:
            try:
                url = "https://www.bild.de" + el.a.attrs.get("href")
                title = el.a.string
                # Entries start with an "HH:MM " timestamp.
                published_str = el.contents[0]
                published_hour = int(re.search(r"^(.+?):", published_str).group(1))
                published_minute = int(re.search(r"[0-9][0-9]:(.+?) ", published_str).group(1))
                published_datetime = datetime.datetime(*published_date, published_hour, published_minute)
                # Timestamps are local Berlin time; normalise to UTC.
                tz = pytz.timezone("Europe/Berlin")
                published_datetime = tz.localize(published_datetime).astimezone(pytz.utc)
                yield url, PUBLISHER, title, published_datetime, None, None
            except Exception:
                # FIX: was a bare except, which also swallowed SystemExit and
                # KeyboardInterrupt. Best-effort skip of malformed entries.
                pass
    except Exception:
        logging.exception(f"Failed to parse archive page: {resp.url}")
|
import torch
class ResidualBlock(torch.nn.Module):
    """A single residual block computing x + F(x), where F is
    conv -> BatchNorm -> ReLU -> conv -> BatchNorm."""

    def __init__(self, num_filts):
        """
        Parameters
        ----------
        num_filts : int
            number of input and output channels/filters
        """
        super().__init__()
        # Same layer order/indices as a hand-written Sequential, so the
        # state-dict layout is unchanged.
        layers = [
            torch.nn.Conv2d(num_filts, num_filts, 3, 1, 1),
            torch.nn.BatchNorm2d(num_filts),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(num_filts, num_filts, 3, 1, 1),
            torch.nn.BatchNorm2d(num_filts),
        ]
        self.block = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Apply the block and add the skip connection.

        Parameters
        ----------
        x : :class:`torch.Tensor`
            the image batch

        Returns
        -------
        :class:`torch.Tensor`
            the image batch
        """
        residual = self.block(x)
        return x + residual
class Generator(torch.nn.Module):
    """Residual generator: latent noise is projected to image shape,
    concatenated with the input image, and refined by residual blocks."""

    def __init__(self, latent_dim, num_channels, img_size, n_residual_blocks,
                 num_filts=64):
        """
        Parameters
        ----------
        latent_dim : int
            size of the latent dimension
        num_channels : int
            number of image channels to generate
        img_size : int
            number of pixels per side of the image
        n_residual_blocks : int
            number of residual blocks inside the generator
        num_filts : int
            number of filters inside each of the blocks
        """
        super().__init__()
        # Fully-connected layer projecting noise to an image-shaped tensor.
        self.fc = torch.nn.Linear(latent_dim, num_channels * img_size ** 2)
        self.l1 = torch.nn.Sequential(
            torch.nn.Conv2d(num_channels * 2, 64, 3, 1, 1),
            torch.nn.ReLU(inplace=True))
        self.resblocks = torch.nn.Sequential(
            *(ResidualBlock(num_filts) for _ in range(n_residual_blocks)))
        self.l2 = torch.nn.Sequential(
            torch.nn.Conv2d(64, num_channels, 3, 1, 1), torch.nn.Tanh())

    def forward(self, img, z):
        """Generate a batch of images from (image, noise) pairs.

        Parameters
        ----------
        img : :class:`torch.Tensor`
            the image tensor
        z : :class:`torch.Tensor`
            the noise tensor

        Returns
        -------
        :class:`torch.Tensor`
            batch of generated images
        """
        noise_img = self.fc(z).view(*img.shape)
        features = self.l1(torch.cat((img, noise_img), 1))
        features = self.resblocks(features)
        return self.l2(features)
class Discriminator(torch.nn.Module):
    """Discriminator: four stride-2 conv blocks followed by a 1-channel conv
    producing a validity map."""

    def __init__(self, num_channels):
        """
        Parameters
        ----------
        num_channels : int
            number of channels per input image
        """
        super().__init__()

        def block(in_features, out_features, normalization=True):
            """One downsampling block: stride-2 conv + LeakyReLU (+ InstanceNorm)."""
            layers = [torch.nn.Conv2d(in_features, out_features, 3, stride=2,
                                      padding=1),
                      torch.nn.LeakyReLU(0.2, inplace=True)]
            if normalization:
                layers.append(torch.nn.InstanceNorm2d(out_features))
            return layers

        # Widths double at each stage; the first block skips normalization.
        widths = (64, 128, 256, 512)
        layers = block(num_channels, widths[0], normalization=False)
        for narrow, wide in zip(widths, widths[1:]):
            layers += block(narrow, wide)
        layers.append(torch.nn.Conv2d(widths[-1], 1, 3, 1, 1))
        self.model = torch.nn.Sequential(*layers)

    def forward(self, img):
        """Map an image batch to its validity score map.

        Parameters
        ----------
        img : :class:`torch.Tensor`
            a single image batch

        Returns
        -------
        :class:`torch.Tensor`
            the resulting validity batch
        """
        return self.model(img)
class Classifier(torch.nn.Module):
"""
Classifier Network
"""
def __init__(self, num_channels, img_size, n_classes):
"""
Parameters
----------
num_channels : int
number of image channels
img_size : int
number of pixels per side
n_classes : int
number of classes
"""
super().__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [torch.nn.Conv2d(in_features, out_features, 3,
stride=2, padding=1),
torch.nn.LeakyReLU(0.2, inplace=True)]
if normalization:
layers.append(torch.nn.InstanceNorm2d(out_features))
return layers
self.model = torch.nn.Sequential(
*block(num_channels, 64, normalization=False), *block(64, 128),
*block(128, 256), *block(256, 512)
)
# downsampled size
dsize = self.model(torch.rand(1, num_channels, img_size, img_size)
).size(2)
self.output_layer = torch.nn.Sequential(
torch.nn.Linear(512 * dsize ** 2, n_classes),
torch.nn.Softmax())
def forward(self, img):
"""
Feeds a single batch through the network
Parameters
----------
img : :class:`torch.Tensor`
the image batch
Returns
-------
:class:`torch.Tensor`
the resulting label batch
"""
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Create Class to create data and create batches
class TimeSeriesData():
    """Synthetic sine-wave time series with random training-batch sampling."""

    def __init__(self, num_points, xmin, xmax):
        """Sample sin(x) at num_points evenly spaced x values in [xmin, xmax]."""
        self.xmin = xmin
        self.xmax = xmax
        self.num_points = num_points
        self.resolution = (xmax - xmin) / num_points
        self.x_data = np.linspace(xmin, xmax, num_points)
        self.y_true = np.sin(self.x_data)

    def ret_true(self, x_series):
        """Convenience: the true signal sin(x) at arbitrary x values."""
        return np.sin(x_series)

    def next_batch(self, batch_size, steps, return_batch_ts=False):
        """Sample `batch_size` random windows of `steps` consecutive points.

        Returns (inputs, targets[, batch_ts]); targets are the inputs shifted
        one step into the future, both shaped (batch, steps, 1).
        """
        # Random window starts, scaled so a window of steps+1 samples stays
        # inside [xmin, xmax].
        rand_start = np.random.rand(batch_size, 1)
        ts_start = rand_start * (self.xmax - self.xmin - steps * self.resolution)
        batch_ts = ts_start + np.arange(0.0, steps + 1) * self.resolution
        y_batch = np.sin(batch_ts)
        # RNN format: drop the last point for inputs, the first for targets.
        inputs = y_batch[:, :-1].reshape(-1, steps, 1)
        targets = y_batch[:, 1:].reshape(-1, steps, 1)
        if return_batch_ts:
            return inputs, targets, batch_ts
        return inputs, targets
# ---------------------------------------------------------------------------
# Script section: build a sine-wave dataset, train a single-layer RNN on it,
# then plot one-step-ahead predictions.
# NOTE(review): uses tf.contrib / tf.placeholder — requires TensorFlow 1.x.
# ---------------------------------------------------------------------------
# Let's create some data
ts_data = TimeSeriesData(250, 0, 10)  # 250 points between 0 and 10
plt.plot(ts_data.x_data, ts_data.y_true)
# Creating random batches
num_time_steps = 30
y1, y2, ts = ts_data.next_batch(1, num_time_steps, True)  # 1 Batch, 30 steps
# y2 is y1 shifted one step ahead; drop the first timestamp so x-values align.
plt.plot(ts.flatten()[1:], y2.flatten(), '*')
plt.plot(ts_data.x_data, ts_data.y_true, label='Sin(t)')
plt.plot(ts.flatten()[1:], y2.flatten(), '*', label='Single Training Instance')
plt.legend()
plt.tight_layout()
plt.show()
# Training data
# Training instance: steps+1 evenly spaced points starting at x=5.
train_inst = np.linspace(5, 5 + ts_data.resolution * (num_time_steps+1), num_time_steps+1 )
plt.title('A training instance')
plt.plot(train_inst[:-1], ts_data.ret_true(train_inst[:-1]), 'bo', markersize=15, alpha=0.5, label='Instance')
plt.plot(train_inst[1:], ts_data.ret_true(train_inst[1:]), 'ko', markersize=7, label='Target')
plt.show()
tf.reset_default_graph()
# Constants
# Just one feature, the time series
num_inputs = 1
# 100 neuron layer, play with this
num_neurons = 100
# Just one output, predicted time series
num_outputs = 1
# learning rate, 0.0001 default, but you can play with this
learning_rate = 0.0001
# how many iterations to go through (training steps), you can play with this
num_train_iterations = 2000
# Size of the batch of data
batch_size = 1
# Placeholders: (batch, time, features) for both inputs and targets.
X = tf.placeholder(tf.float32, [None, num_time_steps, num_inputs])
y = tf.placeholder(tf.float32, [None, num_time_steps, num_inputs])
# RNN Cell Layer
# OutputProjectionWrapper adds a linear layer mapping the 100-unit state
# down to a single output value at every time step.
cell = tf.contrib.rnn.OutputProjectionWrapper(
    tf.contrib.rnn.BasicRNNCell(
        num_units=num_neurons,
        activation=tf.nn.relu),
    output_size=num_outputs
)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
# Loss Function
## MSE
loss = tf.reduce_mean(tf.square(outputs - y))
# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Train
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
# Session
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    for iteration in range(num_train_iterations):
        X_batch, y_batch = ts_data.next_batch(batch_size, num_time_steps)
        sess.run(train, feed_dict={X:X_batch, y:y_batch})
        if iteration % 100 == 0:
            # Report training MSE every 100 iterations.
            mse = loss.eval(feed_dict={X:X_batch, y:y_batch})
            print(iteration, ' \tMSE', mse)
    # Persist the trained weights for the evaluation session below.
    saver.save(sess, './rnn_time_series_model')
with tf.Session() as sess:
    saver.restore(sess, './rnn_time_series_model')
    # Feed the training instance (all but the last point) and predict the
    # next value at each step.
    X_new = np.sin(np.array(train_inst[:-1].reshape(-1, num_time_steps, num_inputs)))
    y_pred = sess.run(outputs, feed_dict={X:X_new})
plt.title('Testing the model')
## Training instance
plt.plot(
    train_inst[:-1],
    np.sin(train_inst[:-1]),
    'bo',
    markersize=15,
    alpha=0.5,
    label='Training Instance'
)
## Target to predict (correct test values np.sin(train))
plt.plot(
    train_inst[1:],
    np.sin(train_inst[1:]),
    'ko',
    markersize=10,
    label='Target'
)
## Models prediction
plt.plot(
    train_inst[1:],
    y_pred[0, :, 0],
    'r.',
    markersize=10,
    label='Predictions'
)
plt.xlabel('Time')
plt.legend()
plt.tight_layout()
plt.show()
|
"""
Google, medium
Given k sorted singly linked lists, write a function to merge all the lists
into one sorted singly linked list.
"""
import heapq
from typing import List, Tuple
class Node:
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, value=None, next=None):
        self.value = value
        self.next = next


def merge_k_lists(lists: List[Node]) -> Node:
    """Merge k sorted singly linked lists into one sorted list.

    A min-heap holds one (value, list-index) entry per list. Each time the
    minimum is extracted, that list's head is advanced and, if non-empty,
    its new front value is pushed. O(N log k) time for N total nodes.

    BUG FIX: the original never advanced ``lists[idx]`` (it only rebound a
    local variable), so the same node's successor was re-pushed forever and
    the loop never terminated. Empty (None) input heads also crashed; they
    are now skipped.

    Parameters
    ----------
    lists : List[Node]
        Heads of the k sorted lists; None entries are tolerated.

    Returns
    -------
    Node
        Head of the merged list, or None when all inputs are empty.
    """
    heads = list(lists)  # mutable copy: heads[i] = next unconsumed node of list i
    h: List[Tuple[int, int]] = []
    for i, node in enumerate(heads):
        if node is not None:
            heapq.heappush(h, (node.value, i))
    dummy = Node()
    tail = dummy
    while h:
        min_val, idx = heapq.heappop(h)
        tail.next = Node(min_val)
        tail = tail.next
        # Advance the consumed list and push its new front, if any.
        heads[idx] = heads[idx].next
        if heads[idx]:
            heapq.heappush(h, (heads[idx].value, idx))
    return dummy.next
|
import numpy as np
import os
import csv
from collections import OrderedDict
class Hazard(object):
    """A single hazard scenario with per-location intensity records."""

    def __init__(self, hazard_scenario_name, scenario_hazard_data,
                 hazard_input_method):
        # Scenario name; also used to derive a deterministic seed.
        self.hazard_scenario_name = hazard_scenario_name
        # List of {"longitude", "latitude", "hazard_intensity"} records.
        self.scenario_hazard_data = scenario_hazard_data
        # 'hazard_array' or a file-based input method.
        self.hazard_input_method = hazard_input_method
        # Decimal places used when matching coordinates.
        self.round_off = 2

    def get_hazard_intensity_at_location(self, longitude, latitude):
        """Return the hazard intensity recorded at (longitude, latitude).

        For 'hazard_array' input the single stored intensity applies
        everywhere; otherwise coordinates are matched after rounding to
        `round_off` decimal places. Raises when no record matches.
        """
        digits = self.round_off
        for record in self.scenario_hazard_data:
            if self.hazard_input_method == 'hazard_array':
                return record["hazard_intensity"]
            lon_match = (round(float(record["longitude"]), digits)
                         == round(float(longitude), digits))
            lat_match = (round(float(record["latitude"]), digits)
                         == round(float(latitude), digits))
            if lon_match and lat_match:
                return record["hazard_intensity"]
        raise Exception("Invalid Values for Longitude or Latitude")

    def get_seed(self):
        """Derive a deterministic seed from the scenario name's characters."""
        return sum((i + 1) * ord(ch)
                   for i, ch in enumerate(self.hazard_scenario_name))

    def __str__(self):
        parts = [self.hazard_scenario_name + '\n']
        for record in self.scenario_hazard_data:
            parts.append(
                "longitude: " + str(record["longitude"]) +
                " latitude: " + str(record["latitude"]) +
                " hazard_intensity: " + str(record["hazard_intensity"]) + '\n')
        return "".join(parts)
class HazardsContainer(object):
    """
    The idea is to abstract the number and type of hazards to allow greater
    flexibility in the type and number of hazards to be modelled.
    """

    def __init__(self, configuration):
        """Build the Hazard list from either a scenario CSV file or a
        generated array of intensity values, as selected by
        configuration.HAZARD_INPUT_METHOD."""
        self.listOfhazards = []
        self.hazard_type = configuration.HAZARD_TYPE
        self.intensity_measure_param = configuration.INTENSITY_MEASURE_PARAM
        self.intensity_measure_unit = configuration.INTENSITY_MEASURE_UNIT
        self.focal_hazard_scenarios = configuration.FOCAL_HAZARD_SCENARIOS

        # get hazard data from scenario file
        if configuration.HAZARD_INPUT_METHOD == "scenario_file":
            self.scenario_hazard_data, self.hazard_scenario_list = \
                HazardsContainer.populate_scenario_hazard_using_hazard_file(
                    configuration.SCENARIO_FILE)
            self.num_hazard_pts = len(self.hazard_scenario_list)

        # get hazard data from an array of hazard intensity values
        elif configuration.HAZARD_INPUT_METHOD == "hazard_array":
            self.num_hazard_pts = \
                int(round((configuration.INTENSITY_MEASURE_MAX
                           - configuration.INTENSITY_MEASURE_MIN) /
                          float(configuration.INTENSITY_MEASURE_STEP) + 1
                          )
                    )
            # Using the limits and step generate a list of hazard
            # intensity values
            self.hazard_scenario_list \
                = np.linspace(configuration.INTENSITY_MEASURE_MIN,
                              configuration.INTENSITY_MEASURE_MAX,
                              num=self.num_hazard_pts)
            # containing hazard value for each location
            self.scenario_hazard_data, self.hazard_scenario_name = \
                HazardsContainer.populate_scenario_hazard_using_hazard_array(
                    self.hazard_scenario_list)
            # FIX: np.float was deprecated and removed in NumPy 1.24; the
            # builtin float is the documented replacement and is identical here.
            self.hazard_scenario_list = ["%0.3f" % float(x)
                                         for x in self.hazard_scenario_list]

        for hazard_scenario_name in self.scenario_hazard_data.keys():
            self.listOfhazards.append(
                Hazard(
                    hazard_scenario_name,
                    self.scenario_hazard_data[hazard_scenario_name],
                    configuration.HAZARD_INPUT_METHOD
                )
            )

    def get_listOfhazards(self):
        """Yield each Hazard object in turn."""
        for hazard_intensity in self.listOfhazards:
            yield hazard_intensity

    @staticmethod
    def populate_scenario_hazard_using_hazard_file(scenario_file):
        """Read per-location intensities for each scenario column of a CSV.

        Returns (scenario_hazard_data, hazard_scenario_list): a dict of
        per-scenario record lists plus the scenario column names.
        """
        root = os.path.dirname(os.path.abspath(__file__))
        csv_path = os.path.join(root, "hazard", scenario_file)
        scenario_hazard_data = {}
        # FIX: csv.DictReader requires a text-mode file under Python 3;
        # "rb" was a Python 2 leftover and raises an error. newline="" is
        # the documented way to open files for the csv module.
        with open(csv_path, "r", newline="") as f_obj:
            reader = csv.DictReader(f_obj, delimiter=',')
            # Every column that is not a coordinate is a scenario.
            hazard_scenario_list \
                = [scenario for scenario in reader.fieldnames if
                   scenario not in ["longitude", "latitude"]]
            for scenario in hazard_scenario_list:
                scenario_hazard_data[scenario] = []
            for row in reader:
                for col in row:
                    if col not in ["longitude", "latitude"]:
                        hazard_intensity = row[col]
                        scenario_hazard_data[col].append(
                            {"longitude": row["longitude"],
                             "latitude": row["latitude"],
                             "hazard_intensity": hazard_intensity})
        return scenario_hazard_data, hazard_scenario_list

    @staticmethod
    def populate_scenario_hazard_using_hazard_array(num_hazard_pts):
        """Wrap each intensity value in a single (0, 0) location record.

        `num_hazard_pts` is — despite the name, kept for call compatibility —
        the iterable of hazard intensity values.
        """
        scenario_hazard_data = OrderedDict()
        hazard_scenario_name = []
        for i, hazard_intensity in enumerate(num_hazard_pts):
            key = "s_" + str(i)
            hazard_scenario_name.append(key)
            scenario_hazard_data[key] \
                = [{"longitude": 0,
                    "latitude": 0,
                    "hazard_intensity": hazard_intensity}]
        return scenario_hazard_data, hazard_scenario_name
|
# https://leetcode.com/problems/longest-repeating-character-replacement/
# You are given a string s and an integer k. You can choose any character of the
# string and change it to any other uppercase English character. You can perform
# this operation at most k times.
# Return the length of the longest substring containing the same letter you can
# get after performing the above operations.
################################################################################
# sliding window -> move when right - left - max_char_count > k
# use dict to record char count
from collections import Counter
class Solution:
    def characterReplacement(self, s: str, k: int) -> int:
        """Length of the longest substring that can be made uniform with at
        most k character replacements.

        Sliding window: a window is repairable while
        (window length - count of its most frequent char) <= k. The recorded
        max frequency is never decreased when the window shrinks; that keeps
        the answer correct because only windows longer than the current best
        can improve it.
        """
        counts = Counter()
        best = 0
        top_freq = 0  # highest single-char count seen in any window so far
        lo = 0
        for hi, ch in enumerate(s):
            counts[ch] += 1
            top_freq = max(top_freq, counts[ch])
            # Slide the left edge until the window needs <= k replacements.
            while (hi - lo + 1) - top_freq > k:
                counts[s[lo]] -= 1
                lo += 1
            best = max(best, hi - lo + 1)
        return best
|
from utils import prime_numbers
def sum_of_primes(stop: int) -> int:
    """Sum all primes strictly below *stop* (Project Euler #10).

    Relies on the project helper `prime_numbers(stop)`; the asserts below
    (2+3+5+7 == 17 for stop=10) pin down the exclusive upper bound.
    """
    return sum(prime_numbers(stop))
# Smoke tests, run on import: known answers for Project Euler #10.
assert sum_of_primes(10) == 17
assert sum_of_primes(2_000_000) == 142_913_828_922
|
# coding=utf-8
__author__ = 'Anatoli Kalysch'
import pip
import sys
from os import getcwd, system, remove
from shutil import copyfile
def do(action, dependency):
    # Run `pip <action> <dependency>`; returns pip's exit status (0 = ok).
    # NOTE(review): pip.main() was removed in pip 10+; this whole script
    # targets legacy Python 2 / old pip environments.
    return pip.main([action, dependency])
def usage():
    # Print CLI usage (Python 2 print statement; the script is Python 2 only).
    print "Usage: python setup.py <install | uninstall>"
# Third-party packages required by the VMAttack plugin.
dependencies = ["distorm3", 'idacute']

if __name__ == '__main__':
    print '[*] Starting dependency handling!'
    stub_name = 'VMAttack_plugin_stub.py'
    # Install (default) or uninstall each dependency via pip.
    for dependency in dependencies:
        try:
            if sys.argv[1] in ["install", "uninstall"]:
                retval = do(sys.argv[1], dependency)
            else:
                retval = do("install", dependency)
            if retval == 0:
                continue
            else:
                print '[!] An error occured! Please resolve issues with dependencies and try again.'
        except IndexError:
            # No command-line argument supplied.
            usage()
            sys.exit(1)
    # Uninstall path: remove the plugin stub from the recorded IDA directory
    # (the path was saved to 'install_dir' by a previous install run).
    try:
        if sys.argv[1] == 'uninstall':
            with open('install_dir') as f:
                ida_dir = f.read()
            if ida_dir:
                remove(ida_dir + stub_name)
            sys.exit(0)
    except:
        # Best effort: fall through to installation when anything goes wrong.
        pass
    print '[*] Setting up environment and installing Plugin.'
    # set up environment variable on Windows: setx Framework C:\path\to\Framework\
    plugin_dir = getcwd()
    system('setx VMAttack %s' % plugin_dir)
    # copy stub into the IDA PRO Plugin directory
    ida_dir = raw_input('Please input full path to the IDA *plugin* folder (e.g. X:\IDA\plugins\): ')
    if not ida_dir.endswith(r'\\'):
        ida_dir += r'\\'
    # Remember the install location so a later 'uninstall' can remove the stub.
    with open('install_dir', 'w') as f:
        f.write(ida_dir)
    copyfile(stub_name, ida_dir+stub_name)
    print '[*] Install complete. All Done!'
|
from threading import Thread,currentThread, Lock, RLock, Semaphore
import requests
import os
# Shared semaphore, initially 0: the consumer thread blocks until the
# producer releases it after writing the result file.
semaphore = Semaphore(0)
hasilperhitungan=0  # NOTE(review): module global appears unused in this file
class rollySemaphoreDeleteFile (Thread):
    """Consumer thread: waits on the shared semaphore, then reads and
    renames the result file written by the producer thread."""

    def __init__(self, name, thread_number, namafile):
        Thread.__init__(self)
        self.threadLock = Lock()
        self.name = name
        self.thread_number = thread_number
        # Resolve the file path relative to this script's directory.
        self.namafile = os.path.join(os.path.dirname(__file__), namafile)
        self.semaphore = semaphore

    def run(self):
        """Wait for the producer's signal, then read and rename the file."""
        print("\n"+str(self.thread_number)+". ---> " + self.name + "jalan")
        print('mau menjalankan semaphore acquire untuk baca dan delete file')
        self.threadLock.acquire()
        # Block until the producer releases the semaphore (file exists).
        self.semaphore.acquire()
        print('melakukan baca file : '+self.namafile)
        self.readfile()
        print('melakukan rename file : '+self.namafile)
        self.renamefile()
        self.threadLock.release()
        print("\n"+str(self.thread_number)+". ---> " + currentThread().getName() + "selesai")

    def readfile(self):
        """Print the file's contents.

        FIX: use a context manager so the handle is closed before the
        subsequent rename — the original leaked the open handle, which
        makes the rename fail on Windows.
        """
        with open(self.namafile, "r") as f:
            print("Isi Filenya : "+f.read())

    def renamefile(self):
        """Mark the file as processed by appending a '.croot' suffix."""
        os.rename(self.namafile, self.namafile+'.croot')
class rollyDua113040087 (Thread):
    """Producer thread: computes a^b via the mathjs web API, writes the
    result to a file, then releases the shared semaphore so the consumer
    thread may read it."""

    def __init__(self, name, thread_number, a, b, namafile):
        Thread.__init__(self)
        self.threadLock = Lock()
        self.semaphore = semaphore
        # Re-entrant lock: hitung() holds it while calling apipangkat().
        self.rlock = RLock()
        self.name = name
        self.namafile = os.path.join(os.path.dirname(__file__), namafile)
        self.thread_number = thread_number
        self.a = a
        self.b = b

    def run(self):
        print("\n"+str(self.thread_number)+". ---> " + self.name + "jalan")
        self.threadLock.acquire()
        print("threeadlock acquire utama")
        self.hitung()
        self.threadLock.release()
        print("\n"+str(self.thread_number)+". ---> " + currentThread().getName() + "selesai")

    def apipangkat(self):
        """Evaluate a^b via the mathjs API and persist the integer result."""
        with self.rlock:
            print('didalam rlock apipangkat, akses web service...')
            apiurl = 'https://api.mathjs.org/v4/?expr='
            eq = str(self.a)+'^'+str(self.b)
            response = requests.get(apiurl+eq)
            html = response.content.decode(response.encoding)
            hasil = int(html)
            print("hasil : "+str(hasil))
            self.createfile(hasil)

    def hitung(self):
        """Entry point for the calculation; re-enters rlock via apipangkat()."""
        with self.rlock:
            print('rlock hitung')
            self.apipangkat()

    def createfile(self, isi):
        """Write the result to a brand-new file ('x' mode fails if it already
        exists), then release the semaphore for the consumer thread."""
        print('membuat file baru : '+ self.namafile)
        # FIX: context manager guarantees the handle is closed even when
        # write() raises (the original leaked it on the error path).
        with open(self.namafile, "x") as f:
            f.write(str(isi))
        print('sudah membuat file baru, mau relese semaphore')
        self.semaphore.release()
        print('di dalam Semaphore release, semaphore sudah di release')
|
# Fonte: https://leetcode.com/problems/pascals-triangle/
# Autor: Bruno Harlis
# Data: 23/08/2021
"""
PROBLEMA PROPOSTO:
Dado um inteiro numRows, retorna o primeiro numRows do triângulo de Pascal.
No triângulo de Pascal , cada número é a soma dos dois números diretamente
acima dele, conforme mostrado:
1
1 1
1 2 1
1 3 3 1
1 4 6 4 1
Exemplo 1:
Entrada: numRows = 5
Saída: [[1], [1,1], [1,2,1], [1,3,3,1], [1,4,6,4,1]]
Tempo de execução: 32 ms, mais rápido do que 59,95 % das submissões.
Uso de Memória: 14,2 MB, menos de 81,98 % de python3 submissões.
"""
def generate(n: int):
    """Return the first n rows of Pascal's triangle.

    :param n: number of rows to produce; n <= 0 yields an empty list
    :return: list of rows, e.g. generate(3) -> [[1], [1, 1], [1, 2, 1]]
    """
    # Bug fix: the original pre-seeded the result with [[1], [1, 1]], so it
    # returned two rows even for n <= 0. Building row by row with the
    # standard recurrence also removes the fragile break-based inner loop.
    rows = []
    for i in range(n):
        row = [1] * (i + 1)
        # Interior entries are the sum of the two entries above them.
        for j in range(1, i):
            row[j] = rows[i - 1][j - 1] + rows[i - 1][j]
        rows.append(row)
    return rows
if __name__ == '__main__':
    # Demo: print the first four rows of Pascal's triangle.
    print(generate(4))
|
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="MapiContactNamePropertySetDto.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
class MapiContactNamePropertySetDto(object):
    """The properties are used to specify the name of the person represented by the contact.

    Auto-generated Swagger model: plain data holder with per-field
    property accessors, dict/str conversion, and value-based equality.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared type (used by to_dict / serializers).
    swagger_types = {
        'display_name': 'str',
        'display_name_prefix': 'str',
        'file_under': 'str',
        'file_under_id': 'int',
        'generation': 'str',
        'given_name': 'str',
        'initials': 'str',
        'middle_name': 'str',
        'nickname': 'str',
        'surname': 'str'
    }
    # Python attribute name -> JSON key in the wire format.
    attribute_map = {
        'display_name': 'displayName',
        'display_name_prefix': 'displayNamePrefix',
        'file_under': 'fileUnder',
        'file_under_id': 'fileUnderId',
        'generation': 'generation',
        'given_name': 'givenName',
        'initials': 'initials',
        'middle_name': 'middleName',
        'nickname': 'nickname',
        'surname': 'surname'
    }
    def __init__(self, display_name: str = None, display_name_prefix: str = None, file_under: str = None, file_under_id: int = None, generation: str = None, given_name: str = None, initials: str = None, middle_name: str = None, nickname: str = None, surname: str = None):
        """
        The properties are used to specify the name of the person represented by the contact
        :param display_name: Full name of the contact
        :type display_name: str
        :param display_name_prefix: Title of the contact
        :type display_name_prefix: str
        :param file_under: Name under which to file this contact when displaying a list of contacts
        :type file_under: str
        :param file_under_id: Value specifying how to generate and recompute the property when other properties are changed
        :type file_under_id: int
        :param generation: Generation suffix of the contact
        :type generation: str
        :param given_name: Given name (first name) of the contact
        :type given_name: str
        :param initials: Initials of the contact
        :type initials: str
        :param middle_name: Middle name of the contact
        :type middle_name: str
        :param nickname: Nickname of the contact
        :type nickname: str
        :param surname: Surname (family name) of the contact
        :type surname: str
        """
        self._display_name = None
        self._display_name_prefix = None
        self._file_under = None
        self._file_under_id = None
        self._generation = None
        self._given_name = None
        self._initials = None
        self._middle_name = None
        self._nickname = None
        self._surname = None
        # Only route non-None arguments through the setters, so defaults stay
        # None and file_under_id's None-guard in its setter is never tripped.
        if display_name is not None:
            self.display_name = display_name
        if display_name_prefix is not None:
            self.display_name_prefix = display_name_prefix
        if file_under is not None:
            self.file_under = file_under
        if file_under_id is not None:
            self.file_under_id = file_under_id
        if generation is not None:
            self.generation = generation
        if given_name is not None:
            self.given_name = given_name
        if initials is not None:
            self.initials = initials
        if middle_name is not None:
            self.middle_name = middle_name
        if nickname is not None:
            self.nickname = nickname
        if surname is not None:
            self.surname = surname
    @property
    def display_name(self) -> str:
        """
        Full name of the contact
        :return: The display_name of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._display_name
    @display_name.setter
    def display_name(self, display_name: str):
        """
        Full name of the contact
        :param display_name: The display_name of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._display_name = display_name
    @property
    def display_name_prefix(self) -> str:
        """
        Title of the contact
        :return: The display_name_prefix of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._display_name_prefix
    @display_name_prefix.setter
    def display_name_prefix(self, display_name_prefix: str):
        """
        Title of the contact
        :param display_name_prefix: The display_name_prefix of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._display_name_prefix = display_name_prefix
    @property
    def file_under(self) -> str:
        """
        Name under which to file this contact when displaying a list of contacts
        :return: The file_under of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._file_under
    @file_under.setter
    def file_under(self, file_under: str):
        """
        Name under which to file this contact when displaying a list of contacts
        :param file_under: The file_under of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._file_under = file_under
    @property
    def file_under_id(self) -> int:
        """
        Value specifying how to generate and recompute the property when other properties are changed
        :return: The file_under_id of this MapiContactNamePropertySetDto.
        :rtype: int
        """
        return self._file_under_id
    @file_under_id.setter
    def file_under_id(self, file_under_id: int):
        """
        Value specifying how to generate and recompute the property when other properties are changed
        :param file_under_id: The file_under_id of this MapiContactNamePropertySetDto.
        :type: int
        """
        # Unlike the other (nullable) fields, this one rejects None once set
        # explicitly; __init__ skips the setter when the argument is None.
        if file_under_id is None:
            raise ValueError("Invalid value for `file_under_id`, must not be `None`")
        self._file_under_id = file_under_id
    @property
    def generation(self) -> str:
        """
        Generation suffix of the contact
        :return: The generation of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._generation
    @generation.setter
    def generation(self, generation: str):
        """
        Generation suffix of the contact
        :param generation: The generation of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._generation = generation
    @property
    def given_name(self) -> str:
        """
        Given name (first name) of the contact
        :return: The given_name of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._given_name
    @given_name.setter
    def given_name(self, given_name: str):
        """
        Given name (first name) of the contact
        :param given_name: The given_name of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._given_name = given_name
    @property
    def initials(self) -> str:
        """
        Initials of the contact
        :return: The initials of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._initials
    @initials.setter
    def initials(self, initials: str):
        """
        Initials of the contact
        :param initials: The initials of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._initials = initials
    @property
    def middle_name(self) -> str:
        """
        Middle name of the contact
        :return: The middle_name of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._middle_name
    @middle_name.setter
    def middle_name(self, middle_name: str):
        """
        Middle name of the contact
        :param middle_name: The middle_name of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._middle_name = middle_name
    @property
    def nickname(self) -> str:
        """
        Nickname of the contact
        :return: The nickname of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._nickname
    @nickname.setter
    def nickname(self, nickname: str):
        """
        Nickname of the contact
        :param nickname: The nickname of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._nickname = nickname
    @property
    def surname(self) -> str:
        """
        Surname (family name) of the contact
        :return: The surname of this MapiContactNamePropertySetDto.
        :rtype: str
        """
        return self._surname
    @surname.setter
    def surname(self, surname: str):
        """
        Surname (family name) of the contact
        :param surname: The surname of this MapiContactNamePropertySetDto.
        :type: str
        """
        self._surname = surname
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MapiContactNamePropertySetDto):
            return False
        # Value equality: compares all private backing attributes.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
from redis_dec.CacheDecorator import Cache
# Package metadata exposed at import time (PEP 396-style dunder attributes).
__title__ = 'redis_decorator'
__author__ = 'Louis Lou'
__license__ = 'Apache License 2.0'
__copyright__ = 'Copyright 2018 Qijia (Louis) Lou and Contributors'
|
#! /usr/bin/env python3
import pygame
from itertools import repeat
from math import floor, acos, pi, cos, sin, sqrt, pow
from orientation import NORTH, SOUTH, EAST, WEST
from geometry import inscribe_polygon, graphics_affines, scale_points, inscribe_angles, rotate_angles, angles_to_polygon
from random_util import random_relatively_prime_pair, random_relatively_prime_to
from lc import f2lc, lc2str
from itertools import cycle, starmap
from constants import OPAQUE
from geometry import to_degrees, reflect_angles
from circle_app import CircleApp
class MagicCircle (CircleApp): # composite app, child is also circle, animated app, pos/neg space has ranges
    """Animated circle that renders its own source code as characters arranged
    on an inscribed polygon, cycling through "sections" of highlighted points.

    The animation is parameterized by a relatively-prime pair (a, b) with
    n = a * b polygon vertices; each cycle picks a fresh pair.
    """
    def __init__ (self, text=None, font=None, *args, **kwargs):
        """text: characters to display (defaults to this class's own source);
        font: pygame font (defaults to size-8 default font in set_subsurface)."""
        CircleApp.__init__ (self, *args, **kwargs)
        if text is None:
            # if child is none, use own source (get subtype source), else query child for model source, else use child source
            text = f2lc (MagicCircle)
            text = lc2str (text)
        self.text = text
        self.font = font
        # (a, b, n) animation parameters; None until the first cycle starts.
        self.a = None
        self.b = None
        self.n = None
    def set_subsurface (self, ss):
        """Bind the drawing surface, lazily create the font, pre-compute all
        layout metrics, and start the first animation cycle."""
        CircleApp.set_subsurface (self, ss)
        if self.font is None:
            df = pygame.font.get_default_font ()
            font = pygame.font.Font (df, 8)
            self.font = font
        texts, tw, th, minn, maxn, x, y, w, h = self.compute_sizes ()
        # TODO handle change in sizes
        self.texts = texts      # tuple of (rendered image, width, height) per char
        self.tw = tw            # max glyph width
        self.th = th            # max glyph height
        self.minn = minn        # min polygon vertex count that looks "arcane"
        self.maxn = maxn        # max vertex count that still fits the glyphs
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.next_cycle ()
    def next_cycle (self):
        """Begin a new animation cycle: pick new (a, b, n), recompute the
        polygon points, glyph transforms, and the section schedule."""
        a = self.a
        b = self.b
        n = self.n
        first_cycle = True
        # Parameters are all-None on the first cycle, all set afterwards.
        if a is None or b is None or n is None:
            assert a is None
            assert b is None
            assert n is None
        else: first_cycle = False
        a, b, n, pts, angles = self.get_polygons (a, b, n)
        self.a = a
        self.b = b
        self.n = n
        self.pts = pts
        self.angles = angles
        self.xforms = self.get_transforms ()
        self.sections = tuple (self.get_sections (first_cycle))
        self.sectioni = 0
    def compute_sizes (self):
        """Render each character once, measure glyph extents, and search for the
        largest polygon vertex count whose side length still fits a glyph.

        Returns (texts, tw, th, minn, maxn, x, y, w, h)."""
        text = self.text
        print ("text: %s" % (text,))
        N = len (text)
        print ("N: %s" % (N,))
        font = self.font
        crfg = (0, 255, 0, 255)
        f = lambda c: (font.render (c, True, crfg), *font.size (c))
        g = lambda c: str (c)
        texts = map (g, text)
        texts = map (f, texts)
        texts = tuple (texts) # image, w, h
        f = lambda iwh: iwh[1]
        tw = max (texts, key=f)[1]
        f = lambda iwh: iwh[2]
        th = max (texts, key=f)[2]
        print ("tw: %s, th: %s" % (tw, th))
        #texts = repeat (texts)
        # each char of text is rotated => text is a polygon, circle is inscribed
        X, Y, W, H = self.inner_rect () # outer radii
        print ("(X, Y): (%s, %s) (W: %s, H: %s)" % (X, Y, W, H))
        #w, h = W - 2 * tw, H - 2 * th # make room for text aligned at axes
        x, y, w, h = X + tw / 2, Y + th / 2, W - tw, H - th # text center
        print ("w: %s, h: %s" % (w, h))
        # text is rendered between outer and inner radii
        # find max n s.t. polygon side length >= text width
        ##f = lambda k: ceil (2 * pi / arccos (k))
        #f = lambda k: floor (2 * pi / acos (k))
        #maxn1 = f (1 - tw / 2)
        #maxn2 = f (1 + tw / 2)
        ##maxn1 = f (1 - tw / W * tw / H / 2)
        ##maxn2 = f (1 + tw / W * tw / H / 2)
        #maxn = max (maxn1, maxn2) # TODO ?
        #print ("maxn1: %s, maxn2: %s, maxn: %s" % (maxn1, maxn2, maxn))
        minn = 3 # min number of chars that will look "arcane"
        n = minn
        while True: # TODO if the formula doesn't work, at least use an interpolated binary search
            n = n + 1
            i = 0
            theta1 = (i + 0) / n * 2 * pi
            theta2 = (i + 1) / n * 2 * pi
            dx = cos (theta2) - cos (theta1)
            dy = sin (theta2) - sin (theta1)
            sl = sqrt (pow (W * dx, 2) + pow (H * dy, 2)) # side length of polygon
            if sl < tw: break
        maxn = n - 1
        print ("maxn: %s" % (maxn,))
        assert maxn >= minn * (minn + 1) # lower bound is minn^2, and the numbers must be different
        return texts, tw, th, minn, maxn, x, y, w, h
    def get_polygons (self, a=None, b=None, n=None):
        """Pick the next relatively-prime pair (a, b) with n = a * b vertices
        and compute the scaled polygon points and their angles.

        First call: all of a/b/n are None and a fresh pair is drawn; later
        calls carry the previous b forward as the new a."""
        texts = self.texts # image, w, h
        tw = self.tw
        th = self.th
        minn = self.minn
        maxn = self.maxn
        X, Y, W, H = self.get_rect ()
        x, y, w, h = self.x, self.y, self.w, self.h
        if a is None or b is None or n is None:
            assert a is None
            assert b is None
            assert n is None
            a, b, n = random_relatively_prime_pair (minn, maxn) # relatively prime pair a, b s.t. a >= minn, b >= minn, a * b <= maxn
            if a > b: # smooth out first frame
                c = a
                a = b
                b = c
        else:
            a = b
            b, n = random_relatively_prime_to (a, minn, maxn) # random number b, relatively prime to a, s.t., minn <= a * b <= maxn
        print ("a: %s, b: %s, n: %s" % (a, b, n))
        orientation = NORTH
        #pts = inscribe_polygon (n, orientation.radians ())
        angles = inscribe_angles (n)
        angles = rotate_angles (angles, orientation.radians ())
        angles = reflect_angles (angles)
        angles = tuple (angles)
        pts = angles_to_polygon (angles)
        pts = graphics_affines (pts)
        rect = x, y, w, h # text center
        pts = scale_points (pts, rect)
        pts = map (lambda pt: tuple (pt), pts)
        pts = tuple (pts)
        print ("pts: %s" % (pts,))
        #while True:
        #    a = b
        #    b, n = random_relatively_prime_to (a, minn, maxn)
        #    print ("a: %s, b: %s, n: %s" % (a, b, n))
        return a, b, n, pts, angles
    def section_helper (self, a, b, n, rev, first_section):
        """Yield successive "sections": index tuples of polygon vertices to
        draw, growing k glyphs on either side of each of the a main points.

        rev reverses the growth order (used for the shrinking half of the
        cycle); first_section controls whether k starts at 0 or 1."""
        rng = range (0, n, b) # main points: render a chars, skip n / b at a time
        rng = tuple (rng)
        assert len (rng) == a
        nloop = b // 2 # number of chars on either side of main points until they meet
        #sections = []
        if first_section: kstart = 0
        else: kstart = 1
        #K = range (first_section, nloop)
        K = range (kstart, nloop)
        if rev: K = K[::-1]
        for p in K: # k chars on either side
            section = []
            P = range (0, p + 1)
            #if rev: P = P[::-1]
            for k in P:
                for base in rng: # for each of the main points
                    section.append (base)
                    if k == 0: continue
                    section = [base - k] + section
                    #section.prepend (base - k)
                    #section.append (base + k)
                    section = section + [base + k]
            #sections.extend (section)
            #sections = sections + [section]
            #yield a, tuple (section)
            # TODO a or b ?
            #yield b, tuple (section)
            yield tuple (section)
        #f = lambda ndx: texts[ndx][0], pts[ndx]
        #sections = map (f, sections)
        #sections = tuple (sections)
        #yield a, sections
    def get_sections (self, first_section):
        """Yield the full section schedule for one cycle: a growing pass with
        (a, b) followed by a shrinking pass with the roles swapped."""
        n = self.n
        a = self.a
        b = self.b
        yield from self.section_helper (a, b, n, False, first_section) # first section starts at 0
        yield from self.section_helper (b, a, n, True, b % 2 != 0) # if odd, then n sections don't display long enough
    def transform_helper (self, text, w, h, angle):
        """Rotate one rendered glyph to face outward at `angle` (radians),
        compositing through an opaque intermediate surface to keep alpha."""
        intermediate_alpha_surface = pygame.Surface ((w, h), flags=pygame.SRCALPHA)
        intermediate_alpha_surface.fill (pygame.Color (*OPAQUE))
        text_rect = text.get_rect ()
        text_rect.center = (w / 2, h / 2)
        intermediate_alpha_surface.blit (text, text_rect, special_flags=pygame.BLEND_RGBA_MIN)
        # when angle is  0     , rad is - pi / 2
        # when angle is +pi / 2, rad is   0
        # when angle is  pi    , rad is + pi / 2
        # when angle is -pi / 2, rad is   0
        #if 0 <= angle and angle <= pi: rad = angle
        #else: rad = angle - pi
        rad = angle - pi / 2
        #orientation = NORTH
        degrees = to_degrees (rad)
        #degrees = 0
        xform = pygame.transform.rotate (intermediate_alpha_surface, degrees)
        #xform = pygame.transform.rotate (text, angle)
        return xform
    def get_transforms (self):
        """Return one rotated glyph surface per polygon vertex, cycling the
        character list if there are fewer characters than vertices."""
        texts = self.texts # image, w, h
        angles = self.angles
        # TODO might have to blit onto a temp surface
        f = lambda text, angle: self.transform_helper (*text, angle)
        ntext = len (texts)
        nangle = len (angles)
        #assert ntext == nangle, "ntext: %s, nangle: %s" % (ntext, nangle)
        k = zip (cycle (texts), angles)
        xforms = starmap (f, k)
        xforms = tuple (xforms)
        return xforms
    # def minsz: minsz of inner circle... + tw, th => minsz of outer
    # 3 * 4 = 12 points on polygon...
    #def draw_foreground (self, temp):
    def draw_cropped_scene (self, temp):
        """Draw the base circle, then blit the glyphs of the current section at
        their polygon points, and advance to the next section."""
        print ("circular_matrix_text.draw_foreground ()")
        #CircleApp.draw_foreground (self, temp)
        CircleApp.draw_cropped_scene (self, temp)
        xforms = self.xforms # image, w, h
        n = self.n
        ndx = self.sectioni
        pts = self.pts
        angles = self.angles
        print ("nsection: %s, ndx: %s" % (len (self.sections), ndx))
        #k, section = self.sections[ndx]
        section = self.sections[ndx]
        #for i in range (0, n, k):
        for i in section:
            theta = angles[i]
            xform = xforms[i]
            pt = pts[i]
            #rect = text.get_rect ()
            rect = xform.get_rect ()
            rect.center = (round (pt[0]), round (pt[1]))
            temp.blit (xform, rect)
        self.increment_section_index () # TODO move this to the troller
    def increment_section_index (self):
        """Advance the section pointer; when the schedule is exhausted, rotate
        the character list and start a fresh cycle."""
        ndx = self.sectioni + 1
        if ndx == len (self.sections):
            self.rotate_texts ()
            self.next_cycle ()
        else: self.sectioni = ndx
    def rotate_texts (self):
        """Rotate the glyph tuple left by n (mod its length) so each cycle
        starts the text at a new character."""
        texts = self.texts
        N = len (texts)
        n = self.n
        while n >= N: n = n - N
        self.texts = tuple (texts[n:] + texts[:n])
"""
from text_ring import TextRing
class MagicCircle (TextRing): # composite app, child is also circle, animated app, pos/neg space has ranges
def __init__ (self, child, text=None, font=None, *args, **kwargs):
#assert child is not None
if text is None:
# if child is none, use own source (get subtype source), else query child for model source, else use child source
text = f2lc (MagicCircle)
text = lc2str (text)
TextRing.__init__ (self, child, text, font, *args, **kwargs)
self.a = None
self.b = None
self.n = None
#assert self.child is not None
def set_subsurface (self, ss):
TextRing.set_subsurface (self, ss)
self.next_cycle ()
def next_cycle (self):
a = self.a
b = self.b
n = self.n
first_cycle = True
if a is None or b is None or n is None:
assert a is None
assert b is None
assert n is None
else: first_cycle = False
a, b, n, pts, angles = self.get_polygons (a, b, n)
self.a = a
self.b = b
self.n = n
self.set_n (n)
#self.pts = pts
self.set_pts (pts)
self.angles = angles
self.xforms = self.get_transforms ()
self.sections = tuple (self.get_sections (first_cycle))
self.sectioni = 0
if self.child is not None: self.child.pts = pts
def get_text_for_transforms (self): return cycle (self.texts)
def get_polygons (self, a=None, b=None, n=None):
texts = self.texts # image, w, h
tw = self.tw
th = self.th
minn = self.minn
maxn = self.maxn
X, Y, W, H = self.get_rect ()
x, y, w, h = self.x, self.y, self.w, self.h
if a is None or b is None or n is None:
assert a is None
assert b is None
assert n is None
a, b, n = random_relatively_prime_pair (minn, maxn) # relatively prime pair a, b s.t. a >= minn, b >= minn, a * b <= maxn
if a > b: # smooth out first frame
c = a
a = b
b = c
else:
a = b
b, n = random_relatively_prime_to (a, minn, maxn) # random number b, relatively prime to a, s.t., minn <= a * b <= maxn
print ("a: %s, b: %s, n: %s" % (a, b, n))
orientation = NORTH
#pts = inscribe_polygon (n, orientation.radians ())
angles = inscribe_angles (n)
angles = rotate_angles (angles, orientation.radians ())
angles = reflect_angles (angles)
angles = tuple (angles)
pts = angles_to_polygon (angles)
pts = graphics_affines (pts)
rect = x, y, w, h # text center
pts = scale_points (pts, rect)
pts = map (lambda pt: tuple (pt), pts)
pts = tuple (pts)
print ("pts: %s" % (pts,))
#while True:
# a = b
# b, n = random_relatively_prime_to (a, minn, maxn)
# print ("a: %s, b: %s, n: %s" % (a, b, n))
return a, b, n, pts, angles
def section_helper (self, a, b, n, rev, first_section):
rng = range (0, n, b) # main points: render a chars, skip n / b at a time
rng = tuple (rng)
assert len (rng) == a
nloop = b // 2 # number of chars on either side of main points until they meet
#sections = []
if first_section: kstart = 0
else: kstart = 1
K = range (first_section, nloop)
if rev: K = K[::-1]
for p in K: # k chars on either side
section = []
P = range (0, p + 1)
#if rev: P = P[::-1]
for k in P:
for base in rng: # for each of the main points
section.append (base)
if k == 0: continue
section = [base - k] + section
#section.prepend (base - k)
#section.append (base + k)
section = section + [base + k]
#sections.extend (section)
#sections = sections + [section]
#yield a, tuple (section)
# TODO a or b ?
#yield b, tuple (section)
yield tuple (section)
#f = lambda ndx: texts[ndx][0], pts[ndx]
#sections = map (f, sections)
#sections = tuple (sections)
#yield a, sections
def get_sections (self, first_section):
n = self.n
a = self.a
b = self.b
yield from self.section_helper (a, b, n, False, first_section) # first section starts at 0
yield from self.section_helper (b, a, n, True, b % 2 != 0) # if odd, then n sections don't display long enough
def draw_cropped_scene (self, temp):
print ("circular_matrix_text.draw_foreground ()")
#assert self.child is not None
TextRing.draw_cropped_scene (self, temp)
self.increment_section_index () # TODO move this to the troller
def increment_section_index (self):
ndx = self.sectioni + 1
if ndx == len (self.sections):
self.rotate_texts ()
self.next_cycle ()
else: self.sectioni = ndx
def rotate_texts (self):
texts = self.texts
N = len (texts)
n = self.n
while n >= N: n = n - N
self.texts = tuple (texts[n:] + texts[:n])
"""
if __name__ == "__main__":
from rotation import ANGLED, STRAIGHT
from orientation import NORTH, SOUTH, EAST, WEST
from gui import GUI
from hal import HAL9000
from constants import SECONDARY_BACKGROUND
from polygoned_circle import EqualPolygonedCircle
def main ():
if False:
b = None
###b = CircleApp (background=SECONDARY_BACKGROUND)
##c = None
#c = CircleApp ()
#b = EqualPolygonedCircle (None, c, background=SECONDARY_BACKGROUND)
a = MagicCircle (b)
#a = RecursiveCompositeTest ()
else:
#b = None
a = MagicCircle ()
with HAL9000 (app=a, exit_on_close=False) as g:
#g.setApp (a)
g.run ()
main ()
quit ()
|
from .users import UsersViewSet |
import time
from dataclasses import dataclass
from PIL import Image, ImageDraw, ImageFont
from threading import Thread
from .screen import BaseTopNavScreen, ButtonListScreen
from ..components import GUIConstants, Fonts, TextArea, calc_text_centering
from seedsigner.hardware.buttons import HardwareButtonsConstants
from seedsigner.models import DecodeQR, DecodeQRStatus
@dataclass
class ScanScreen(BaseTopNavScreen):
    """Full-screen QR scanner: streams camera frames to the display on a
    background thread while the main loop feeds frames to the QR decoder."""
    decoder: DecodeQR = None            # accumulates (possibly multi-part) QR data
    instructions_text: str = "Scan a QR code"
    def __post_init__(self):
        from seedsigner.hardware.camera import Camera
        # Customize defaults
        self.title = "Scan"
        # Initialize the base class
        super().__post_init__()
        self.camera = Camera.get_instance()
        self.camera.start_video_stream_mode(resolution=(480, 480), framerate=12, format="rgb")
        # Prep the bottom semi-transparent instruction bar
        self.instructions_background = Image.new("RGBA", (self.canvas_width, 40), color="black")
        self.instructions_background_y = self.canvas_height - self.instructions_background.height
        # Pre-calc where the instruction text goes
        self.instructions_font = Fonts.get_font(GUIConstants.BUTTON_FONT_NAME, GUIConstants.BUTTON_FONT_SIZE)
        # TODO: Add the QR code icon and adjust start_x
        (self.instructions_text_x, self.instructions_text_y) = calc_text_centering(
            font=self.instructions_font,
            text=self.instructions_text,
            is_text_centered=True,
            total_width=self.canvas_width,
            total_height=self.instructions_background.height,
            start_x=0,
            start_y=0
        )
    def _run(self):
        """
        _render() is mostly meant to be a one-time initial drawing call to set up the
        Screen. Once interaction starts, the display updates have to be managed in
        _run(). The live preview is an extra-complex case.
        """
        def live_preview():
            # Preview loop: show the latest frame (with progress text once a
            # multi-part PSBT scan has started) until the stream is stopped.
            while True:
                frame = self.camera.read_video_stream(as_image=True)
                if frame is not None:
                    scan_text = self.instructions_text
                    if self.decoder.get_percent_complete() > 0 and self.decoder.is_psbt:
                        scan_text = str(self.decoder.get_percent_complete()) + "% Complete"
                    # TODO: Render TopNav & instructions_background w/transparency
                    # img = Image.new(mode='RGBA', size=(self.canvas_width, self.canvas_height))
                    # img.paste(frame.resize((self.canvas_width, self.canvas_height)))
                    self.renderer.show_image_with_text(frame.resize((self.canvas_width, self.canvas_height), resample=Image.NEAREST), scan_text, font=self.instructions_font, text_color="white", text_background=(0,0,0,225))
                    # self.top_nav.render()
                    # self.renderer.show_image()
                time.sleep(0.1) # turn this up or down to tune performance while decoding psbt
                # Exit once the main loop has stopped the video stream.
                if self.camera._video_stream is None:
                    break
        # putting live preview in its own thread to improve psbt decoding performance
        t = Thread(target=live_preview)
        t.start()
        # Decoder loop: feed raw frames to the QR decoder until it completes,
        # fails, or the user cancels with a left/right key press.
        while True:
            frame = self.camera.read_video_stream()
            if frame is not None:
                status = self.decoder.add_image(frame)
                if status in (DecodeQRStatus.COMPLETE, DecodeQRStatus.INVALID):
                    self.camera.stop_video_stream_mode()
                    break
            # TODO: KEY_UP gives control to NavBar; use its back arrow to cancel
            if self.hw_inputs.check_for_low(HardwareButtonsConstants.KEY_RIGHT) or self.hw_inputs.check_for_low(HardwareButtonsConstants.KEY_LEFT):
                self.camera.stop_video_stream_mode()
                break
        time.sleep(0.2) # time to let live preview thread complete to avoid race condition on display
@dataclass
class SettingsUpdatedScreen(ButtonListScreen):
    """Confirmation screen shown after settings are imported from a QR code."""
    config_name: str = None
    title: str = "Settings QR"
    is_bottom_list: bool = True
    def __post_init__(self):
        # Customize defaults
        self.button_data = ["Home"]
        super().__post_init__()
        next_y = self.top_nav.height + 20
        if self.config_name:
            # Show the imported config's name (quoted) above the notice.
            self.config_name_textarea = TextArea(
                text=f'"{self.config_name}"',
                is_text_centered=True,
                auto_line_break=True,
                screen_y=next_y
            )
            self.components.append(self.config_name_textarea)
            next_y = self.config_name_textarea.screen_y + 50
        success_notice = TextArea(
            text="Settings imported successfully!",
            is_text_centered=True,
            auto_line_break=True,
            screen_y=next_y
        )
        self.components.append(success_notice)
|
"""
PROVINCES
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# Pie chart of sample counts per Pakistani province / territory.
province_labels = ['Punjab', 'Gilgit Baltistan', 'Balochistan', 'KPK', 'Sindh', 'Federal Territory']
province_counts = [154, 1, 1, 6, 2, 2]
# Radial offsets that pull each wedge away from the center for emphasis.
wedge_offsets = (0.25, 0.5, 0, 0.9, 0.1, 0.5)
fig, ax = plt.subplots()
ax.pie(province_counts, explode=wedge_offsets, labels=province_labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax.axis('equal')  # equal aspect ratio keeps the pie circular
plt.tight_layout()
plt.show()
from reification import *
from caty.jsontools import tag, untagged, tagged
class ReifyType(SafeReifier):
    """Command that reifies a named type (addressed by app:module:name path)
    into its 'sweet' form representation."""
    def setup(self, opts, arg):
        SafeReifier.setup(self, opts, arg)
    def _execute(self):
        """Resolve the app/module/type from self._cdpath, collect the allowed
        sweet attribute names, and reify the type.

        Raises BadArg for malformed paths or non-sweet types, RuntimeError
        when the sweet schema module is missing its attribute types.
        """
        system = self.current_app._system
        app_name, module_name, name = split_colon_dot_path(self._cdpath)
        if not app_name or app_name == 'this':
            app = self.current_app
        else:
            app = system.get_app(app_name)
        if not module_name:
            throw_caty_exception('BadArg', u'$arg', arg=self._cdpath)
        if not name:
            throw_caty_exception('BadArg', u'$arg', arg=self._cdpath)
        module = app._schema_module.get_module(module_name)
        attrs = set()
        try:
            for k, v in app._schema_module.get_module(u'sweet').get_type(u'SweetAttributes').items():
                attrs.add(k)
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            throw_caty_exception(u'RuntimeError', u'sweet:SweetAttributes is required by sweet:reif')
        try:
            for k, v in app._schema_module.get_module(u'sweet').get_type(u'SweetItemAttributes').items():
                attrs.add(k)
        except Exception:
            throw_caty_exception(u'RuntimeError', u'sweet:SweetItemAttributes is required by sweet:reif')
        reifier = SweetFormReifier(attrs)
        # Only types that have already been expanded are accepted.
        try:
            r = reifier.reify_type(module.get_type(name))
        except CatyException:
            throw_caty_exception(u'BadArg', u'not a sweet type: $type', type=self._cdpath)
        return r
class SweetFormReifier(ShallowReifier):
    """Reifier that renders a type into a 'sweet' form description, keeping
    only the annotation keys listed in SWEET_ATTRIBUTES."""
    def __init__(self, sweet_attrs):
        ShallowReifier.__init__(self)
        self.SWEET_ATTRIBUTES = frozenset(sweet_attrs)
    def reify_type(self, t):
        """Reify type *t*; types marked 'predefined' become name references."""
        if self._is_predefined(t):
            return tagged(u'predefined', {u'typeName': t.canonical_name})
        shallow = ShallowReifier.reify_type(self, t)
        return ObjectDumper(shallow[u'location'], self.SWEET_ATTRIBUTES).visit(t.body)
    def _is_predefined(self, node):
        """True when *node* (or a Root/Ref it wraps) carries the 'predefined' annotation."""
        if u'predefined' in node.annotations:
            return True
        if isinstance(node.body, (Root, Ref)):
            return self._is_predefined(node.body)
        return False
# Tags of singleton (literal-value) types that are allowed inside sweet enums.
SINGLETON_TYPES = set([u'string-val', u'binary-val', u'number-val', u'boolean-val'])
class ObjectDumper(TypeBodyReifier):
    """Visitor that converts a type body into the 'sweet' form representation.

    Unions of singleton types become tagged enums, bags become multi-enums;
    anything that is not expressible as a sweet form raises BadArg.
    """
    def __init__(self, location, sweet_attrs):
        # NOTE(review): TypeBodyReifier.__init__ is intentionally not called
        # here (original behavior) — confirm the base class tolerates this.
        self.default_loc = location
        self._history = {}  # guards against recursive type references
        self.SWEET_ATTRIBUTES = sweet_attrs
    def _extract_common_data(self, node):
        """Lift whitelisted annotation keys out of 'anno' into the result dict."""
        r = TypeBodyReifier._extract_common_data(self, node)
        anno = r.pop(u'anno', {})
        for k, v in anno.items():
            if k in self.SWEET_ATTRIBUTES:
                r[k] = v
        return r
    def _visit_root(self, node):
        if u'predefined' in node.annotations:
            return tagged(u'predefined', {u'typeName': node.canonical_name})
        return node.body.accept(self)
    def _visit_union(self, node):
        """A union of singleton values reifies to an 'enum' of its items."""
        r = untagged(TypeBodyReifier._visit_union(self, node))
        types = r[u'specified']
        items = []
        for t in types:
            if tag(t) not in SINGLETON_TYPES:
                throw_caty_exception(u'BadArg', u'not a sweet type')
            else:
                i = untagged(t)
                v = {u'value': i[u'value']}
                if u'label' in i.get(u'anno', {}):
                    v[u'label'] = i[u'anno'][u'label']
                items.append(tagged(u'item', v))
        return tagged(u'enum', items)
    def _visit_bag(self, node):
        """A bag of singleton values reifies to a 'multi-enum' with occurrence bounds."""
        r = untagged(TypeBodyReifier._visit_bag(self, node))
        types = r[u'items']
        items = []
        for bagitem in types:
            i = untagged(bagitem)
            t = i[u'type']
            if tag(t) not in SINGLETON_TYPES:
                throw_caty_exception(u'BadArg', u'not a sweet type')
            else:
                o = untagged(t)
                v = {u'value': o[u'value']}
                # Bug fix: was `if u'label' in o[u'anno']`, which raised
                # KeyError when the item had no annotations; use .get() for
                # consistency with _visit_union above.
                if u'label' in o.get(u'anno', {}):
                    v[u'label'] = o[u'anno'][u'label']
                v[u'minOccurs'] = i[u'minOccurs']
                v[u'maxOccurs'] = i[u'maxOccurs']
                items.append(tagged(u'multi-item', v))
        return tagged(u'multi-enum', items)
    def _visit_option(self, node):
        # Mark the wrapped value(s) optional, preserving the outer tag.
        r = node.body.accept(self)
        b = untagged(r)
        if isinstance(b, dict):
            b[u'optional'] = True
        else:
            for i in b:
                untagged(i)[u'optional'] = True
        return r
    def _visit_symbol(self, node):
        from caty.core.schema import TypeReference, TypeVariable
        if isinstance(node, (TypeReference)):
            # Recursive references cannot be rendered as a finite sweet form.
            if node.canonical_name in self._history:
                throw_caty_exception(u'BadArg', u'not a sweet type')
            self._history[node.canonical_name] = True
            try:
                return node.body.accept(self)
            finally:
                del self._history[node.canonical_name]
        elif isinstance(node, TypeVariable):
            throw_caty_exception(u'BadArg', u'not a sweet type')
        else:
            return TypeBodyReifier._visit_symbol(self, node)
    @format_result(u'array-of')
    def _visit_array(self, node):
        """Arrays keep per-position types; a trailing repeat element becomes
        the optional 'additional' type."""
        r = self._extract_common_data(node)
        r[u'specified'] = []
        for v in node:
            r[u'specified'].append(v.accept(self))
        if r[u'repeat']:
            rep = r[u'specified'].pop(-1)
            untagged(rep)[u'optional'] = True
            r[u'additional'] = rep
            del r[u'repeat']
        else:
            r[u'additional'] = tagged(u'builtin', {'typeName': u'never'})
        return r
|
import pandas as pd
from data_aug.tezro_data import TezroDataset
from torchvision.transforms import transforms
from data_aug.gaussian_blur import GaussianBlur
from data_aug.view_generator import ContrastiveLearningViewGenerator
def get_simclr_pipeline_transform(size, s=1):
    """Return a set of data augmentation transformations as described in the SimCLR paper."""
    jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
    pipeline = [
        transforms.RandomResizedCrop(size=size),
        transforms.RandomHorizontalFlip(),
        transforms.RandomApply([jitter], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        GaussianBlur(kernel_size=int(0.1 * size)),
        # transforms.ToTensor()
    ]
    return transforms.Compose(pipeline)
# Smoke test: build the dataset with a two-view SimCLR augmentation pipeline
# (each sample yields 2 augmented views of a 165px crop).
ds = TezroDataset('demo.csv', './datasets', transform=ContrastiveLearningViewGenerator(
    get_simclr_pipeline_transform(165),
    2))
# print(pd.read_csv('demo.csv').iloc[0]['image_id_root'])
# print(ds[0][0][1].show())
# print(next(iter(ds)))
|
# Hangman game
import random, string
def loadWords():
    """Read the word list from WORDLIST_FILENAME (one whitespace-separated line).

    Returns a list of lowercase candidate words.
    """
    print("Loading word list from file...")
    # BUG FIX: the original never closed the file handle; use a context manager
    with open(WORDLIST_FILENAME, 'r') as inFile:
        line = inFile.readline()
    wordlist = line.split()
    print(" ", len(wordlist), "words loaded.")
    return wordlist
def chooseWord(wordlist):
    """Pick a secret word uniformly at random from wordlist."""
    secret = random.choice(wordlist)
    return secret
def isWordGuessed(secretWord, lettersGuessed):
    """True iff every letter of secretWord appears in lettersGuessed."""
    return all(ch in lettersGuessed for ch in secretWord)
def getGuessedWord(secretWord, lettersGuessed):
    """Return secretWord with every unguessed letter replaced by '_'.

    BUG FIX (latent): the original named its accumulator `string`, shadowing
    the imported `string` module -- the exact pattern that breaks
    getAvailableLetters in this file. Renamed to `guessed`.
    """
    guessed = ''
    for letter in secretWord:
        if letter in lettersGuessed:
            guessed += letter
        else:
            guessed += '_'
    return guessed
def getAvailableLetters(lettersGuessed):
    """Return the lowercase letters not yet guessed, in alphabetical order.

    BUG FIX: the original assigned a local variable named `string` (the empty
    str) and then iterated `string.ascii_lowercase`, which raised
    AttributeError because the local shadowed the `string` module.
    """
    available = ''
    for letter in string.ascii_lowercase:
        if letter not in lettersGuessed:
            available += letter
    return available
def hangman(secretWord):
    """Interactive hangman loop: up to 8 wrong guesses from stdin.

    Ends when the word is fully guessed or the guess budget runs out.
    """
    availableLetters = string.ascii_lowercase
    lettersGuessed = ''
    numGuesses = 8
    endGame = False
    while not endGame:
        guess = input('Guess:').lower()
        if guess in lettersGuessed:
            print('Duplicated letter:')
        elif guess in availableLetters:
            # record the guess
            lettersGuessed += guess
            # BUG FIX: the original discarded this return value, so
            # availableLetters was never actually updated
            availableLetters = getAvailableLetters(lettersGuessed)
            if guess in secretWord:
                endGame = isWordGuessed(secretWord, lettersGuessed)
            else:
                numGuesses -= 1
                # BUG FIX: the original looped forever once guesses ran out
                if numGuesses <= 0:
                    endGame = True
        else:
            print('Invalid letter:')
# Module global read by loadWords(); must be defined before the call below.
WORDLIST_FILENAME = "words.txt"
# Load the dictionary, pick a secret word, and start the game.
wordlist = loadWords()
secretWord = chooseWord(wordlist).lower()
hangman(secretWord)
|
import paho.mqtt.client as mqtt
def on_connect(client, userdata, flags, rc):
    """CONNACK callback: report whether the broker accepted the connection."""
    if rc != 0:
        print("Bad connection Returned code=", rc)
    else:
        print("connected OK")
def on_disconnect(client, userdata, flags, rc=0):
    """Disconnect callback: print the disconnect result code."""
    message = str(rc)
    print(message)
def on_subscribe(client, userdata, mid, granted_qos):
    """SUBACK callback: print the message id and granted QoS levels."""
    print("subscribed: " + str(mid) + " " + str(granted_qos))
def on_message(client, userdata, msg):
    """Message callback: decode the payload as UTF-8 and print it."""
    text = msg.payload.decode("utf-8")
    print(str(text))
while True:
    # create a new client
    client = mqtt.Client()
    # register the callbacks: on_connect (connected to the broker),
    # on_disconnect (disconnected from the broker), on_subscribe (topic
    # subscribed), on_message (a published message arrived)
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_subscribe = on_subscribe
    client.on_message = on_message
    # connect to a remote (not local) mqtt broker
    # address : broker.hivemq.com
    # port: 1883
    client.connect('203.234.62.117', 1883)
    # subscribe to the 'test/sensor' topic with QoS 1
    # NOTE(review): subscribing here instead of inside on_connect means the
    # subscription is not re-established after an automatic reconnect, and
    # loop_forever() below blocks, so the outer while only matters if it
    # raises -- confirm this is intended
    client.subscribe('test/sensor', 1)
    client.loop_forever()
# coding=utf-8
import numpy as np
from scipy.integrate import quad, cumtrapz
# Normalized ramp shapes on [0, 1] used to build the density profile.
profiles = {"linear": lambda x: x,
            "quadratic": lambda x: x ** 2,
            "exponential": lambda x: np.exp(10 * (x - 1))}


def FDENS(x, moat_left, ramp_length, plasma_length, N, func='linear'):
    """Piecewise particle density: empty moat, ramp of shape `func`, flat plateau.

    Returns an array like x holding the density at each position, normalized
    so the total integrates to roughly N particles.
    """
    shape = profiles[func]

    def ramp(x_value):
        # rescale the ramp coordinate to [0, 1] before applying the profile
        return shape((x_value - moat_left) / ramp_length)

    rectangle_area = (plasma_length - ramp_length)
    # noinspection PyTupleAssignmentBalance
    ramp_area, _ = quad(ramp, moat_left, moat_left + ramp_length)
    normalization = (N+0.1) / (rectangle_area + ramp_area)  # N + 0.1 due to non-exact float calculations
    result = np.zeros_like(x)
    in_moat = x < moat_left
    on_ramp = (x < moat_left + ramp_length) & ~in_moat
    on_plateau = (x < moat_left + plasma_length) & ~(on_ramp | in_moat)
    result[on_ramp] = normalization * ramp(x[on_ramp])
    result[on_plateau] = normalization
    return result
# def relativistic_maxwellian(v, N, c, m, T):
# gamma = physics.gamma_from_v(v, c)
# kinetic_energy = (gamma - 1) * m * c ** 2
# normalization = N / (2 * np.pi) * m * c **2 / T / (1 + T / m / c**2)
# f = normalization * np.exp(-kinetic_energy/T)
# # TODO: finish this algorithm
# raise NotImplementedError
def generate(dense_range, func, *function_params):
    """Place particles where the cumulative integral of `func` crosses an integer.

    Asserts that the grid is fine enough that no cell accumulates more than
    one particle, then returns the grid positions just before each crossing.
    """
    density = func(dense_range, *function_params)
    cumulative = cumtrapz(density, dense_range, initial=0).astype(int)
    increments = np.diff(cumulative)
    assert (increments <= 1).all(), "There's two particles in a cell! Increase resolution."
    crossed = increments == 1
    return dense_range[:-1][crossed]
|
import os
import sys
import logging
import argparse
import json
import get_data
import settings
import utils
import data_manager
import visualizer
def init(stock_code, num_epoches, num_steps):
    """Build the argparse Namespace for one training run.

    The defaults mirror a command line such as:
    --stock_code MSFT --rl_method dqn --net lstm --num_steps 5 --learning --num_epoches 10 --lr 0.001 --start_epsilon 1 --discount_factor 0.9

    NOTE(review): parse_args() still reads sys.argv, so real command-line
    flags can override the values passed in -- confirm this is intended.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--stock_code', default=stock_code)
    arg_parser.add_argument('--ver', choices=['v1', 'v2'], default='v1')
    arg_parser.add_argument('--rl_method', default='dqn')
    arg_parser.add_argument('--net', default='lstm')
    arg_parser.add_argument('--num_steps', type=int, default=num_steps)
    arg_parser.add_argument('--lr', type=float, default=0.001)
    arg_parser.add_argument('--discount_factor', type=float, default=0.9)
    arg_parser.add_argument('--start_epsilon', type=float, default=1)
    arg_parser.add_argument('--balance', type=int, default=100000)
    arg_parser.add_argument('--num_epoches', type=int, default=num_epoches)
    arg_parser.add_argument('--delayed_reward_threshold', type=float, default=0.05)
    arg_parser.add_argument('--backend', choices=['tensorflow', 'plaidml'], default='tensorflow')
    arg_parser.add_argument('--output_name', default=utils.get_time_str())
    arg_parser.add_argument('--value_network_name')
    arg_parser.add_argument('--policy_network_name')
    arg_parser.add_argument('--reuse_models', action='store_true')
    arg_parser.add_argument('--learning', default=True)
    arg_parser.add_argument('--start_date', default='20160714')
    arg_parser.add_argument('--end_date', default='20191231')
    return arg_parser.parse_args()
def executor(args):
    """Run one DQN training session for args.stock_code; return (chart_data, pvs).

    BUG FIX: the parameter was misspelled `agrs` while the body referenced
    `args`, which only resolved through the module-level global created under
    __main__ -- a latent NameError for any other caller. Renamed to `args`
    (positional callers are unaffected).
    """
    # Keras backend selection must happen before the learner imports below
    if args.backend == 'tensorflow':
        os.environ['KERAS_BACKEND'] = 'tensorflow'
    elif args.backend == 'plaidml':
        os.environ['KERAS_BACKEND'] = 'plaidml.keras.backend'
    # output directory for logs, params, and model files
    output_path = os.path.join(settings.BASE_DIR,
                               'output/{}_{}_{}'.format(args.output_name, args.rl_method, args.net))
    if not os.path.isdir(output_path):
        os.makedirs(output_path)
    # record the run parameters for reproducibility
    with open(os.path.join(output_path, 'params.json'), 'w') as f:
        f.write(json.dumps(vars(args)))
    # logging: DEBUG to file, INFO to stdout
    file_handler = logging.FileHandler(filename=os.path.join(
        output_path, "{}.log".format(args.output_name)), encoding='utf-8')
    stream_handler = logging.StreamHandler(sys.stdout)
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler], level=logging.DEBUG)
    # RLTrader modules must be imported after logging / Keras backend are set
    from agent import Agent
    from learners import DQNLearner
    # model file paths (explicit name wins over the generated one)
    value_network_path = ''
    policy_network_path = ''
    if args.value_network_name is not None:
        value_network_path = os.path.join(settings.BASE_DIR,
                                          'models/{}.h5'.format(args.value_network_name))
    else:
        value_network_path = os.path.join(
            output_path, '{}_{}_value_{}.h5'.format(
                args.rl_method, args.net, args.output_name))
    if args.policy_network_name is not None:
        policy_network_path = os.path.join(settings.BASE_DIR,
                                           'models/{}.h5'.format(args.policy_network_name))
    else:
        policy_network_path = os.path.join(
            output_path, '{}_{}_policy_{}.h5'.format(
                args.rl_method, args.net, args.output_name))
    common_params = {}
    list_stock_code = []
    list_chart_data = []
    list_training_data = []
    list_min_trading_unit = []
    list_max_trading_unit = []
    stock_code = args.stock_code
    get_data.get_data(stock_code,
                      args.start_date, args.end_date, ver=args.ver)
    # load chart data and derived training features
    chart_data, training_data = data_manager.load_data(
        os.path.join(settings.BASE_DIR,
                     'data/{}/{}.csv'.format(args.ver, stock_code)),
        args.start_date, args.end_date, ver=args.ver)
    # min/max trading units scaled to the last closing price
    min_trading_unit = max(int(10000 / chart_data.iloc[-1]['close']), 1)
    max_trading_unit = max(int(100000 / chart_data.iloc[-1]['close']), 1)
    # parameters shared by every learner configuration
    common_params = {'rl_method': args.rl_method,
                     'delayed_reward_threshold': args.delayed_reward_threshold,
                     'net': args.net, 'num_steps': args.num_steps, 'lr': args.lr,
                     'output_path': output_path, 'reuse_models': args.reuse_models}
    # start reinforcement learning
    learner = None
    common_params.update({'stock_code': stock_code,
                          'chart_data': chart_data,
                          'training_data': training_data,
                          'min_trading_unit': min_trading_unit,
                          'max_trading_unit': max_trading_unit})
    learner = DQNLearner(**{**common_params,
                            'value_network_path': value_network_path})
    if learner is not None:
        pvs = learner.run(balance=args.balance,
                          num_epoches=args.num_epoches,
                          discount_factor=args.discount_factor,
                          start_epsilon=args.start_epsilon,
                          learning=args.learning)
        learner.save_models()
    return chart_data, pvs
def buyAndHold(chart_data):
    """Portfolio value over time of buying 100,000 worth at the first open price.

    Returns a list parallel to chart_data rows with the buy-and-hold value.
    """
    # Use the 'open' column explicitly. The original read iloc[0, 1] by
    # position (same value assuming 'open' is the second column -- TODO
    # confirm against data_manager.load_data) and printed a stray debug
    # line; both cleaned up here.
    priceHistory = chart_data['open'].to_list()
    initial_price = priceHistory[0]
    numberOfStock = 100000 / initial_price
    return [price * numberOfStock for price in priceHistory]
if __name__ == '__main__':
    # #args = init(stock_code, num_epoches, num_steps)
    # Train and visualize each ticker independently.
    stock_names = ['GOOGL', 'AAPL', 'FB', 'AMZN', 'MSFT']
    for stock_name in stock_names :
        # baseline run: 10 epochs, 4 LSTM steps
        args = init(stock_name, 10, 4)
        chart_data, pvs = executor(args)
        # compare the learned policy against a buy-and-hold baseline
        buyAndHoldValue = buyAndHold(chart_data)
        visualizer.present_stock (chart_data, stock_name)
        visualizer.preset_buyAndHold(chart_data, pvs, buyAndHoldValue, stock_name)
        # sweep over the number of LSTM steps and plot the comparison
        steps = [3,6,9]
        steps_pvs = []
        for step in steps :
            args = init(stock_name, 10, step)
            chart_data, pvs = executor(args)
            steps_pvs.append(pvs)
        visualizer.present_steps(chart_data, steps, steps_pvs, stock_name)
|
#!/usr/bin/env python
import argparse
import subprocess
import tempfile
import smtplib
import email.mime.multipart
from email.MIMEText import MIMEText
import logging
def set_log():
    """Configure the root logger with a console handler at module LOG_LEVEL."""
    root = logging.getLogger()
    root.setLevel(LOG_LEVEL)
    console = logging.StreamHandler()
    console.setLevel(LOG_LEVEL)
    console.setFormatter(logging.Formatter(
        '%(filename)s [LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s'))
    root.addHandler(console)
def get_body_msg():
    """Open vim on a fresh temp file and return its contents as the mail body."""
    import os
    logging.info("[!] Opening vim to write the body ..")
    tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
    tmp_name = tmp.name
    tmp.close()
    subprocess.call(['vim', tmp_name])
    try:
        # renamed: the original reused `body_` for both the file handle
        # and its contents
        with open(tmp_name) as body_file:
            body = body_file.read()
    finally:
        # BUG FIX: delete=False meant the temp file was never removed
        os.unlink(tmp_name)
    return body
def send(server, port, sender, to, reply_to, subject, body, priority):
    """Build the spoofed message and relay it through the given SMTP server.

    server/port: SMTP relay; sender/to/reply_to: addresses; subject/body:
    message content; priority: X-Priority value (1=high .. 5=low).
    """
    msg = email.mime.multipart.MIMEMultipart()
    msg['to'] = to
    msg['from'] = sender
    msg['subject'] = subject
    # BUG FIX: header values must be strings; the int from argparse broke
    # message serialization
    msg['X-Priority'] = str(priority)
    msg.add_header('reply-to', reply_to)
    msg.attach(MIMEText(body))
    smtp = smtplib.SMTP(server, int(port))
    try:
        smtp.sendmail(sender, to, msg.as_string())
    finally:
        # BUG FIX: close the connection even when sendmail raises
        smtp.close()
def parse_args():
    """Define and parse the command-line interface for the spoofing script."""
    cli = argparse.ArgumentParser(description='Send spoofed email message')
    cli.add_argument('--server', type=str,
                     help='SMTP Server (default localhost)', default="localhost")
    cli.add_argument('--port', type=int,
                     help='SMTP Port (defaut 25)', default=25)
    cli.add_argument('--sender', type=str,
                     help='Sender -> from who we send email', required=True)
    cli.add_argument('--to', type=str,
                     help='Receiver-> to who we send email', required=True)
    cli.add_argument('--priority', type=int,
                     help='Message priority (default 3)', default=3)
    cli.add_argument('--reply-to', type=str, help='Reply-To', required=True)
    cli.add_argument('--subject', type=str, help='Message subject', required=True)
    return cli.parse_args()
if __name__ == '__main__':
    LOG_LEVEL = 'INFO'  # 'DEBUG'
    set_log()
    config = parse_args()
    # BUG FIX: the original assigned the function object (missing call
    # parentheses), so the vim-edited body was never read and a function
    # was passed as the mail body.
    msg_body = get_body_msg()
    try:
        send(config.server, config.port, config.sender, config.to,
             config.reply_to, config.subject, msg_body, config.priority)
        logging.info("[-] E-mail successfully spoofed.")
    except Exception as e:
        exit('Error: %s' % e)
|
from __future__ import unicode_literals
from django.conf.urls import include, patterns, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()
# NOTE(review): string view references and patterns() were deprecated in
# Django 1.8 and removed in 1.10 -- this urlconf targets an older Django.
# Non-localized URLs (no language prefix).
urlpatterns = patterns(
    '',
    url(r'^$',
        'base.views.index',
        name='index'),
    url(r'^i18n/',
        include('django.conf.urls.i18n')),
)
# Language-prefixed URLs (e.g. /en/dashboard/).
urlpatterns += i18n_patterns(
    '',
    url(r'^login/$',
        'django.contrib.auth.views.login',
        name='login'),
    url(r'^logout/$',
        'django.contrib.auth.views.logout_then_login',
        name='logout'),
    url(r'^dashboard/$',
        'dashboard.views.dashboard',
        name='dashboard'),
    url(r'^setup/$',
        'base.views.setup',
        name='setup'),
    url(r'^budget/',
        include('budget.urls', namespace='budget')),
    url(r'^category/',
        include('category.urls', namespace='category')),
    url(r'^admin/',
        include(admin.site.urls)),
    url(r'^transaction/',
        include('transaction.urls', namespace='transaction')),
    url(r'^summary/',
        include('summary.urls', namespace='summary')),
)
|
'''
在反向传播时对梯度缩放
'''
from paddle.autograd import PyLayer
class GradMultiply(PyLayer):
    """Identity in the forward pass; scales the gradient in the backward pass."""
    @staticmethod  # must be static (callable without instantiating the layer)
    def forward(ctx, x,scale):
        # ctx is a context object that store some objects for backward.
        ctx.scale = scale  # stash the scale so backward() can read it
        return x
    @staticmethod
    # forward has only one output, so there is only one gradient in the input of backward.
    def backward(ctx, grad):
        # forward has only one input, so only one gradient tensor is returned.
        return grad * ctx.scale
|
#!/usr/bin/python
from __future__ import print_function
"""Recursive version if Fibonacci."""
def unused():
    """A function that shouldn't be compiled."""
    answer = 42
    return answer
def fib(n):
    """Recursive Fibonacci with fib(0) == fib(1) == 1 (kept recursive on purpose)."""
    if n in (0, 1):
        return 1
    return fib(n - 1) + fib(n - 2)
print(fib(9))
# TODO: Do this later.
# NOTE(review): `main` is only defined inside this disabled `if 0:` block,
# so the __main__ guard below raises NameError when run directly; `xrange`
# is Python 2 only. Presumably deliberate for the opy compiler/callgraph
# fixture -- confirm before "fixing".
if 0:
    def main():
        for i in xrange(9):
            print(fib(i))
        print('Done fib_recursive.py')
if __name__ == '__main__':
    import os
    if os.getenv('CALLGRAPH') == '1':
        import sys
        from opy import callgraph
        # walk the call graph rooted at main instead of executing it
        callgraph.Walk(main, sys.modules)
    else:
        main()
|
from datetime import datetime
from typing import Hashable, Any, Optional, Dict, TypeVar, List
from copy import deepcopy
from mongoengine import connect
from core.state_schema import User, Human, Bot, HumanUtterance, BotUtterance, Dialog, HUMAN_UTTERANCE_SCHEMA,\
BOT_UTTERANCE_SCHEMA
from core.transform_config import DB_HOST, DB_PORT, DB_NAME
userT = TypeVar('userT', bound=User)
class StateManager:
    """Persistence facade for dialog state.

    Two families of methods: mongoengine-document based (create_new_*,
    add_*) and plain-dict based (*_dict / *_simple_dict) that defer DB
    writes until save_dialog_dict.
    """
    # Process-wide DB connection, established once at class-definition time.
    state_storage = connect(host=DB_HOST, port=DB_PORT, db=DB_NAME)
    @staticmethod
    def create_new_dialog(human, bot, location=None, channel_type=None):
        """Create and persist a Dialog between `human` and `bot`."""
        dialog = Dialog(human=human, bot=bot, location=location or Dialog.location.default,
                        channel_type=channel_type)
        dialog.save()
        return dialog
    @staticmethod
    def create_new_human(user_telegram_id, device_type, personality=None, profile=None):
        """Create and persist a Human profile keyed by telegram id."""
        human = Human(user_telegram_id=user_telegram_id,
                      device_type=device_type,
                      personality=personality,
                      profile=profile or Human.profile.default)
        human.save()
        return human
    @staticmethod
    def create_new_bot(persona: Optional[List[str]] = None):
        """Create and persist a Bot, optionally with a persona description."""
        bot = Bot()
        if persona:
            bot.persona = persona
        bot.save()
        return bot
    @staticmethod
    def create_new_human_utterance(text, user: Human, date_time, annotations=None,
                                   hypotheses=None, message_attributes=None):
        """Create and persist a HumanUtterance (user snapshot via to_dict)."""
        utt = HumanUtterance(text=text,
                             user=user.to_dict(),
                             date_time=date_time,
                             annotations=annotations or HumanUtterance.annotations.default,
                             hypotheses=hypotheses or HumanUtterance.hypotheses.default,
                             attributes=message_attributes or HumanUtterance.attributes.default)
        utt.save()
        return utt
    @staticmethod
    def create_new_bot_utterance(orig_text, text, user, date_time, active_skill, confidence,
                                 annotations=None):
        """Create and persist a BotUtterance produced by `active_skill`."""
        utt = BotUtterance(orig_text=orig_text,
                           text=text,
                           user=user,
                           date_time=date_time,
                           active_skill=active_skill,
                           confidence=confidence,
                           annotations=annotations or BotUtterance.annotations.default)
        utt.save()
        return utt
    @classmethod
    def get_or_create_user(cls, user_telegram_id=Hashable, user_device_type=Any):
        """Look up a Human by telegram id, creating one if absent.

        NOTE(review): the defaults are the typing objects Hashable/Any --
        probably intended as annotations (`user_telegram_id: Hashable`);
        confirm before relying on calls without arguments.
        """
        user_query = Human.objects(user_telegram_id__exact=user_telegram_id)
        if not user_query:
            user = cls.create_new_human(user_telegram_id, user_device_type)
        else:
            user = user_query[0]
        return user
    @classmethod
    def get_or_create_dialog(cls, user, location, channel_type, should_reset=False):
        """Return the user's first existing Dialog, or start a fresh one.

        should_reset=True always creates a new Dialog with a new Bot.
        """
        if should_reset:
            bot = cls.create_new_bot()
            dialog = cls.create_new_dialog(human=user, bot=bot, location=location,
                                           channel_type=channel_type)
        else:
            exist_dialogs = Dialog.objects(human__exact=user)
            if not exist_dialogs:
                bot = cls.create_new_bot()
                dialog = cls.create_new_dialog(human=user, bot=bot, location=location,
                                               channel_type=channel_type)
            else:
                dialog = exist_dialogs[0]
        return dialog
    @classmethod
    def add_human_utterance(cls, dialog: Dialog, user: Human, text: str, date_time: datetime,
                            annotation: Optional[dict] = None,
                            hypothesis: Optional[dict] = None,
                            message_attrs: Optional[dict] = None) -> None:
        """Append a new human utterance to the dialog and persist the dialog."""
        utterance = cls.create_new_human_utterance(text, user, date_time, annotation,
                                                   hypothesis, message_attrs)
        dialog.utterances.append(utterance)
        dialog.save()
    @classmethod
    def add_bot_utterance(cls, dialog: Dialog, orig_text: str,
                          date_time: datetime, active_skill: str,
                          confidence: float, text: str = None,
                          annotation: Optional[dict] = None) -> None:
        """Append a bot utterance; `text` defaults to `orig_text`.

        The bot identity is taken from the second-to-last utterance when
        available (the previous bot turn); otherwise a fresh Bot is made.
        """
        if not text:
            text = orig_text
        try:
            bot = dialog.utterances[-2].user
        except IndexError:
            # fewer than two utterances yet -> no previous bot turn exists
            bot = cls.create_new_bot()
        utterance = cls.create_new_bot_utterance(orig_text, text, bot, date_time, active_skill,
                                                 confidence,
                                                 annotation)
        dialog.utterances.append(utterance)
        dialog.save()
    @staticmethod
    def add_annotation(dialog: Dialog, payload: Dict):
        """Merge annotator output into the latest utterance and persist it."""
        dialog.utterances[-1].annotations.update(payload)
        dialog.utterances[-1].save()
    @staticmethod
    def add_hypothesis(dialog: Dialog, payload: Dict):
        """Append skill hypotheses ({skill_name: [hyp, ...]}) to the latest utterance."""
        hypothesis = {'skill_name': list(payload.keys())[0]}
        for h in list(payload.values())[0]:
            dialog.utterances[-1].hypotheses.append({**hypothesis, **h})
        dialog.utterances[-1].save()
    @staticmethod
    def add_text(dialog: Dialog, payload: str):
        """Overwrite the latest utterance's text and persist it."""
        dialog.utterances[-1].text = payload
        dialog.utterances[-1].save()
    @classmethod
    def add_bot_response(cls, dialog: Dialog, payload: Dict, **kwargs):
        """Record the response-selector winner as a bot utterance and
        propagate any human/bot attribute updates it carries."""
        active_skill = list(payload.values())[0]
        human_utterance = dialog.utterances[-1]
        active_skill_name = active_skill['skill_name']
        text = active_skill['text']
        confidence = active_skill['confidence']
        cls.add_bot_utterance(dialog, text, datetime.now(), active_skill_name, confidence)
        cls.update_human(human_utterance.user, active_skill)
        cls.update_bot(dialog.utterances[-1].user, active_skill)
    @staticmethod
    def do_nothing(*args, **kwargs):  # exclusive workaround for skill selector
        pass
    @staticmethod
    def update_human(human: Human, active_skill: Dict):
        """Apply the skill's 'human_attributes' to the Human document.

        Known fields are set directly; profile keys go into profile;
        anything else lands in the free-form attributes dict.
        """
        attributes = active_skill.get('human_attributes', [])
        profile = human.profile
        if attributes:
            for attr_name in attributes:
                attr_value = active_skill['human_attributes'][attr_name]
                if hasattr(human, attr_name):
                    setattr(human, attr_name, attr_value)
                else:
                    if attr_name in profile.keys():
                        profile[attr_name] = attr_value
                    else:
                        human.attributes[attr_name] = attr_value
            human.save()
    @staticmethod
    def update_bot(bot: Bot, active_skill: Dict):
        """Apply the skill's 'bot_attributes' to the Bot document."""
        attributes = active_skill.get('bot_attributes', [])
        if attributes:
            for attr_name in attributes:
                attr_value = active_skill['bot_attributes'][attr_name]
                if hasattr(bot, attr_name):
                    setattr(bot, attr_name, attr_value)
                else:
                    bot.attributes[attr_name] = attr_value
            bot.save()
    @classmethod
    def add_human_utterance_simple_dict(cls, dialog: Dict, dialog_object: Dialog, payload: Dict,
                                        **kwargs) -> None:
        """Dict-mode: append a schema-shaped human utterance (no DB write)."""
        utterance = deepcopy(HUMAN_UTTERANCE_SCHEMA)
        utterance['text'] = payload
        utterance['date_time'] = str(datetime.now())
        utterance['user'] = dialog['human']
        utterance['attributes'] = kwargs.get('message_attrs', {})
        dialog['utterances'].append(utterance)
    @staticmethod
    def update_human_dict(human: Dict, active_skill: Dict):
        """Dict-mode counterpart of update_human (no DB write)."""
        attributes = active_skill.get('human_attributes', {})
        for attr_name, attr_value in attributes.items():
            if attr_name in human:
                human[attr_name] = attr_value
            elif attr_name in human['profile']:
                human['profile'][attr_name] = attr_value
            else:
                human['attributes'][attr_name] = attr_value
    @staticmethod
    def update_bot_dict(bot: Dict, active_skill: Dict):
        """Dict-mode counterpart of update_bot (no DB write)."""
        attributes = active_skill.get('bot_attributes', {})
        for attr_name, attr_value in attributes.items():
            if attr_name in bot:
                bot[attr_name] = attr_value
            else:
                bot['attributes'][attr_name] = attr_value
    @classmethod
    def add_bot_utterance_simple_dict(cls, dialog: Dict, dialog_object: Dialog, payload: Dict,
                                      **kwargs) -> None:
        """Dict-mode: record the response-selector winner and attribute updates."""
        rselector_data = list(payload.values())[0]
        new_text = rselector_data['text']
        new_confidence = rselector_data['confidence']
        cls.update_human_dict(dialog['human'], rselector_data)
        cls.update_bot_dict(dialog['bot'], rselector_data)
        utterance = deepcopy(BOT_UTTERANCE_SCHEMA)
        utterance['text'] = new_text
        utterance['date_time'] = str(datetime.now())
        utterance['active_skill'] = rselector_data['skill_name']
        utterance['confidence'] = new_confidence
        utterance['user'] = dialog['bot']
        dialog['utterances'].append(utterance)
    @staticmethod
    def add_annotation_dict(dialog: Dict, dialog_object: Dialog, payload: Dict, **kwargs):
        """Dict-mode: merge annotator output into the latest utterance."""
        dialog['utterances'][-1]['annotations'].update(payload)
    @staticmethod
    def add_hypothesis_dict(dialog: Dict, dialog_object: Dialog, payload: Dict, **kwargs):
        """Dict-mode: append skill hypotheses to the latest utterance."""
        hypothesis = {'skill_name': list(payload.keys())[0]}
        for h in list(payload.values())[0]:
            dialog['utterances'][-1]['hypotheses'].append({**hypothesis, **h})
    @staticmethod
    def add_text_dict(dialog: Dict, payload: str):
        """Dict-mode: overwrite the latest utterance's text."""
        dialog['utterances'][-1]['text'] = payload
    @classmethod
    def save_dialog_dict(cls, dialog: Dict, dialog_object: Dialog, payload=None, **kwargs):
        """Flush dict-mode state to the DB.

        Walks utterances from newest to oldest until one with an id
        (already persisted) is found, saves the new ones in order, then
        syncs and saves the human, bot, and dialog documents.
        """
        utt_objects = []
        for utt in dialog['utterances'][::-1]:
            if not utt['id']:
                if utt['user']['user_type'] == 'human':
                    utt_objects.append(HumanUtterance.make_from_dict(utt))
                elif utt['user']['user_type'] == 'bot':
                    utt_objects.append(BotUtterance.make_from_dict(utt))
                else:
                    raise ValueError('unknown user type in the utterance')
            else:
                # everything older is already persisted
                break
        for utt in utt_objects[::-1]:
            utt.save()
            dialog_object.utterances.append(utt)
        dialog_object.human.update_from_dict(dialog['human'])
        dialog_object.bot.update_from_dict(dialog['bot'])
        dialog_object.human.save()
        dialog_object.bot.save()
        dialog_object.save()
|
# -*- coding: utf-8 -*-
# File: test_tracker_adapter.py
""" tracker adapter test module. """
__author__ = 'Otto Hockel <hockel.otto@gmail.com>'
__docformat__ = 'plaintext'
import pytest
import urllib.request
from .conftest import mock_urlopen, _get_url, _put_url, _delete_url
from pymite.adapters import Tracker
def test_tracker_setup(libfactory):
    """ Test tracker setup. """
    adapter = libfactory.tracker_adapter
    assert adapter is not None
    tracker = Tracker(adapter.realm, adapter.apikey)
    assert tracker.adapter == 'tracker'
def test_tracker_show(monkeypatch, libfactory):
    """show() returns the tracker payload whether or not a timer runs."""
    adapter = libfactory.tracker_adapter
    tracker = Tracker(adapter.realm, adapter.apikey)
    # no tracker running
    tracker_data = {'tracker': {}}
    monkeypatch.setattr(urllib.request, 'urlopen', mock_urlopen(tracker_data))
    assert tracker.show() == tracker_data['tracker']
    # a tracker running
    entry = {'id': 31337, 'minutes': 42,
             'since': '2015-01-02T13:37:37+01:00'}
    tracker_data = {'tracker': {'tracking_time_entry': entry}}
    monkeypatch.setattr(urllib.request, 'urlopen', mock_urlopen(tracker_data))
    show = tracker.show()
    assert show == tracker_data['tracker']
    assert show['tracking_time_entry']['minutes'] == 42
    assert show['tracking_time_entry']['id'] == 31337
def test_tracker_show_url(monkeypatch, libfactory):
    """ is that url built right? """
    tracker = libfactory.tracker_adapter
    monkeypatch.setattr(Tracker, '_get', _get_url('tracker'))
    result = tracker.show()
    assert result['api'] == 'https://foo.mite.yo.lk/tracker.json'
def test_tracker_start(monkeypatch, libfactory):
    """Starting a timer on an existing entry echoes the tracker payload."""
    adapter = libfactory.tracker_adapter
    tracker = Tracker(adapter.realm, adapter.apikey)
    # let's start a timer on an existing time entry
    entry = {'since': '2015-01-05T09:42:32+01:00',
             'minutes': 42, 'id': 42}
    tracker_data = {'tracker': {'tracking_time_entry': entry}}
    monkeypatch.setattr(urllib.request, 'urlopen', mock_urlopen(tracker_data))
    assert tracker.start(42) == tracker_data['tracker']
def test_tracker_start_url(monkeypatch, libfactory):
    """ is that url built right? """
    tracker = libfactory.tracker_adapter
    monkeypatch.setattr(Tracker, '_put', _put_url('tracker'))
    result = tracker.start(42)
    assert result['api'] == 'https://foo.mite.yo.lk/tracker/42.json'
def test_tracker_stop(monkeypatch, libfactory):
    """Stopping raises when no timer runs and succeeds with a valid id."""
    tracker = Tracker(libfactory.tracker_adapter.realm,
                      libfactory.tracker_adapter.apikey)
    tracker_data = {'tracker': {}}
    urlopen_tracker = mock_urlopen(tracker_data)
    monkeypatch.setattr(urllib.request, 'urlopen', urlopen_tracker)
    with pytest.raises(Exception) as excinfo:
        tracker.stop(31337)
    # BUG FIX: ExceptionInfo.message was removed in pytest 4;
    # the modern equivalent is str(excinfo.value)
    assert str(excinfo.value) == 'timer not running'
    # a timer running
    tracker_data = {
        'tracker': {
            'stopped_time_entry': {
                'id': 24, 'minutes': 42}
        }
    }
    urlopen_tracker = mock_urlopen(tracker_data)
    monkeypatch.setattr(urllib.request, 'urlopen', urlopen_tracker)
    # this test passes as we provide an id
    stop = tracker.stop(24)
    assert stop == tracker_data['tracker']
    assert stop['stopped_time_entry']['minutes'] == 42
    assert stop['stopped_time_entry']['id'] == 24
def test_tracker_stop_url(monkeypatch, libfactory):
    """ is that url built right? """
    tracker = libfactory.tracker_adapter
    monkeypatch.setattr(Tracker, '_delete', _delete_url(200))
    result = tracker.stop(42)
    assert result['api'] == 'https://foo.mite.yo.lk/tracker/42.json'
# vim: set ft=python ts=4 sw=4 expandtab :
|
'''
Author: Nick Cao
Date: 2021-06-06 12:04:39
LastEditTime: 2021-06-06 12:12:18
LastEditors: Nick Cao
Description:
FilePath: \LeetCodeLog\src\121. Best Time to Buy and Sell Stock\121.py
'''
class Solution:
    """LeetCode 121: best profit from a single buy/sell."""
    def maxProfit(self, prices: "List[int]") -> int:
        """Single pass: track the min price so far and the best spread.

        BUG FIX: the annotation was the bare name List[int]; `List` is never
        imported in this file, so evaluating the annotation at definition
        time raised NameError outside the LeetCode runtime. Quoting defers
        evaluation.
        """
        min_price = 10 ** 5  # problem constraint: prices are below 10^5
        profit = 0
        for price in prices:
            if price < min_price:
                min_price = price
            elif price - min_price > profit:
                profit = price - min_price
        return profit
# Generated by Django 2.2 on 2019-07-21 14:02
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2): set Address verbose_name_plural to 'Adreslerim'."""
    dependencies = [
        ('core', '0005_auto_20190721_1515'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='address',
            options={'verbose_name_plural': 'Adreslerim'},
        ),
    ]
|
import os.path
import re
import datetime
from unittest.mock import MagicMock
from dicttoxml import dicttoxml
from data_collection.news_spider import NewsSpider
from data_collection.parser_zoo import PARSER_ZOO
from test.util import fake_response_from_file
def test_news_spider_find_news_ref():
    """Sitemap entries older than date_depth must be filtered out."""
    response = fake_response_from_file('tass_data/sitemap_hot.xml', url='http://www.example.com/sitemap.xml')
    msk = datetime.timezone(datetime.timedelta(seconds=10800))
    date_depth = datetime.datetime(2021, 10, 26, tzinfo=msk)
    spider = NewsSpider([], [], date_depth=date_depth)
    expected_url = ['https://tass.ru/ekonomika/12784183', 'https://tass.ru/sport/12784233']
    for req in spider._parse_sitemap(response):
        assert req.url in expected_url, f"{req.url} not expected in url to crawl (date_depth = {date_depth})"
def test_news_spider_use_parser_in_response(tmpdir):
    """Each domain's response must be routed to its registered parser."""
    sitemap_rules = []
    url_domains = []
    for idx in range(3):
        example_name = f'example{idx}'
        domain = f'{example_name}.ru'
        url_domains.append(domain)
        PARSER_ZOO[example_name] = MagicMock()
        sitemap_rules.append((domain, example_name))
    sitemap_path = os.path.join(tmpdir, 'example_sitemap.xml')
    with open(sitemap_path, 'wb') as fout:
        fout.write(create_example_sitemap_xml(url_domains))
    example_request = fake_response_from_file(sitemap_path)
    msk = datetime.timezone(datetime.timedelta(seconds=10800))
    news_spider = NewsSpider([], sitemap_rules,
                             date_depth=datetime.datetime(2021, 10, 26, tzinfo=msk))
    for news_response in news_spider._parse_sitemap(example_request):
        parser_name = re.findall('example.', news_response.url)[0]
        expected_callback = PARSER_ZOO[parser_name].parse
        assert news_response.callback == expected_callback, (f"expected {parser_name} parse method")
def create_example_sitemap_xml(url_domains):
    """Build a minimal sitemap XML (bytes) with one <url> entry per domain."""
    sitemap_xml = []
    for url_domain, url_ending in zip(url_domains, ['/sport/12784233', '/ekonomika/12784183', '/ekonomika/12784189']):
        url_xml = {
            'loc': f'https://www.{url_domain}{url_ending}',
            'lastmod': '2021-10-27T21:13:58+03:00',
        }
        sitemap_xml.append({'url': url_xml})
    # BUG FIX: dicttoxml's `root` parameter is a boolean toggle; the root
    # element NAME goes in `custom_root`. Passing root='urlset' produced a
    # <root> element, not <urlset>, which sitemap parsers ignore.
    xml = dicttoxml(sitemap_xml, custom_root='urlset')
    return xml
|
from norsourceparser.core.models import PosTreeContainer
def test_pos_tree_resolve():
    """Resolving a POS tree yields ordered (tag, token) pairs."""
    tree = 'pos("S" ( "S" ( "N" ("N" ("Epic"))))) ("V" ("V" ("V" ("Running")))))'
    resolved = PosTreeContainer.resolve_pos_tree(tree)
    assert len(resolved) == 2
    assert len(resolved[0]) == 2
    assert (resolved[0][0], resolved[0][1]) == ('N', 'Epic')
    assert (resolved[1][0], resolved[1][1]) == ('V', 'Running')
|
# Legacy Django (<3.2) hook pointing at the app's explicit AppConfig class.
default_app_config = "wagtailblock.blocks.apps.BlocksConfig"
|
"""Add movie table and movie_genre
Revision ID: d11d5359e4ca
Revises:
Create Date: 2020-09-27 23:20:58.300881
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd11d5359e4ca'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the movie and movie_genre tables with their indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('movie',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('ninety_nine_popularity', sa.Float(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('director', sa.String(length=100), nullable=False),
    sa.Column('imdb_score', sa.Float(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    # a movie is unique per (name, director) pair
    sa.UniqueConstraint('name', 'director', name='move_name_director_name')
    )
    op.create_index(op.f('ix_movie_director'), 'movie', ['director'], unique=False)
    op.create_index(op.f('ix_movie_imdb_score'), 'movie', ['imdb_score'], unique=False)
    op.create_index(op.f('ix_movie_name'), 'movie', ['name'], unique=False)
    op.create_index(op.f('ix_movie_ninety_nine_popularity'), 'movie', ['ninety_nine_popularity'], unique=False)
    op.create_table('movie_genre',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('movie_id', sa.Integer(), nullable=True),
    sa.Column('is_deleted', sa.Boolean(), nullable=False),
    sa.ForeignKeyConstraint(['movie_id'], ['movie.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_movie_genre_name'), 'movie_genre', ['name'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the movie_genre and movie tables (indexes first, reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_movie_genre_name'), table_name='movie_genre')
    op.drop_table('movie_genre')
    op.drop_index(op.f('ix_movie_ninety_nine_popularity'), table_name='movie')
    op.drop_index(op.f('ix_movie_name'), table_name='movie')
    op.drop_index(op.f('ix_movie_imdb_score'), table_name='movie')
    op.drop_index(op.f('ix_movie_director'), table_name='movie')
    op.drop_table('movie')
    # ### end Alembic commands ###
|
import sys
import multiprocessing
import datetime
import re
import os
import zlib
from datetime import datetime
import tempfile
import subprocess
from array import array
from struct import unpack
from urllib.request import pathname2url
import math
import bisect
import numpy as np
from collections import defaultdict
def contig_lengths(file):
    """Parse a tab-separated stats table into {contig_name: length_bp}.

    The single header line is skipped; the contig name is the first
    whitespace-delimited token of column 0 and the length is column 4.
    """
    lengths = {}
    # use a context manager so the handle is closed even on parse errors
    with open(file) as fh:
        header = True
        for line in fh:
            if header:
                header = False
                continue
            segs = line.strip().split('\t')
            # keep only the token before the first space in the contig name
            contig = segs[0].split()[0]
            lengths[contig] = int(segs[4])
    return lengths
def format_microdiv(file, lengths):
    """Parse a per-SNP microdiversity table into nested lookup structures.

    Returns (microdiv, contig_data, all_sources) where
      microdiv[contig][source][position] = [A, T, C, G, pi, subsampled_depth],
      contig_data[contig] holds 'length' and 'total_snp_count', and
      all_sources is the deduplicated list of sample names.
    Contigs present in fewer than two sources are dropped (pairwise
    comparisons need at least two samples).
    """
    microdiv = {}
    contig_data = {}
    all_sources = []
    fh = open(file)
    header = True
    for line in fh:
        segs = line.strip().split('\t')
        if header:
            header = False
            colnames = segs
            #print(colnames)
            continue
        # column layout (0-based): 1=contig, 2=position, 9=source sample,
        # 18-21=subsampled A/T/C/G counts, 22=subsampled depth, 23=pi
        contig = segs[1]
        contig = contig.split()[0]
        source = segs[9]
        all_sources.append(source)
        #We don't need this to be an int.
        position = segs[2]
        pi = float(segs[23])
        a, t, c, g = int(segs[18]), int(segs[19]), int(segs[20]), int(segs[21]),
        sub_samp_depth = int(segs[22])
        #Needs pos, cov, pi, and subsamp A, T, C, G
        if contig not in microdiv:
            microdiv[contig] = {}
        #Add length
        if contig not in contig_data:
            #Genome length
            contig_data[contig] = {}
            contig_data[contig]["length"] = lengths[contig]
        if source not in microdiv[contig]:
            #List of the data.
            microdiv[contig][source] = {}
        microdiv[contig][source][position] = [a, t, c, g, pi, sub_samp_depth]
    fh.close()
    all_sources = list(set(all_sources))
    snp_cts = {}
    to_remove = []
    for ct in microdiv:
        if len(microdiv[ct]) < 2:
            to_remove.append(ct)
        else:
            #We don't need to do this work unless the contig is going to actually be used.
            snps = []
            for source in microdiv[ct]:
                #The snps seen on this contig.
                my_snps = list(microdiv[ct][source].keys())
                #Build the set of total SNPs for all contigs
                snps.extend(my_snps)
            #This ends up being the length of the total set of SNPs across all samples
            #unique positions and add total SNP count
            contig_data[ct]["total_snp_count"] = len(list(set(snps)))
    #Clean up the data
    for ct in to_remove:
        #Ensure that the data is removed nicely
        microdiv[ct] = None
        #Get rid of the row.
        microdiv.pop(ct)
    return microdiv, contig_data, all_sources
def fst_pair(one_contig_args):
    """Compute pairwise FST between all sample pairs on one contig.

    ``one_contig_args`` is a tuple (data, contig_dat, contig_name) where
    data[source][position] = [a, t, c, g, pi, subsample_depth] and
    contig_dat carries 'length' and 'total_snp_count'.

    Returns (fsts, contig_name) with fsts[query][target] = FST for every
    unordered pair.  Pairs with no shared SNP positions default to FST = 1;
    negative estimates are clamped to 0.
    """
    data, contig_dat, contig_name = one_contig_args
    fsts = {}
    sources = list(data.keys())
    length = contig_dat["length"]
    total_snps = contig_dat["total_snp_count"]
    for i in range(0, len(sources) - 1):
        query = sources[i]
        query_data = data[query]
        fsts[query] = {}
        # Covered positions: contig length minus SNP positions this sample
        # did not report.  Self-vs-self FST is always 0, so it is omitted.
        cov1 = length - (total_snps - len(query_data))
        pi1 = 0
        for p in query_data:
            try:
                pi1 += query_data[p][4]
            except TypeError:
                # Tolerate a missing/None pi value (previously a bare except).
                pass
        pi1 = pi1 / cov1
        for j in range(i + 1, len(sources)):
            target = sources[j]
            # Default when no SNP positions are shared.
            fsts[query][target] = 1
            target_data = data[target]
            cov2 = length - (total_snps - len(target_data))
            pi2 = 0
            for p in target_data:
                try:
                    pi2 += target_data[p][4]
                except TypeError:
                    pass
            pi2 = pi2 / cov2
            # FST is calculated over shared SNP positions only.
            shared_positions = set(query_data).intersection(target_data)
            if shared_positions:
                shared_cov = length - (total_snps - len(shared_positions))
                per_pos_fst = 0
                for pos in shared_positions:
                    # Arrays are A, T, C, G subsamples, pi, subsample depth.
                    q = query_data[pos]
                    t = target_data[pos]
                    # Product of subsample depths normalizes the cross terms.
                    cc = q[5] * t[5]
                    # Between-sample pi: sum of mismatch products over all
                    # ordered nucleotide pairs, in the original term order.
                    this_pos_fst = (
                        q[0] * t[1] + q[0] * t[2] + q[0] * t[3] +
                        q[1] * t[0] + q[1] * t[2] + q[1] * t[3] +
                        q[2] * t[0] + q[2] * t[1] + q[2] * t[3] +
                        q[3] * t[0] + q[3] * t[1] + q[3] * t[2]
                    ) / cc
                    per_pos_fst += this_pos_fst
                try:
                    fst = per_pos_fst / shared_cov
                    fst = 1 - (((pi1 + pi2) / 2) / fst)
                    if fst < 0:
                        fst = 0
                except ZeroDivisionError:
                    # shared_cov or the between-sample pi was zero.
                    fst = 1
                fsts[query][target] = fst
    return fsts, contig_name
def calculate_FST(microdiv, contig_data, all_sources, output, threads):
    """Compute FST for every contig in parallel and write a long-format TSV.

    Each output row is (row_samp, col_samp, contig, fst).  Sample pairs
    with no value for a contig are written as "NA" so every pair appears
    for every contig.  Returns True on completion.
    """
    args = [(microdiv[ct], contig_data[ct], ct) for ct in microdiv]
    # Context manager / finally guarantee cleanup of the file handle and
    # worker pool even if a worker raises.
    with open(output, "w") as fh:
        print("row_samp", "col_samp", "contig", "fst", sep="\t", file=fh)
        pool = multiprocessing.Pool(threads)
        try:
            for dat, ct_name in pool.map(fst_pair, args):
                # Emit every pair from the full source list so "missing"
                # pairs are recorded as NA.
                for i in range(0, len(all_sources) - 1):
                    query = all_sources[i]
                    for j in range(i + 1, len(all_sources)):
                        target = all_sources[j]
                        value = dat.get(query, {}).get(target, "NA")
                        print(query, target, ct_name, value, sep="\t", file=fh)
        finally:
            pool.close()
            pool.join()
    return True
def perform_fst(microdiv_file, lengths_file, out_dir, threads):
    """Top-level driver: parse inputs, compute FST, and report any failure."""
    output_file = os.path.normpath(out_dir + "/MetaPop/10.Microdiversity/fixation_index.tsv")
    # Contig lengths are only needed while parsing the microdiversity table.
    contig_len = contig_lengths(lengths_file)
    microdiv, contig_info, samples = format_microdiv(microdiv_file, contig_len)
    contig_len = None  # release before the parallel step
    ok = calculate_FST(microdiv, contig_info, samples, output_file, threads)
    if not ok:
        print("FST not successfully calculated.")
    return ok
#Testing.
#lens = "/mnt/c/Users/Kenji/Desktop/metapy/toy_data/MetaPop/10.Microdiversity/global_contig_microdiversity.tsv"
#file = "/mnt/c/Users/Kenji/Desktop/metapy/toy_data/MetaPop/10.Microdiversity/global_raw_microdiversity_data_snp_loci_only.tsv"
#perform_fst(file, lens, "test_toy", 6)
|
from django.db import models
# Create your models here.
class signup(models.Model):
    """Registration record for a site user, keyed by email address."""
    name = models.CharField(max_length=100)
    # Phone number stored as free text (no format validation).
    ph = models.CharField(max_length=100)
    # NOTE(review): the string 'null' is used as a sentinel default -- confirm intent.
    about = models.CharField(max_length=100, default='null')
    email = models.EmailField(primary_key=True, max_length=100, default='null')
    def __str__(self):
        """Show the primary key (email) in admin listings."""
        return self.email
class login(models.Model):
    """Credential record, keyed by email; usertype distinguishes account roles."""
    email = models.EmailField(primary_key=True, max_length=100)
    # NOTE(review): password appears to be stored as plain text -- confirm hashing happens elsewhere.
    password = models.CharField(max_length=100)
    usertype = models.CharField(max_length=100)
    def __str__(self):
        """Show the primary key (email) in admin listings."""
        return self.email
|
class PositionModel:
    """Plain value object pairing a numeric id with a position title."""

    def __init__(self, id: int, position: str):
        """Store the id and position name on the instance."""
        self.id, self.position = id, position
|
from __future__ import print_function
__author__ = ["Tomas Mendez Echenagucia"]
__copyright__ = "Copyright 2020, Design Machine Group - University of Washington"
__license__ = "MIT License"
__email__ = "tmendeze@uw.edu"
__version__ = "0.1.0"
import os
import json
from compas.datastructures import Mesh
# TODO: Make custom object, use mesh only for surfaces
class Zone(object):
    """A thermal zone described by a name and a mesh of boundary surfaces."""

    def __init__(self):
        self.name = ''        # zone name used in the model
        self.surfaces = None  # ZoneSurfaces mesh, set via add_surfaces or data

    def to_json(self, filepath):
        """Serialize the zone to a JSON file at ``filepath``."""
        with open(filepath, 'w+') as fp:
            json.dump(self.data, fp)

    @property
    def data(self):
        """Dict representation: zone name plus raw surface-mesh data."""
        data = {'name': self.name,
                'surfaces': self.surfaces.to_data(),
                }
        return data

    @data.setter
    def data(self, data):
        surfaces = data.get('surfaces') or {}
        # Bug fix: the name default must be a string (it was previously {}),
        # matching the '' default set in __init__.
        self.name = data.get('name') or ''
        self.surfaces = ZoneSurfaces.from_data(surfaces)

    def add_surfaces(self, mesh):
        """Build this zone's surfaces from a mesh and tag its faces."""
        self.surfaces = ZoneSurfaces.from_data(mesh.data)
        self.surfaces.assign_zone_surface_attributes()

    @classmethod
    def from_data(cls, data):
        """Alternate constructor from a data dict."""
        zone = cls()
        zone.data = data
        return zone

    @classmethod
    def from_json(cls, filepath):
        """Alternate constructor from a JSON file produced by ``to_json``."""
        with open(filepath, 'r') as fp:
            data = json.load(fp)
        zone = cls()
        zone.data = data
        return zone

    @classmethod
    def from_mesh(cls, mesh, name):
        """
        Mesh faces must be provided in the following order:
        1 - Floor face
        2 - Ceiling face
        2-N - Wall faces
        """
        zone = cls()
        zone.name = name
        zone.add_surfaces(mesh)
        return zone
class ZoneSurfaces(Mesh):
    """Mesh subclass whose faces carry zone-surface attributes."""

    def __init__(self):
        # Bug fix: super(Mesh, self).__init__() started the MRO *above* Mesh,
        # skipping Mesh.__init__ entirely; start at this class instead.
        super(ZoneSurfaces, self).__init__()
        self.default_face_attributes.update({'name': None,
                                             'construction': None,
                                             'surface_type': None,
                                             'outside_boundary_condition': None,
                                             })

    def __str__(self):
        # NOTE(review): relies on ``self.name`` being provided by the Mesh
        # base class -- confirm the compas version in use exposes it.
        return 'compas_energyplus Zone Surfaces - {}'.format(self.name)

    def assign_zone_surface_attributes(self):
        """Tag faces by convention: 0 = floor, 1 = ceiling, 2-5 = walls."""
        self.face_attribute(0, 'name', 'floor')
        self.face_attribute(0, 'surface_type', 'Floor')
        self.face_attribute(0, 'construction', 'FLOOR')
        self.face_attribute(0, 'outside_boundary_condition', 'Adiabatic')
        self.face_attribute(1, 'name', 'ceiling')
        self.face_attribute(1, 'surface_type', 'Ceiling')
        self.face_attribute(1, 'construction', 'ROOF31')
        self.face_attribute(1, 'outside_boundary_condition', 'Adiabatic')
        self.faces_attribute('name', 'wall', [2, 3, 4, 5])
        self.faces_attribute('surface_type', 'Wall', [2, 3, 4, 5])
        self.faces_attribute('construction', 'R13WALL', [2, 3, 4, 5])
        self.faces_attribute('outside_boundary_condition', 'Outdoors', [2, 3, 4, 5])
|
# enable users to import directly, without importing pretty_json.pretty_json
from pretty_json.pretty_json import *
|
from django.contrib.auth.base_user import BaseUserManager
from django.db import models
from django.core.validators import MinLengthValidator
from django.core.validators import MaxLengthValidator
from django.core.validators import RegexValidator
from django.contrib.auth.models import AbstractUser
class UserManager(BaseUserManager):
    """Define a model manager for User model with no username field."""
    use_in_migrations = True
    def _create_user(self, email, password, **extra_fields):
        """Create and save a User with the given email and password."""
        if not email:
            raise ValueError('The given email must be set')
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular User with the given email and password."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        extra_fields.setdefault('is_campuspartner', False)
        extra_fields.setdefault('is_communitypartner', False)
        return self._create_user(email, password, **extra_fields)
    def create_superuser(self, email, password, **extra_fields):
        """Create and save a SuperUser with the given email and password."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(email, password, **extra_fields)
    def create_campuspartner(self, email, password, **extra_fields):
        """Create and save a campus-partner User (caller must pass is_campuspartner=True)."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        # Unlike create_user, this does NOT default the flag -- it must be supplied.
        if extra_fields.get('is_campuspartner') is not True:
            raise ValueError('Campus Partner must have is_campuspartner=True.')
        return self._create_user(email, password, **extra_fields)
class User(AbstractUser):
    """Custom user: the email address is the login identifier; no username field."""
    is_campuspartner = models.BooleanField(default=False)
    is_communitypartner = models.BooleanField(default=False)
    # Optional profile picture; falls back to a bundled default image.
    avatar = models.ImageField(default='profile_image/default.jpg', upload_to='profile_image', null=True, blank=True)
    username = None  # removes AbstractUser's username column
    email = models.EmailField(('email address'), unique=True)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    objects = UserManager()
class Contact(models.Model):
    """Contact person attached to a community and/or campus partner."""
    contacttype_choices = (
        ('Primary', 'Primary'),
        ('Secondary', 'Secondary'),
        ('Other', 'Other')
    )
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    work_phone = models.CharField(max_length=14)
    cell_phone = models.CharField(max_length=14)
    email_id = models.EmailField(unique=True)
    contact_type = models.CharField(max_length=15, choices=contacttype_choices, default='Select')
    community_partner = models.ForeignKey('partners.CommunityPartner', on_delete=models.CASCADE, null=True, blank=True)
    campus_partner = models.ForeignKey('partners.CampusPartner', on_delete=models.CASCADE, null=True, blank=True)

    def __str__(self):
        # Two __str__ definitions existed; the later (name-based) one silently
        # shadowed the email-based one, so that behavior is kept and the dead
        # copy is removed.
        return '%s %s ' % (self.first_name, self.last_name)
class MissionArea (models.Model):
    """Lookup table of mission/focus areas a partner can be tagged with."""
    mission_name = models.CharField(max_length=100)
    description = models.TextField(null=True, blank=True)
    def __str__(self):
        """Show the mission name in admin listings."""
        return str(self.mission_name)
class HouseholdIncome(models.Model):
    """Median household income statistics per county."""
    # NOTE(review): presumably a census geography identifier -- confirm with the data source.
    id2 = models.IntegerField(null=False, blank=False)
    county = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    median_income = models.IntegerField(null=False, blank=False)
    margin_error = models.IntegerField(null=False, blank=False)
    rank = models.IntegerField(null=False, blank=False)
    def __str__(self):
        """Show the county name in admin listings."""
        return str(self.county)
|
"""
Profile CRUD, import/export, storage, and buildings
"""
import json
from celery.result import AsyncResult
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, Group
from django.core.mail import mail_admins
from django.db import IntegrityError
from django.db.models import FieldDoesNotExist
from django.http import HttpResponse, JsonResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render, redirect, reverse
from django.template import loader
from django.template.context_processors import csrf
from django.utils.html import mark_safe
from herders.decorators import username_case_redirect
from herders.forms import RegisterUserForm, CrispyChangeUsernameForm, DeleteProfileForm, EditUserForm, \
EditSummonerForm, EditBuildingForm, ImportSWParserJSONForm
from herders.models import Summoner, Storage, Building, BuildingInstance
from herders.profile_parser import validate_sw_json
from herders.rune_optimizer_parser import export_win10
from herders.tasks import com2us_data_import
def register(request):
    """Create a new user account plus its Summoner profile, then log in.

    Username and email uniqueness are checked case-insensitively before
    creation; on IntegrityError the partially-created rows are rolled back
    and admins are notified by email.
    """
    form = RegisterUserForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            if User.objects.filter(username__iexact=form.cleaned_data['username']).exists():
                form.add_error('username', 'Username already taken')
            elif User.objects.filter(email__iexact=form.cleaned_data['email']).exists():
                form.add_error(
                    'email',
                    mark_safe(
                        f'Email already in use. You can <a href="{reverse("password_reset")}">reset your password if you forgot it</a>.'
                    )
                )
            else:
                new_user = None
                new_summoner = None
                try:
                    # Create the user
                    new_user = User.objects.create_user(
                        username=form.cleaned_data['username'],
                        password=form.cleaned_data['password'],
                        email=form.cleaned_data['email'],
                    )
                    new_user.save()
                    new_user.groups.add(Group.objects.get(name='Summoners'))
                    new_summoner = Summoner.objects.create(
                        user=new_user,
                        summoner_name=form.cleaned_data['summoner_name'],
                        public=form.cleaned_data['is_public'],
                    )
                    new_summoner.save()
                    # Automatically log them in
                    user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
                    if user is not None:
                        if user.is_active:
                            login(request, user)
                            return redirect('herders:profile_default', profile_name=user.username)
                except IntegrityError as e:
                    # Manual rollback of whatever was created before the failure.
                    if new_user is not None:
                        new_user.delete()
                    if new_summoner is not None:
                        new_summoner.delete()
                    form.add_error(None, 'There was an issue completing your registration. Please try again.')
                    mail_admins(
                        subject='Error during user registration',
                        message='{}'.format(e),
                        fail_silently=True,
                    )
    context = {'form': form}
    return render(request, 'herders/register.html', context)
@login_required
def change_username(request):
    """Let the logged-in user rename their account; uniqueness is enforced by the DB."""
    user = request.user
    form = CrispyChangeUsernameForm(request.POST or None)
    context = {
        'form': form,
    }
    if request.method == 'POST' and form.is_valid():
        try:
            user.username = form.cleaned_data['username']
            user.save()
            return redirect('username_change_complete')
        except IntegrityError:
            # The unique constraint on username rejected the new value.
            form.add_error('username', 'Username already taken')
    return render(request, 'registration/change_username.html', context)
def change_username_complete(request):
    """Static confirmation page shown after a successful username change."""
    return render(request, 'registration/change_username_complete.html')
@username_case_redirect
@login_required
def profile_delete(request, profile_name):
    """Permanently delete the owner's account after form confirmation."""
    user = request.user
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    form = DeleteProfileForm(request.POST or None)
    form.helper.form_action = reverse('herders:profile_delete', kwargs={'profile_name': profile_name})
    context = {
        'form': form,
    }
    if is_owner:
        if request.method == 'POST' and form.is_valid():
            # Log out first so the session isn't tied to a deleted user row.
            logout(request)
            user.delete()
            messages.warning(request, 'Your profile has been permanently deleted.')
            return redirect('news:latest_news')
        return render(request, 'herders/profile/profile_delete.html', context)
    else:
        return HttpResponseForbidden("You don't own this profile")
@username_case_redirect
@login_required
def following(request, profile_name):
    """List the summoners this profile follows."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_following', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    context = {
        'is_owner': is_owner,
        'profile_name': profile_name,
        'summoner': summoner,
        'view': 'following',
        'return_path': return_path,
    }
    return render(request, 'herders/profile/following/list.html', context)
@username_case_redirect
@login_required
def follow_add(request, profile_name, follow_username):
    """Add ``follow_username`` to the owner's following list, then redirect back."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    new_follower = get_object_or_404(Summoner, user__username=follow_username)
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    if is_owner:
        summoner.following.add(new_follower)
        messages.info(request, 'Now following %s' % new_follower.user.username)
        return redirect(return_path)
    else:
        return HttpResponseForbidden()
@username_case_redirect
@login_required
def follow_remove(request, profile_name, follow_username):
    """Remove ``follow_username`` from the owner's following list, then redirect back."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    removed_follower = get_object_or_404(Summoner, user__username=follow_username)
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    if is_owner:
        summoner.following.remove(removed_follower)
        messages.info(request, 'Unfollowed %s' % removed_follower.user.username)
        return redirect(return_path)
    else:
        return HttpResponseForbidden()
@username_case_redirect
@login_required
def profile_edit(request, profile_name):
    """Edit the owner's User and Summoner records via two bound forms."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    # NOTE(review): the forms bind to request.user rather than the looked-up
    # summoner; only the is_owner branch saves them, so in practice they refer
    # to the same account -- confirm.
    user_form = EditUserForm(request.POST or None, instance=request.user)
    summoner_form = EditSummonerForm(request.POST or None, instance=request.user.summoner)
    context = {
        'is_owner': is_owner,
        'profile_name': profile_name,
        'summoner': summoner,
        'return_path': return_path,
        'user_form': user_form,
        'summoner_form': summoner_form,
    }
    if is_owner:
        if request.method == 'POST' and summoner_form.is_valid() and user_form.is_valid():
            summoner_form.save()
            user_form.save()
            messages.info(request, 'Your profile has been updated.')
            return redirect(return_path)
        else:
            return render(request, 'herders/profile/profile_edit.html', context)
    else:
        return HttpResponseForbidden()
@username_case_redirect
@login_required
def storage(request, profile_name):
    """Show the owner's essence/craft/monster material inventory."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    if is_owner:
        craft_mats = []
        essence_mats = []
        monster_mats = []
        for field_name in Storage.ESSENCE_FIELDS:
            essence_mats.append({
                'name': summoner.storage._meta.get_field(field_name).help_text,
                'field_name': field_name,
                # Field names are '<element>_...'; the prefix drives the icon.
                'element': field_name.split('_')[0],
                'qty': getattr(summoner.storage, field_name)
            })
        for field_name in Storage.CRAFT_FIELDS:
            craft_mats.append({
                'name': summoner.storage._meta.get_field(field_name).help_text,
                'field_name': field_name,
                'qty': getattr(summoner.storage, field_name)
            })
        for field_name in Storage.MONSTER_FIELDS:
            monster_mats.append({
                'name': summoner.storage._meta.get_field(field_name).help_text,
                # All rainbowmon variants share one display name.
                'field_name': field_name if not field_name.startswith('rainbowmon') else 'rainbowmon',
                'qty': getattr(summoner.storage, field_name)
            })
        context = {
            'is_owner': is_owner,
            'profile_name': profile_name,
            'summoner': summoner,
            'essence_mats': essence_mats,
            'craft_mats': craft_mats,
            'monster_mats': monster_mats,
        }
        return render(request, 'herders/profile/storage/base.html', context=context)
    else:
        return HttpResponseForbidden()
@username_case_redirect
@login_required
def storage_update(request, profile_name):
    """AJAX endpoint: set one storage field (or one essence size slot) to a new count."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    if is_owner and request.POST:
        field_name = request.POST.get('name')
        try:
            new_value = int(request.POST.get('value'))
        except ValueError:
            return HttpResponseBadRequest('Invalid Entry')
        essence_size = None
        if 'essence' in field_name:
            # Split the actual field name off from the size
            try:
                field_name, essence_size = field_name.split('.')
                size_map = {
                    'low': Storage.ESSENCE_LOW,
                    'mid': Storage.ESSENCE_MID,
                    'high': Storage.ESSENCE_HIGH,
                }
                essence_size = size_map[essence_size]
            except (ValueError, KeyError):
                return HttpResponseBadRequest()
        try:
            # Reject field names that don't exist on the Storage model.
            Storage._meta.get_field(field_name)
        except FieldDoesNotExist:
            return HttpResponseBadRequest()
        else:
            if essence_size is not None:
                # Get a copy of the size array and set the correct index to new value
                essence_list = getattr(summoner.storage, field_name)
                essence_list[essence_size] = new_value
                new_value = essence_list
            setattr(summoner.storage, field_name, new_value)
            summoner.storage.save()
            return HttpResponse()
    else:
        return HttpResponseForbidden()
@username_case_redirect
def buildings(request, profile_name):
    """Public buildings page shell for a profile (data loaded separately)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return render(request, 'herders/profile/not_found.html')
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    context = {
        'summoner': summoner,
        'is_owner': is_owner,
        'profile_name': profile_name,
    }
    return render(request, 'herders/profile/buildings/base.html', context)
@username_case_redirect
def buildings_inventory(request, profile_name):
    """Render the building upgrade inventory with glory/guild point progress."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return render(request, 'herders/profile/not_found.html')
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    all_buildings = Building.objects.all().order_by('name')
    building_data = []
    total_glory_cost = 0
    spent_glory = 0
    total_guild_cost = 0
    spent_guild = 0
    for b in all_buildings:
        bldg_data = _building_data(summoner, b)
        if b.area == Building.AREA_GENERAL:
            total_glory_cost += sum(b.upgrade_cost)
            spent_glory += bldg_data['spent_upgrade_cost']
        elif b.area == Building.AREA_GUILD:
            total_guild_cost += sum(b.upgrade_cost)
            spent_guild += bldg_data['spent_upgrade_cost']
        building_data.append(bldg_data)
    # Guard the progress percentages: with no buildings in an area the totals
    # are 0 and the naive division would raise ZeroDivisionError.
    glory_progress = float(spent_glory) / total_glory_cost * 100 if total_glory_cost else 0
    guild_progress = float(spent_guild) / total_guild_cost * 100 if total_guild_cost else 0
    context = {
        'is_owner': is_owner,
        'summoner': summoner,
        'profile_name': profile_name,
        'buildings': building_data,
        'total_glory_cost': total_glory_cost,
        'spent_glory': spent_glory,
        'glory_progress': glory_progress,
        'total_guild_cost': total_guild_cost,
        'spent_guild': spent_guild,
        'guild_progress': guild_progress,
    }
    return render(request, 'herders/profile/buildings/inventory.html', context)
@username_case_redirect
@login_required
def building_edit(request, profile_name, building_id):
    """AJAX edit of one building's level; returns JSON with the form HTML on error."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    base_building = get_object_or_404(Building, pk=building_id)
    try:
        owned_instance = BuildingInstance.objects.get(owner=summoner, building=base_building)
    except BuildingInstance.DoesNotExist:
        # First time editing this building: start it at level 0.
        owned_instance = BuildingInstance.objects.create(owner=summoner, level=0, building=base_building)
    form = EditBuildingForm(request.POST or None, instance=owned_instance)
    form.helper.form_action = reverse(
        'herders:building_edit',
        kwargs={'profile_name': profile_name, 'building_id': building_id}
    )
    context = {
        'form': form,
    }
    context.update(csrf(request))
    if is_owner:
        if request.method == 'POST' and form.is_valid():
            owned_instance = form.save()
            messages.success(
                request,
                f'Updated {owned_instance.building.name} to level {owned_instance.level}'
            )
            response_data = {
                'code': 'success',
            }
        else:
            # Re-render the bound form so the client can show field errors.
            template = loader.get_template('herders/profile/buildings/edit_form.html')
            response_data = {
                'code': 'error',
                'html': template.render(context),
            }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()
def _building_data(summoner, building):
    """Assemble display data for one building: instance, stat bonus, upgrade progress.

    Recovers from duplicate BuildingInstance rows by keeping the first,
    deleting the rest, and recursing once over the cleaned data.
    """
    percent_stat = building.affected_stat in Building.PERCENT_STATS
    total_upgrade_cost = sum(building.upgrade_cost)
    if building.area == Building.AREA_GENERAL:
        currency = 'glory_points.png'
    else:
        currency = 'guild_points.png'
    try:
        instance = BuildingInstance.objects.get(owner=summoner, building=building)
        if instance.level > 0:
            # stat_bonus list is indexed from level 1.
            stat_bonus = building.stat_bonus[instance.level - 1]
        else:
            stat_bonus = 0
        remaining_upgrade_cost = instance.remaining_upgrade_cost()
    except BuildingInstance.DoesNotExist:
        instance = None
        stat_bonus = 0
        remaining_upgrade_cost = total_upgrade_cost
    except BuildingInstance.MultipleObjectsReturned:
        # Should only be 1 ever - use the first and delete the others.
        instance = BuildingInstance.objects.filter(owner=summoner, building=building).first()
        BuildingInstance.objects.filter(owner=summoner, building=building).exclude(pk=instance.pk).delete()
        return _building_data(summoner, building)
    return {
        'base': building,
        'instance': instance,
        'stat_bonus': stat_bonus,
        'percent_stat': percent_stat,
        'spent_upgrade_cost': total_upgrade_cost - remaining_upgrade_cost,
        'total_upgrade_cost': total_upgrade_cost,
        'upgrade_progress': float(total_upgrade_cost - remaining_upgrade_cost) / total_upgrade_cost * 100,
        'currency': currency,
    }
@username_case_redirect
@login_required
def import_export_home(request, profile_name):
    """Landing page linking the profile's import/export tools."""
    return render(request, 'herders/profile/import_export/base.html', context={
        'profile_name': profile_name,
        'view': 'importexport'
    })
def _get_import_options(form_data):
return {
'clear_profile': form_data.get('clear_profile'),
'default_priority': form_data.get('default_priority'),
'lock_monsters': form_data.get('lock_monsters'),
'minimum_stars': int(form_data.get('minimum_stars', 1)),
'ignore_silver': form_data.get('ignore_silver'),
'ignore_material': form_data.get('ignore_material'),
'except_with_runes': form_data.get('except_with_runes'),
'except_light_and_dark': form_data.get('except_light_and_dark'),
'except_fusion_ingredient': form_data.get('except_fusion_ingredient'),
'delete_missing_monsters': form_data.get('missing_monster_action'),
'delete_missing_runes': form_data.get('missing_rune_action'),
'ignore_validation_errors': form_data.get('ignore_validation'),
}
@username_case_redirect
@login_required
def import_sw_json(request, profile_name):
    """Validate an uploaded game-data JSON dump and queue the import task.

    GET shows the upload form pre-filled with the summoner's saved import
    preferences; POST parses and validates the file and, if clean (or the
    user chose to ignore validation errors), dispatches the Celery import
    and renders the progress page.
    """
    errors = []
    validation_failures = []
    # Reset progress markers used by the import-status polling endpoint.
    request.session['import_stage'] = None
    request.session['import_total'] = 0
    request.session['import_current'] = 0
    if request.POST:
        request.session['import_stage'] = None
        request.session.save()
        form = ImportSWParserJSONForm(request.POST, request.FILES)
        form.helper.form_action = reverse('herders:import_swparser', kwargs={'profile_name': profile_name})
        if form.is_valid():
            summoner = get_object_or_404(Summoner, user__username=request.user.username)
            uploaded_file = form.cleaned_data['json_file']
            import_options = _get_import_options(form.cleaned_data)
            if form.cleaned_data.get('save_defaults'):
                # Persist the chosen options as the user's defaults.
                summoner.preferences['import_options'] = import_options
                summoner.save()
            try:
                data = json.load(uploaded_file)
            except ValueError as e:
                errors.append('Unable to parse file: ' + str(e))
            except AttributeError:
                errors.append('Issue opening uploaded file. Please try again.')
            else:
                schema_errors, validation_errors = validate_sw_json(data, request.user.summoner)
                if schema_errors:
                    errors.append(schema_errors)
                if validation_errors:
                    validation_failures += validation_errors
                if not errors and (not validation_failures or import_options['ignore_validation_errors']):
                    # Queue the import
                    task = com2us_data_import.delay(data, summoner.pk, import_options)
                    request.session['import_task_id'] = task.task_id
                    return render(
                        request,
                        'herders/profile/import_export/import_progress.html',
                        context={'profile_name': profile_name}
                    )
    else:
        form = ImportSWParserJSONForm(
            initial=request.user.summoner.preferences.get('import_options', {})
        )
    context = {
        'profile_name': profile_name,
        'form': form,
        'errors': errors,
        'validation_failures': validation_failures,
        'view': 'importexport',
    }
    return render(request, 'herders/profile/import_export/import_sw_json.html', context)
@username_case_redirect
@login_required
def import_status(request, profile_name):
    """Report the Celery import task's status and result as JSON.

    The task ID comes from the querystring or, failing that, the session.
    404s when neither provides one.
    """
    task_id = request.GET.get('id', request.session.get('import_task_id'))
    # Bug fix: AsyncResult(...) is always truthy, so the previous
    # `if task:` check made the 404 branch unreachable even when no task
    # ID was supplied; test the ID itself instead.
    if not task_id:
        raise Http404('Task ID not provided')
    task = AsyncResult(task_id)
    try:
        return JsonResponse({
            'status': task.status,
            'result': task.result,
        })
    except Exception:
        # task.result may not be JSON-serializable (e.g. the exception from
        # a failed task); report a generic error instead of crashing.
        return JsonResponse({
            'status': 'error',
        })
@username_case_redirect
@login_required
def export_win10_optimizer(request, profile_name):
    """Download the summoner's data as a Windows 10 rune-optimizer JSON file."""
    summoner = get_object_or_404(Summoner, user=request.user)
    export_data = export_win10(summoner)
    response = HttpResponse(export_data)
    # Content-Disposition forces a download with a per-user filename.
    response['Content-Disposition'] = f'attachment; filename={request.user.username}_swarfarm_win10_optimizer_export.json'
    return response
|
import numpy as np
from numpy import sin, cos, log
from _routines import ffi, lib
from threading import Thread, Lock
import quaternion
def basic_eval(q, a, b, c, max_iter, shape=None):
    """Evaluate the C ``smooth_julia`` kernel elementwise over broadcast inputs.

    q, a, b, c may be scalars or arrays; when ``shape`` is not given it is
    inferred from the first argument with a non-scalar ``shape`` attribute.
    Returns a float64 array written in place by the C kernel.
    """
    if shape is None:
        for z in (q, a, b, c):
            if hasattr(z, 'shape'):
                if shape is None:
                    shape = z.shape
                if shape == ():
                    # Prefer a later non-scalar shape over a scalar one.
                    shape = z.shape
    # Upshape and make unique
    z = np.zeros(shape)
    q = q + z
    a = a + z
    b = b + z
    c = c + z
    result = z
    # Raw pointers into the contiguous float64 buffers for the C kernel.
    q_buf = ffi.cast("double*", q.ctypes.data)
    a_buf = ffi.cast("double*", a.ctypes.data)
    b_buf = ffi.cast("double*", b.ctypes.data)
    c_buf = ffi.cast("double*", c.ctypes.data)
    result_buf = ffi.cast("double*", result.ctypes.data)
    lib.smooth_julia(
        q_buf, a_buf, b_buf, c_buf,
        result_buf, max_iter,
        result.size
    )
    return result
def advanced_eval(q, a, b, c, d, e, f, g, max_iter, exponent=2, shape=None):
    """Iterate a generalized Julia map and return smooth escape-time values.

    The update mixes distinct left/right multiplication orders (e.g. ``z*a``
    vs ``b*z``), which matters for non-commutative (quaternion) inputs --
    do not reorder the terms.  Elements that never escape yield 0;
    escapees get a smoothed iteration value (larger = slower escape).
    """
    if shape is None:
        for z in (q, a, b, c, d, e, f, g):
            if hasattr(z, 'shape'):
                if shape is None:
                    shape = z.shape
                if shape == ():
                    # Prefer a later non-scalar shape over a scalar one.
                    shape = z.shape
    bailout = 256
    base = 1 / log(exponent)
    # Offset lines the smoothed values up with the iteration count at bailout.
    offset = max_iter - 1 - log(log(bailout)) * base
    result = -np.ones(shape)  # -1 marks "not escaped yet"
    # Upshape and make unique
    z = np.zeros(shape)
    q = q + z
    a = a + z
    b = b + z
    c = c + z
    d = d + z
    e = e + z
    f = f + z
    g = g + z
    for i in range(max_iter):
        r = abs(q)
        # Record the first iteration at which an element exceeds bailout.
        result[np.logical_and(result < 0, r > bailout)] = i
        s = (r <= bailout)
        z = q[s]
        q[s] = z**exponent + z*a[s] + b[s]*z + c[s] + z*d[s]*z + z*e[s]/z + f[s]*z*z + z*z*g[s]
    inside = (result < 0)
    # Smooth the integer escape iteration into a continuous value.
    result[~inside] = log(log(abs(q[~inside]))) - result[~inside] + offset
    result[inside] = 0
    return result
def second_order_eval(q0, q1, a, b, c, d, max_iter, exponent=2, shape=None):
    """Iterate a second-order (two-term memory) Julia-style recurrence.

    ``q0`` holds the current iterate and ``q1`` the previous one; each step
    feeds the old ``q0`` into ``q1``.  Returns a smooth escape-time value per
    element (0 for points that never escape).
    """
    # Infer the broadcast shape from the first array-like argument,
    # preferring a later non-scalar shape over a scalar () one.
    if shape is None:
        for z in (q0, q1, a, b, c, d):
            if hasattr(z, 'shape'):
                if shape is None:
                    shape = z.shape
                if shape == ():
                    shape = z.shape
    bailout = 256
    base = 1 / log(exponent)
    # Normalization constant for the smooth (continuous) escape-time formula.
    offset = max_iter - 1 - log(log(bailout)) * base
    result = -np.ones(shape)  # -1 marks "not escaped yet"
    # Upshape and make unique
    z = np.zeros(shape)
    q0 = q0 + z
    q1 = q1 + z
    a = a + z
    b = b + z
    c = c + z
    d = d + z
    for i in range(max_iter):
        r = abs(q0)
        # Record the first iteration at which each point exceeds the bailout.
        result[np.logical_and(result < 0, r > bailout)] = i
        s = (r <= bailout)
        temp = q0[s] + 0  # copy, so q1 receives the pre-update value
        q0[s] = q0[s]**exponent + q0[s]*d[s]*q1[s] + q1[s]*a[s] + b[s]*q0[s] + c[s]
        q1[s] = temp
    inside = (result < 0)
    # Smooth coloring for escaped points.  NOTE(review): the log-log term is
    # not divided by log(exponent) here, unlike biaxial_eval — confirm intended.
    result[~inside] = log(log(abs(q0[~inside]))) - result[~inside] + offset
    result[inside] = 0
    return result
def julia(
        width, height, center, zoom, theta, phi,
        a, b, c, max_iter, color_map,
        anti_aliasing=2, x_scale=1.0, u_scale=1.0, v_scale=0.005, u_samples=1024, v_samples=4,
        pseudo_mandelbrot=False, coloring=1, bg_luminance=0.2, attenuation=1.0):
    """Render a Julia-set image of shape (3, height, width) via the C extension.

    One worker thread is spawned per anti-aliasing subpixel offset; each
    renders its shifted pixel grid with ``lib.julia`` and accumulates the
    colored result into a shared buffer under a lock.  The final image is
    the average over all anti_aliasing**2 subpixel grids.
    """
    lock = Lock()  # guards accumulation into the shared result buffer
    num_color_channels = 3
    result = np.zeros((num_color_channels, height, width))
    zoom = 2**-zoom  # convert log2 zoom level into a linear scale factor
    u_max = u_scale * zoom
    v_max = v_scale * zoom
    # Per-sample step sizes along the two extra sampling axes (u, v).
    u_delta = -2*u_scale*zoom / u_samples
    v_delta = -2*v_scale*zoom / v_samples
    def accumulate_subpixels(offset_x, offset_y):
        # Render one subpixel-shifted grid and add it to the shared result.
        nonlocal result
        x = np.arange(width, dtype='float64') + offset_x
        y = np.arange(height, dtype='float64') + offset_y
        # Map pixel indices to view coordinates centered on the image.
        x = x_scale * (2 * x - width) * zoom / height
        y = (2 * y - height) * zoom / height
        x, y = np.meshgrid(x, y)
        # Rotate the image plane by theta/phi and translate by the view
        # center to get the per-pixel starting quaternion components.
        qw = center.w + x*cos(theta) + u_max*sin(theta)
        qx = center.x + u_max*cos(theta) - x*sin(theta)
        qy = center.y + y*cos(phi) + v_max*sin(phi)
        qz = center.z + v_max*cos(phi) - y*sin(phi)
        # Step vectors for the u and v sampling sweeps inside the C routine.
        uw = sin(theta) * u_delta
        ux = cos(theta) * u_delta
        uy = 0
        uz = 0
        vw = 0
        vx = 0
        vy = sin(phi) * v_delta
        vz = cos(phi) * v_delta
        # Copies (via + 0) so the C routine can overwrite the buffers with
        # its output channels without clobbering the coordinate arrays.
        area = qw + 0
        red = qx + 0
        green = qy + 0
        blue = qz + 0
        qw_buf = ffi.cast("double*", area.ctypes.data)
        qx_buf = ffi.cast("double*", red.ctypes.data)
        qy_buf = ffi.cast("double*", green.ctypes.data)
        qz_buf = ffi.cast("double*", blue.ctypes.data)
        lib.julia(
            qw_buf, qx_buf, qy_buf, qz_buf, width*height,
            uw, ux, uy, uz, u_samples,
            vw, vx, vy, vz, v_samples,
            a.w, a.x, a.y, a.z,
            b.w, b.x, b.y, b.z,
            c.w, c.x, c.y, c.z,
            max_iter, pseudo_mandelbrot, coloring,
            bg_luminance, attenuation
        )
        subpixel_image = color_map(area, red, green, blue)
        lock.acquire()
        result += subpixel_image
        lock.release()
    # One thread per (i, j) subpixel offset on a regular anti-aliasing grid.
    ts = []
    offsets = np.arange(anti_aliasing) / anti_aliasing
    for i in offsets:
        for j in offsets:
            ts.append(Thread(target=accumulate_subpixels, args=(i, j)))
            ts[-1].start()
    for t in ts:
        t.join()
    # Average the accumulated subpixel images.
    result /= anti_aliasing**2
    return result
def biaxial_eval(q, c0, c1, exponent0, exponent1, max_iter):
    """Iterate a Julia-style map alternating between two axes/exponents.

    Odd steps conjugate the iterate with the quaternion x unit and apply
    (exponent1, c1); even steps apply the plain power map (exponent0, c0).
    Returns a smooth escape-time value per element (0 for non-escaping points).
    """
    escaped = -np.ones(q.shape)  # -1 marks "not escaped yet"
    for i in range(max_iter):
        # Record the first iteration at which each point leaves the radius-128 ball.
        escaped[np.logical_and(escaped < 0, abs(q) >= 128)] = i
        s = escaped < 0
        if i % 2:
            q[s] = (q[s]*quaternion.x)**exponent1*(-quaternion.x) + c1
        else:
            q[s] = q[s]**exponent0 + c0
    # Smooth coloring normalized by the larger of the two exponents.
    exponent = max(exponent0, exponent1)
    s = escaped > 0
    escaped[s] = np.log(np.log(abs(q[s]))) / np.log(exponent) - escaped[s] + max_iter - 1 - np.log(np.log(128)) / np.log(exponent)
    escaped[~s] = 0
    return escaped
def x_root_mandelbrot_eval(q, c, max_iter):
    """Iterate a custom degree-5 polynomial map symmetrized around the x axis.

    Returns a smooth escape-time value per element (0 for non-escaping points).
    """
    # Broadcast c to q's shape and decouple it from the caller's array.
    c = c + q*0
    escaped = -np.ones(q.shape)  # -1 marks "not escaped yet"
    for i in range(max_iter):
        escaped[np.logical_and(escaped < 0, abs(q) >= 128)] = i
        s = escaped < 0
        if s.any():
            centroid = (q[s]-1)*q[s]*(-q[s]-1)
            # Average the two x-conjugated products so the result is symmetric
            # even for non-commuting (quaternion) elements.
            x = (
                (quaternion.x*q[s]-1)*centroid*(-quaternion.x*q[s]-1) +
                (-quaternion.x*q[s]-1)*centroid*(quaternion.x*q[s]-1)
            ) * 0.5
            q[s] = x - q[s] + c[s]
    # The polynomial has degree 5; use it for the smooth-coloring normalization.
    exponent = 5
    s = escaped > 0
    escaped[s] = np.log(np.log(abs(q[s]))) / np.log(exponent) - escaped[s] + max_iter - 1 - np.log(np.log(128)) / np.log(exponent)
    escaped[~s] = 0
    return escaped
def tesseract(q, c, max_iter, exponent=5, sign=-1, counter_exponent=-1):
    """Iterate a power map averaged over conjugations by the x, y and z units.

    Returns a smooth escape-time value per element (0 for non-escaping points).
    """
    # Broadcast c to q's shape and decouple it from the caller's array.
    c = c + q*0
    # Fixed counter-rotation factors applied after each conjugated power.
    x = quaternion.x**counter_exponent
    y = quaternion.y**counter_exponent
    z = quaternion.z**counter_exponent
    escaped = -np.ones(q.shape)  # -1 marks "not escaped yet"
    for i in range(max_iter):
        escaped[np.logical_and(escaped < 0, abs(q) >= 128)] = i
        s = escaped < 0
        if s.any():
            p = q[s]
            # Average of the plain power and its three axis-conjugated versions.
            base = (p**exponent + (p*quaternion.x)**exponent*x + (p*quaternion.y)**exponent*y + (p*quaternion.z)**exponent*z ) / 4.0
            q[s] = base*sign + c[s]
    s = escaped > 0
    escaped[s] = np.log(np.log(abs(q[s]))) / np.log(exponent) - escaped[s] + max_iter - 1 - np.log(np.log(128)) / np.log(exponent)
    escaped[~s] = 0
    return escaped
|
"""
License
-------
Copyright (C) 2021 - David Fernández Castellanos
This file is part of the ADMiniSter package. You can use it, redistribute
it, and/or modify it under the terms of the Creative Commons Attribution
4.0 International Public License.
Summary
-------
Long-term storage of numerical data requires context to make sense of that data.
Adding metadata to the files can partially solve this problem by making the files
self-descriptive. While common plain-text data formats such as JSON and XML can
handle metadata in a natural way, the CSV format, which is specially convenient
for numerical data storage, does not. Thus, different applications or users resort
to their own ways to include metadata in CSV files as a header, making this
metadata format non-universal and potentially laborious to be parsed and loaded
into an application.
This module defines a format to store data and metadata in plain text files, and
provides the tools to create and read the data and the metadata easily.
The format specified here is meant to be self-descriptive and straightforward enough for
most common situations. To this end, the data is stored as CSV, and the metadata is
stored as a header. The header can be composed of an arbitrary number of sections,
and each section stores text or an arbitrary number of keys and values.
The tools provided here allow us to write and load data stored in this format easily.
The CSV data is handled using pandas built-in tools. The metadata in the header
is manipulated using dictionary-like interfaces, thanks to the functions implemented
next.
Format specification
--------------------
1 - The header is defined by all the lines from the top of the file that start with #
2 - After # and a blank space, the next single word defines a header's section name
3 - After the section name, a colon, and a blank space, the section's data is specified
4 - If '=' is present in the section's data:
4.1 - The data represents dictionary-like data, with the format:
key1=value1, key2=value2, ... => dict(key1=value1, key2=value2, ...)
4.2 - Numerical values are automatically identified and parsed. The keys always remain strings
else:
4.3 - The section's data is a single string
5. - A section named dtype specifies the types of the columns following the key-value syntax:
col_name1=col_type1, col_name2=col_type2, ...
6. - After the last header line, the CSV data begins
6.1 - The first line specifies the name of the columns
6.2 - The rest of the lines correspond to the data
7. - If no header section is specified, the file reduces to a plain CSV file
Example
-------
An example of such data file looks as follow:
# name: format_example
# description: with this meaningful description, we will understand the data in the future
# params: mu=1., sigma=0.5, a=3.
# dtype: time_step=int64, value=float64
step_number,value
0,3.72816
1,3.76502
2,4.09007
3,3.41426
4,4.36476
5,3.14854
6,4.38866
7,4.09359
8,3.89782
9,3.66243
10,4.22698
11,4.90460
12,3.37719
13,4.28130
...
To create such file, we would create the data and use the write function:
>>> from ADMiniSter import csv_with_metadata as csv
>>> import numpy as np
>>> import pandas as pd
>>>
>>> mu = 1.
>>> sigma = 0.5
>>> a = 3.
>>>
>>> n = 100
>>> t = np.arange(n)
>>> x = np.random.normal(mu,sigma,n)+a
>>>
>>> df = pd.DataFrame({'time_step': t, 'value': x})
>>>
>>> metadata = dict(name = 'format_example',
>>> description = 'with this meaningful description, we will understand the data in the future',
>>> params = {'mu':mu, 'sigma':sigma, 'a':a}
>>> )
>>>
>>> csv.write(df, '/tmp/test_file.csv', metadata)
To load it, we would use the load function:
>>> df, metadata = csv.load('/tmp/test_file.csv')
>>>
>>> df
time_step value
0 0 3.72816
1 1 3.76502
2 2 4.09007
3 3 3.41426
4 4 4.36476
.. ... ...
95 95 4.36909
96 96 3.78041
97 97 3.71782
98 98 3.61544
99 99 4.37941
[100 rows x 2 columns]
>>>
>>> df.dtypes
time_step int64
value float64
dtype: object
>>>
>>> metadata
{'name': 'format_example',
'description': 'with this meaningful description, we will understand the data in the future',
'params': {'mu': 1.0, 'sigma': 0.5, 'a': 3.0},
'dtype': {'time_step': 'int64', 'value': 'float64'}}
"""
import pandas as pd
from ast import literal_eval
def header_line_to_dict(hearder_line):
    """
    Parse a header line of key=value pairs into a dictionary.

    Parameters
    ----------
    - hearder_line: a line of the header, with format
      "key1=value1, key2=value2, ..."

    Returns
    -------
    A dictionary with the data stored in the header line.  Values that parse
    as Python literals (numbers, tuples, ...) are converted; all other values
    are kept as strings.  Keys always remain strings.
    For more information, see the format description at the top.
    """
    attrs = {}
    for item in hearder_line.split(","):
        if item == "":
            continue
        # Split on the first '=' only, so values containing '=' survive intact.
        key, _, value_str = item.strip(" ").partition("=")
        try:
            attrs[key] = literal_eval(value_str)
        except (ValueError, SyntaxError):
            # Not a valid Python literal (literal_eval raises SyntaxError for
            # strings such as "hello world", not only ValueError); keep it
            # verbatim as a string.
            attrs[key] = value_str
    return attrs
def construct_header(metadata):
    """
    Construct a text file header in string format.
    For more information, see the format description at the top.

    Parameters
    ----------
    - metadata: dictionary-like object with the metadata.  The special key
      "filename" is skipped (it is added by load() and is not part of the
      on-disk format).

    Returns
    -------
    A string with the metadata, ready to be used as the header of a text file.
    """
    lines = []
    for name, value in metadata.items():
        if name == "filename":
            continue
        if isinstance(value, dict):
            # Spaces inside keys/values would break the "key=value, ..."
            # parsing on load, so replace them with underscores.  (Previously
            # the replacements were computed but discarded, because the final
            # string was rebuilt from the original, unmodified dict items.)
            pairs = []
            for k, v in value.items():
                k = str(k).replace(" ", "_")
                if isinstance(v, str):
                    v = v.replace(" ", "_")
                pairs.append("{}={}".format(k, v))
            lines.append("# {}: {}".format(name, ", ".join(pairs)))
        else:
            lines.append("# {}: {}".format(name, value))
    return "\n".join(lines)
def write(df, filename, metadata=None, float_format="%.5f"):
    """
    Write the input pandas DataFrame into a CSV file, with a header created
    from the input metadata.

    Parameters
    ----------
    - df: the pandas DataFrame
    - filename: the file to be written
    - metadata: the metadata, to be written as a header.  It is a dictionary,
        where each key corresponds to a section of the header.  Each value is
        a string, a number, or another dictionary.  If it is another
        dictionary, it will be written as a header line with format
        key1=value1, key2=value2, ...
        (for more information, see the format description at the top).
        The caller's dictionary is not modified.
    - float_format: specifies the format with which the float data is written

    Example
    -------
    See documentation at the top
    """
    with open(filename, "w", encoding="ascii") as file:
        if metadata is not None:
            # Work on a shallow copy so the caller's dict does not silently
            # acquire a "dtype" entry as a side effect of writing.
            metadata = dict(metadata)
            # Record the column dtypes so load() can restore them exactly.
            metadata["dtype"] = {k: str(v) for k, v in df.dtypes.items()}
            header_str = construct_header(metadata)
            file.write(header_str + "\n")
        df.to_csv(file, index=False, sep=",", float_format=float_format)
    return
def parse_header(filename):
    """
    Parse the header of a text file.

    Parameters
    ----------
    - filename: the file to read

    Returns
    -------
    A dictionary, where each key corresponds to a line of the header.  Each
    value is either a string or a dictionary with the data of that line.
    For more information, see the format description at the top.
    """
    header = dict()
    with open(filename, "r", encoding="ascii") as file:
        file_str = file.read().split("\n")
    hearder_lines = list()
    for line in file_str:
        if line.startswith("#"):
            hearder_lines.append(line.strip("# "))
    for line in hearder_lines:
        # Split on the first ':' only, so values containing ':' (URLs,
        # timestamps, ...) are no longer truncated.
        name, _, value = line.partition(":")
        name = name.strip(" ")
        value = value.strip(" ")
        if "=" in value:
            value = header_line_to_dict(value)
        header[name] = value
    return header
def load(filename):
    """
    Load a text file with metadata as header and data in CSV format.

    This function allows loading files created with write().
    For more information, see the format description at the top.

    Parameters
    ----------
    - filename: the file to load

    Returns
    -------
    - df: a pandas DataFrame with the CSV data
    - metadata: a dictionary whose keys correspond to each line of metadata
      composing the header.

    Example
    -------
    See documentation at the top
    """
    metadata = parse_header(filename)
    metadata["filename"] = filename
    # Restore the column dtypes recorded by write(), when present.
    dtype = metadata.get("dtype")
    try:
        df = pd.read_csv(filename, comment="#", engine="c", dtype=dtype)
    except FileNotFoundError:
        print("Problem reading: {}".format(filename))
        raise
    return df, metadata
|
from datetime import timedelta
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.timezone import now
from help_desk.models import ActiveRange
from help_desk.utils import create_help_desk_groups
from help_desk.tests.factories import HelpDeskUserFactory
@override_settings(LANGUAGE_CODE='en')
class NormalizeLanguageCodeMiddlewareTestCase(TestCase):
    """Tests that the Accept-Language header is normalized into the page language."""

    def setUp(self):
        self.url = reverse(settings.LOGIN_URL)

    def _assert_page_language(self, lang, accept_language=None):
        # Fetch the login page (optionally with an Accept-Language header)
        # and verify the <html lang="..."> attribute in the response.
        kwargs = {}
        if accept_language is not None:
            kwargs['HTTP_ACCEPT_LANGUAGE'] = accept_language
        rsp = self.client.get(self.url, **kwargs)
        self.assertEqual(200, rsp.status_code)
        self.assertContains(rsp, '<html lang="%s"' % lang, status_code=200)

    def test_simple_positive_case(self):
        """test that a request with no accept-language header gets the default language"""
        self._assert_page_language('en')

    def test_accept_language_overrides_settings(self):
        """test that a simple accept-language header is respected"""
        self._assert_page_language('ar', accept_language='ar')

    def test_yandex_accept_language(self):
        """test the actual accept-language header (from the yandex bot) reported in issue 1351"""
        self._assert_page_language('en', accept_language='ru, uk;q=0.8, be;q=0.8, en;q=0.7, *;q=0.01')

    def test_accept_language_with_subcode_overrides_settings(self):
        """test that an accept-language header with a subcode is respected and normalized"""
        self._assert_page_language('ar', accept_language='ar-ly')

    def test_accept_language_with_subcode_not_case_sensitive(self):
        """test that the middleware that examines the accept-language header ignores case"""
        self._assert_page_language('en', accept_language='en-US')

    def test_unknown_language_reverts_to_settings(self):
        """test that an accept-language header with an unknown language reverts to the default"""
        self._assert_page_language('en', accept_language='sv')
class GroupExpirationMiddlewareTest(TestCase):
    """Tests that staff group membership is revoked once a user's ActiveRange ends."""

    @classmethod
    def setUpTestData(cls):
        create_help_desk_groups()
        cls.password = 'fakepassword'
        cls.user = HelpDeskUserFactory(password=cls.password)
        cls.active_range = ActiveRange.objects.create(user=cls.user)

    def setUp(self):
        self.client.login(username=self.user.username, password=self.password)
        self.url = reverse('check_registration')

    def check_message(self, rsp, expected_message):
        """
        Helper method to check that a response has a 200 status code, and that it
        has a single django contrib message matching `expected_message`. If
        `expected_message` is None, then make sure that there are no messages
        being shown to the user.
        """
        self.assertEqual(rsp.status_code, 200)
        messages = list(rsp.context['messages'])
        if not expected_message:
            self.assertEqual(len(messages), 0)
            return
        self.assertEqual(len(messages), 1)
        self.assertEqual(expected_message, str(messages[0]))

    def _set_end_date(self, end_date):
        # Move the expiry of the user's staff access to the given date.
        self.active_range.end_date = end_date
        self.active_range.save()

    def test_after_active_range(self):
        self._set_end_date(now().date() - timedelta(days=1))
        rsp = self.client.get(self.url, follow=True)
        # user is shown an error message ...
        self.check_message(rsp, 'This account no longer has staff access.')
        self.user.refresh_from_db()
        # ... and user is no longer a member of any groups
        self.assertFalse(self.user.groups.exists())

    def test_in_active_range(self):
        self._set_end_date(now().date() + timedelta(days=1))
        rsp = self.client.get(self.url, follow=True)
        # No error message ...
        self.check_message(rsp, None)
        self.user.refresh_from_db()
        # ... and user is still in groups
        self.assertTrue(self.user.groups.exists())

    def test_at_range_end(self):
        # The end date itself still counts as active.
        self._set_end_date(now().date())
        rsp = self.client.get(self.url, follow=True)
        # No error message ...
        self.check_message(rsp, None)
        self.user.refresh_from_db()
        # ... and user is still in groups
        self.assertTrue(self.user.groups.exists())

    def test_anonymous_user_not_affected(self):
        self.client.logout()
        rsp = self.client.get(self.url, follow=True)
        # No error message.
        self.check_message(rsp, None)

    def test_user_without_active_range_not_affected(self):
        ActiveRange.objects.unfiltered().delete()
        rsp = self.client.get(self.url)
        self.assertEqual(rsp.status_code, 200)
        # No error message ...
        self.check_message(rsp, None)
        self.user.refresh_from_db()
        # ... and user is still in groups
        self.assertTrue(self.user.groups.exists())
|
# -*- coding: UTF-8 -*-
# Sandbox class
# https://github.com/hiromu/arrow-judge/blob/master/src/sandbox.py
import os
import re
import pwd
import json
import stat
import time
import shutil
import signal
import subprocess
import uuid
import configparser
AVAILABLE_DEVICES = ['full', 'null', 'random', 'stderr', 'stdin', 'stdout', 'urandom', 'zero']
CGROUP_SUBSETS = ['cpuacct', 'memory']
AVAILABLE_PATHS = [
'/bin', '/etc', '/lib', '/lib64', '/proc', '/sbin',
'/usr',
'/var/lib']
SYSCTL_PARAMS = ['kernel.sem=0 0 0 0', 'kernel.shmall=0', 'kernel.shmmax=0', 'kernel.shmmni=0', 'kernel.msgmax=0', 'kernel.msgmnb=0', 'kernel.msgmni=0', 'fs.mqueue.queues_max=0']
def execCommand(command):
    """Run *command* through the shell with output discarded; return its exit status."""
    devnull = subprocess.DEVNULL
    return subprocess.call(command, stdout=devnull, stderr=devnull, shell=True)
class SandBox():
    """A chroot sandbox assembled from read-only bind mounts of whitelisted paths.

    Usable as a context manager: the filesystem is set up on ``__enter__``
    and torn down on ``__exit__``.
    """

    def __init__(self, directory, user, addition_path=None):
        """
        :param directory: parent directory under which a unique sandbox root is created
        :param user: name of the unprivileged user that owns the sandbox root
        :param addition_path: extra host paths to bind-mount besides AVAILABLE_PATHS
        """
        self.base_dir = os.path.join(directory, str(uuid.uuid4()))
        self.sandbox_user = user
        cfg = configparser.ConfigParser()
        # NOTE(review): the parsed config is currently unused — confirm whether
        # it can be removed or whether settings from it should be applied.
        cfg.read('./config.ini', 'UTF-8')
        # Avoid the shared mutable-default-argument pitfall (was addition_path=[]).
        self.addition_path = [] if addition_path is None else addition_path
        self.mounted = []  # list of real paths

    def __enter__(self):
        self.mount()
        return self

    def __exit__(self, ex_type, ex_value, trace):
        self.umount()
        pass

    def __mount_dir(self, path):
        """Bind-mount a single host path (dir, file or device) into the sandbox."""
        if path in self.mounted:  # already mounted
            return
        self.mounted.append(path)
        virtual_path = self.base_dir + path
        if (not path.startswith('/dev')) and os.path.islink(path):  # device should be mounted anytime
            shutil.copy(path, virtual_path, follow_symlinks=False)
        elif os.path.isdir(path):
            # Make sure the mount point exists and is a directory.
            if not os.path.exists(virtual_path):
                os.makedirs(virtual_path)
            if not os.path.isdir(virtual_path):
                os.remove(virtual_path)
                os.makedirs(virtual_path)
            execCommand('mount -n --bind -o ro %s %s' % (path, virtual_path))
        else:  # file or device
            # Make sure the mount point exists and is a regular file.
            if not os.path.exists(virtual_path):
                open(virtual_path, 'a').close()
            if os.path.isdir(virtual_path):
                os.removedirs(virtual_path)
                open(virtual_path, 'a').close()
            execCommand('mount -n --bind %s %s' % (path, virtual_path))

    def mount(self):
        """Create the sandbox root and bind-mount devices, allowed paths and /tmp."""
        if not os.path.exists(self.base_dir):
            os.makedirs(self.base_dir)
        # Change permission
        uid = pwd.getpwnam(self.sandbox_user)
        os.chown(self.base_dir, uid.pw_uid, uid.pw_gid)
        # Mount /dev filesystem
        if not os.path.exists(self.base_dir + '/dev'):
            os.mkdir(self.base_dir + '/dev')
        for i in AVAILABLE_DEVICES:
            self.__mount_dir('/dev/' + i)
        # Mount allowed directory
        for i in AVAILABLE_PATHS + self.addition_path:
            self.__mount_dir(i)
        # Mount tmp directory (world-writable, recreated fresh)
        path = self.base_dir + '/tmp'
        if not os.path.exists(path):
            os.makedirs(path)
        if not os.path.isdir(path):
            os.remove(path)
            os.makedirs(path)
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    def umount(self):
        """Undo everything mount() did and remove the sandbox root."""
        # Unmount tmp directory
        path = self.base_dir + '/tmp'
        if os.path.exists(path):
            shutil.rmtree(path)
        for i in self.mounted:
            path = self.base_dir + i
            while True:
                if not os.path.islink(path):
                    # Lazily unmount until the kernel reports success.
                    while execCommand('umount -l %s' % (path)):
                        pass
                try:
                    if os.path.islink(path) or os.path.isfile(path):
                        os.unlink(path)
                    else:
                        # Remove now-empty directories up toward the sandbox root.
                        delete_path = i
                        while delete_path != '/':
                            if os.listdir(self.base_dir + delete_path):
                                break
                            os.rmdir(self.base_dir + delete_path)
                            delete_path = os.path.dirname(delete_path)
                except OSError as e:
                    if re.match(r'\[Errno 16\] Device or resource busy', str(e)):
                        # BUG FIX: was a bare `sleep(1)` (NameError); pause and retry.
                        time.sleep(1)
                        continue
                break
        shutil.rmtree(self.base_dir)

    def clean(self):
        """Tear the sandbox down and rebuild it from scratch."""
        self.umount()
        self.mount()

    def Popen(self, args, *, as_user=False, **kwargs):
        """Spawn *args* chrooted into the sandbox in fresh namespaces.

        :param as_user: run the command as the unprivileged sandbox user
        """
        cmd_args = 'unshare -finpu chroot {dir}'.format(dir=self.base_dir).split(' ')
        if as_user:
            cmd_args.extend('sudo -u {user}'.format(user=self.sandbox_user).split(' '))
        #
        cmd_args.extend(args)
        return subprocess.Popen(cmd_args, shell=False, **kwargs)

    def put_file(self, filepath, content, permission=0o644):
        """Write *content* (bytes) to *filepath* inside the sandbox with the given mode."""
        with open(self.base_dir + filepath, 'wb') as f:
            f.write(content)
        os.chmod(self.base_dir + filepath, permission)
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Stackdriver Trace API."""
from google.cloud.trace._gax import make_gax_trace_api
from google.cloud.client import ClientWithProject
from google.cloud._helpers import _datetime_to_pb_timestamp
class Client(ClientWithProject):
    """Client to bundle configuration needed for API requests.

    :type project: str
    :param project: The project which the client acts on behalf of.
                    If not passed, falls back to the default inferred from
                    the environment.

    :type credentials: :class:`~google.auth.credentials.Credentials`
    :param credentials: (Optional) The OAuth2 Credentials to use for this
                        client. If not passed, falls back to the default
                        inferred from the environment.
    """

    # Cache slot for the lazily-created trace API helper.
    _trace_api = None

    def __init__(self, project=None, credentials=None):
        super(Client, self).__init__(
            project=project, credentials=credentials)

    @property
    def trace_api(self):
        """Helper for trace-related API calls.

        The gax API object is created on first access and cached on the
        instance afterwards (previously it was rebuilt on every access,
        defeating the ``_trace_api`` cache attribute).

        See
        https://cloud.google.com/trace/docs/reference/v1/rpc/google.devtools.
        cloudtrace.v1
        """
        if self._trace_api is None:
            self._trace_api = make_gax_trace_api(self)
        return self._trace_api

    def patch_traces(self, traces, project_id=None, options=None):
        """Sends new traces to Stackdriver Trace or updates existing traces.

        :type traces: dict
        :param traces: The traces to be patched in the API call.

        :type project_id: str
        :param project_id: (Optional) ID of the Cloud project where the trace
                           data is stored.  Defaults to the client's project.

        :type options: :class:`~google.gax.CallOptions`
        :param options: (Optional) Overrides the default settings for this
                        call, e.g, timeout, retries etc.
        """
        if project_id is None:
            project_id = self.project
        self.trace_api.patch_traces(
            project_id=project_id,
            traces=traces,
            options=options)

    def get_trace(self, trace_id, project_id=None, options=None):
        """Gets a single trace by its ID.

        :type project_id: str
        :param project_id: (Optional) ID of the Cloud project where the trace
                           data is stored.  Defaults to the client's project.

        :type trace_id: str
        :param trace_id: ID of the trace to return.

        :type options: :class:`~google.gax.CallOptions`
        :param options: (Optional) Overrides the default settings for this
                        call, e.g, timeout, retries etc.

        :rtype: dict
        :returns: A Trace dict.
        """
        if project_id is None:
            project_id = self.project
        return self.trace_api.get_trace(
            project_id=project_id,
            trace_id=trace_id,
            options=options)

    def list_traces(
            self,
            project_id=None,
            view=None,
            page_size=None,
            start_time=None,
            end_time=None,
            filter_=None,
            order_by=None,
            page_token=None):
        """Returns of a list of traces that match the filter conditions.

        :type project_id: str
        :param project_id: (Optional) ID of the Cloud project where the trace
                           data is stored.  Defaults to the client's project.

        :type view: :class:`google.cloud.gapic.trace.v1.enums.
                    ListTracesRequest.ViewType`
        :param view: (Optional) Type of data returned for traces in the list.
                     Default is ``MINIMAL``.

        :type page_size: int
        :param page_size: (Optional) Maximum number of traces to return.
                          If not specified or <= 0, the implementation selects
                          a reasonable value.  The implementation may return
                          fewer traces than the requested page size.

        :type start_time: :class:`~datetime.datetime`
        :param start_time: (Optional) Start of the time interval (inclusive)
                           during which the trace data was collected from the
                           application.

        :type end_time: :class:`~datetime.datetime`
        :param end_time: (Optional) End of the time interval (inclusive) during
                         which the trace data was collected from the
                         application.

        :type filter_: str
        :param filter_: (Optional) An optional filter for the request.

        :type order_by: str
        :param order_by: (Optional) Field used to sort the returned traces.

        :type page_token: str
        :param page_token: opaque marker for the next "page" of entries. If not
                           passed, the API will return the first page of
                           entries.

        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Traces that match the specified filter conditions.
        """
        if project_id is None:
            project_id = self.project
        # The API expects protobuf Timestamps, not datetimes.
        if start_time is not None:
            start_time = _datetime_to_pb_timestamp(start_time)
        if end_time is not None:
            end_time = _datetime_to_pb_timestamp(end_time)
        return self.trace_api.list_traces(
            project_id=project_id,
            view=view,
            page_size=page_size,
            start_time=start_time,
            end_time=end_time,
            filter_=filter_,
            order_by=order_by,
            page_token=page_token)
|
#!/usr/bin/python3
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import re
import sqlite3
import time
import random
import logging
import copy
import threading
import concurrent.futures
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
#import thread
# Paths to the SQLite database and the CSV file with movie links/IDs.
database = "database/test.db"
linkFile = "data/links.csv"
# Module-level DB connection/cursor.  NOTE(review): all DB access appears to
# happen on the main thread (workers only append to collectedData) — confirm,
# since sqlite3 connections must not be shared across threads by default.
databaseConnection = sqlite3.connect(database)
dbSql = databaseConnection.cursor();
baseLinkImage = 'https://www.themoviedb.org'
# multithread implementation to drasticly improve speed
# each position contains a dictionary with:
# {cl: [], overview: overview, imageLink: imageLink, actor: []}
collectedData = []
notCollected = []
def CollectDataFor(cl):
    """Scrape overview, poster link and cast for one movie link row.

    ``cl`` is a CSV row string whose third field is the TMDB movie id.
    On success a dict {cl, overview, imageLink, actor} is appended to the
    shared ``collectedData`` list.  Retries up to 10 times on parse failures
    and backs off (then exits the process) on repeated HTTP 429 responses.
    """
    logger.info("Start collecting data for:" + cl)
    cl = cl.split(',')
    linkToInfo = baseLinkImage + '/movie/' + str(cl[2])
    counter = 0
    triedSleep = 1
    while counter < 10:
        req = None
        webpage = None
        try:
            req = Request(linkToInfo, headers={'User-Agent': 'Mozilla/5.0'})
            webpage = urlopen(req).read()
        except Exception as e:
            logger.error("HTTP ERROR OCURED: " + str(e))
            errorCode = re.findall(r'\d+', str(e))
            if len(errorCode) > 0 and int(errorCode[0]) == 429:
                if triedSleep > 2:
                    logger.critical("CANT MAKE ANY MORE REQUESTS, EXITING")
                    databaseConnection.close()
                    exit(0)
                triedSleep += 1
                # BUG FIX: Logger has no INFO() method — was `logger.INFO(...)`,
                # which raised AttributeError on every 429 response.
                logger.info("To many request trying to sleep for 10 seconds")
                time.sleep(10*triedSleep)
                triedSleep = True  # NOTE(review): resets backoff (True == 1) — confirm intended
            else:
                counter = 10
        else:
            webSoup = BeautifulSoup(webpage, "html.parser")
            overview = webSoup.find_all("div", {"class": "overview"})
            if len(overview) > 0:
                overview = re.findall(r'<p>(.*?)</p>', str(overview))[0]
            else:
                logger.warning("CANT FIND DATA FOR: " + str(cl))
                return
            imageLink = webSoup.find_all("img", {"class": "poster"})
            if len(imageLink) > 0:
                imageLink = baseLinkImage + str(re.findall(r'src="(.*?)"', str(imageLink))[0])
            else:
                imageLink = None
            actor = webSoup.find_all("ol", {"class": "people"})
            actor = re.findall(r'<p>.*<a.*>(.*?)</a>.*</p>', str(actor))
            # Only accept pages with a non-trivial overview text.
            if len(overview) > 10:
                logger.info("Found info for " + str(cl[0]))
                infoDict = {}
                infoDict["cl"] = cl
                infoDict["overview"] = overview
                infoDict["imageLink"] = imageLink
                infoDict["actor"] = actor
                collectedData.append(infoDict)
                counter = 10
            else:
                logger.info("Not found. Repeating id:" + str(cl[0]) + ' - c:' + str(counter))
                counter += 1
                # Jitter between retries to avoid hammering the server.
                time.sleep(random.random()*2)
                if counter == 10:
                    logger.warning("CANT FIND DATA FOR: " + str(linkToInfo))
def InsertIntoDatabase():
    """Drain the shared collectedData queue and persist each entry to the database."""
    while collectedData:
        entry = collectedData.pop(0)
        logger.debug(str(entry))
        movie_id = int(entry["cl"][0])
        for actor_name in entry["actor"]:
            if len(actor_name) > 1:
                # Look up the actor, inserting it first if unknown.
                rows = dbSql.execute("SELECT id FROM actor WHERE name=?", (actor_name,)).fetchall()
                if not rows:
                    dbSql.execute("INSERT INTO actor(name) VALUES(?)", (actor_name,))
                    rows = dbSql.execute("SELECT id FROM actor WHERE name=?", (actor_name,)).fetchall()
                dbSql.execute("INSERT INTO movieActor(id_movie, id_actor) VALUES(?,?)", (movie_id, int(rows[0][0]),))
        dbSql.execute("UPDATE movie SET overview=?, image=? WHERE id=?", (entry["overview"], entry["imageLink"], movie_id,))
        updated = dbSql.execute("SELECT title, overview FROM movie WHERE id=?", (movie_id,)).fetchall()
        logger.info("Inserted additional data for movie: " + str(updated[0][0]))
def alreadyInDatabase(cl):
    """Return True when the movie row already stores a non-trivial overview."""
    movie_id = cl.split(',')[0]
    overview = dbSql.execute("SELECT overview FROM movie WHERE id=?", (movie_id,)).fetchall()
    logger.debug(str(overview))
    stored = overview[0][0]
    return stored is not None and len(stored) > 25
maxThreads = 5
chunkCounter = 0
moviesLeft = 0
# Load the links CSV, replace non-ASCII characters, drop the header row.
with open(linkFile, newline='') as cLink:
    moviedbId = re.sub(r'[^\x00-\x7f]',r' ', cLink.read())
    moviedbId = moviedbId.splitlines()
    moviedbId.pop(0)
    moviesLeft = len(moviedbId)
# Split the work into chunks of at most maxThreads movies each.
chunks = [moviedbId[x:x+maxThreads] for x in range(0, len(moviedbId), maxThreads)]
try:
    for chunk in chunks:
        #logger.debug("chunk data:"+str(chunk))
        # Compact the chunk in place, dropping movies that already have data.
        i = 0
        while i < len(chunk):
            if alreadyInDatabase(chunk[i]):
                del chunk[i]
                moviesLeft -= 1
            else:
                i += 1
        if len(chunk) > 0:
            logger.debug("Still no data:"+str(chunk))
            if len(chunk) > 1:
                # Fetch the chunk concurrently; workers append to collectedData.
                with concurrent.futures.ThreadPoolExecutor(max_workers=maxThreads) as executor:
                    executor.map(CollectDataFor, chunk)
            else:
                CollectDataFor(chunk[0])
            # Persist whatever the workers collected (runs on the main thread).
            InsertIntoDatabase()
        chunkCounter += len(chunk)
        print("Movies left:", moviesLeft - chunkCounter)
except Exception as e:
    logger.error("ERROR IN MAIN FUNCTION: " + str(e))
#closing conection
logger.info("Commited to database current changes")
databaseConnection.commit()
databaseConnection.close()
|
import torch
from src.utils import preprocess
from time import gmtime, strftime
def save_model(model, optimizer, src_vocabulary, tgt_vocabulary, epoch, loss, time_elapsed, with_att=False, is_jit=False):
    """
    Save trained model, along with source and target languages vocabularies.

    :param model: The trained model
    :param optimizer: The optimizer used to train the model
    :param src_vocabulary: source language vocabulary.
    :param tgt_vocabulary: target language vocabulary.
    :param epoch: epoch at which the model will be saved.
    :param loss: double
        The loss of the model.
    :param time_elapsed: int
        Number of seconds that the model took to train until that point
    :param with_att: Boolean
        Save the version of model with attention mechanism
    :param is_jit: boolean
        Whether to save the JIT version of the model
    :return: None
    """
    # Bundle everything needed to resume training into one checkpoint dict.
    checkpoint = {
        'epoch': epoch + 1,
        'loss': loss,
        'time_elapsed': time_elapsed,
        'src_vocabulary': src_vocabulary,
        'tgt_vocabulary': tgt_vocabulary,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict()
    }
    # Checkpoints are organized by whether the model uses attention.
    if with_att:
        label, substitute_path = 'WITH_ATT', 'ATTENTION_CHECKPOINTS/'
    else:
        label, substitute_path = 'WITHOUT_ATT', 'WITHOUT_ATTENTION/'
    filename = f'checkpoints/{substitute_path}CHECKPOINT_{label}__DE__TO__EN__EPOCH_{epoch}__AT__{strftime("%Y_%m_%d__%H_%M_%S", gmtime())}__TRAIN_LOSS__{loss}.pt'
    jit_filename = f'checkpoints/{substitute_path}JIT/JIT__{label}_ATT__DE__TO__EN__EPOCH_{epoch}__AT__{strftime("%Y_%m_%d__%H_%M_%S", gmtime())}__TRAIN_LOSS__{loss}.pt'
    # save checkpoint
    torch.save(checkpoint, filename)
    if is_jit:
        # Trace the model on a sample sentence pair and save the JIT version.
        model.eval()
        de_sentence = 'Ein kleines Mädchen klettert in ein Spielhaus aus Holz.'
        en_sentence = 'A little girl climbing into a wooden playhouse.'
        # traced = torch.jit.trace(model, (preprocess(de_sentence, src_vocabulary)[0], preprocess(en_sentence, tgt_vocabulary)[0]), check_trace=False)
        traced = torch.jit.trace(model, (preprocess(de_sentence, src_vocabulary.vocab)[0], preprocess(en_sentence, tgt_vocabulary.vocab)[0]), check_trace=False)
        traced.save(jit_filename)
|
from .experiment import Experiment
from .experiment_set import ExperimentSet |
#!/usr/bin/env python
import os
import urllib
import time
import threading
import sys
sys.path.insert(0, os.path.abspath('./plugins/butia'))
from gettext import gettext as _
from plugins.plugin import Plugin
from TurtleArt.talogo import logoerror
from TurtleArt.tapalette import make_palette
from TurtleArt.tapalette import palette_name_to_index
from TurtleArt.tapalette import palette_blocks
from TurtleArt.tapalette import special_block_colors
from TurtleArt.taprimitive import Primitive , ArgSlot, ConstantArg
from TurtleArt.tatype import TYPE_STRING, TYPE_FLOAT, TYPE_NUMBER
from TurtleArt.tautils import debug_output
from TurtleArt.tawindow import block_names
from pybot import pybot_client
#constants definitions
ERROR = -1 # default return value in case of error
MAX_SENSOR_PER_TYPE = 6  # number of relay blocks created in the palette
RELAY_PORT = {}  # maps relay block name -> board module port (0 = unassigned)
MODULOS_CONECTADOS = []  # module-level list; unused in the visible code
COLOR_RED = ["#FF0000","#808080"]  # block colors: alert / power-off
COLOR_PRESENT = ["#00FF00","#008000"]  # block colors: device present
COLOR_NOTPRESENT = ["#A0A0A0","#808080"]  # block colors: device absent
class Atyarandu(Plugin):
    """TurtleArt palette plugin for renewable-energy blocks.

    Provides blocks that read live renewable-generation figures from
    fing.edu.uy and blocks that drive relay modules on a Butia robot
    board (via pybot_client).
    """

    def __init__(self, parent):
        self._parent = parent
        self._status = True
        self.robot = pybot_client.robot()
        self.pollthread = None  # periodic refresh timer (threading.Timer)
        self.actualizable = True  # set to False on quit() to stop polling
        self._auto_refresh = False
        self.modulos_conectados = []
        self._list_connected_device_module = []

    def setup(self):
        """Create the 'atyarandu' palette and register all of its blocks."""
        palette = make_palette('atyarandu',
                               colors=COLOR_PRESENT,
                               help_string=_('Palette of Renewable Energy'))
        palette.add_block('engrefreshagh',
                          style='basic-style',
                          label=_('refresh Energy'),
                          value_block=True,
                          help_string=_('updates the status of the palette and the Energy blocks'),
                          prim_name='engrefreshagh')
        # NOTE(review): prim_refresh returns None although it is registered
        # with return_type=TYPE_STRING — confirm against TurtleArt semantics.
        self._parent.lc.def_prim('engrefreshagh', 0,
                                 Primitive(self.prim_refresh,
                                           return_type=TYPE_STRING))
        palette.add_block('enggenagh',
                          style='box-style',
                          label=_('energy generated'),
                          value_block=True,
                          help_string=_('Estimated value of renewable energy (MW) to generate in the next hour in Uruguay'),
                          prim_name='enggenagh')
        self._parent.lc.def_prim('enggenagh', 0,
                                 Primitive(self.prim_enggen,
                                           return_type=TYPE_FLOAT))
        palette.add_block('engmaxagh',
                          style='box-style',
                          label=_('max energy'),
                          value_block=True,
                          help_string=_('Nominal value of renewable energy (MW) that can be generated in Uruguay'),
                          prim_name='engmaxagh')
        self._parent.lc.def_prim('engmaxagh', 0,
                                 Primitive(self.prim_engmax,
                                           return_type=TYPE_FLOAT))
        palette.add_block('engrecagh',
                          style='box-style',
                          label=_('recommended energy'),
                          value_block=True,
                          help_string=_('The preferred value of renewable energy (MW) for use'),
                          prim_name='engrecagh')
        self._parent.lc.def_prim('engrecagh', 0,
                                 Primitive(self.prim_engrec,
                                           return_type=TYPE_FLOAT))
        palette.add_block('engoncagh',
                          style='box-style',
                          label=_('ON'),
                          value_block='On',
                          help_string=_('Power on'),
                          colors=COLOR_PRESENT,
                          prim_name='engonagh')
        self._parent.lc.def_prim('engonagh', 0,
                                 Primitive(self.prim_on, return_type=TYPE_NUMBER))
        palette.add_block('engoffcagh',
                          style='box-style',
                          label=_('OFF'),
                          value_block='Off',
                          colors=COLOR_RED,
                          help_string=_('Power off'),
                          prim_name='engoffagh')
        self._parent.lc.def_prim('engoffagh', 0,
                                 Primitive(self.prim_off, return_type=TYPE_NUMBER))
        # Create MAX_SENSOR_PER_TYPE relay blocks; only the first one is
        # visible until a matching relay module is detected on the board.
        global RELAY_PORT
        for m in range(MAX_SENSOR_PER_TYPE):
            if m == 0:
                ocultar = False
            else:
                ocultar = True
            x = str(m + 1)
            nombloque = 'relay' + x + 'agh'
            RELAY_PORT[nombloque] = 0  # port unassigned until detection
            palette.add_block(nombloque,
                              style='basic-style-1arg',
                              label=_('relay'),
                              prim_name=nombloque,
                              default=1,
                              hidden=ocultar,
                              colors=COLOR_PRESENT,
                              help_string=_('power on/off the relay'))
            self._parent.lc.def_prim(nombloque, 1,
                                     Primitive(self.prim_control,
                                               arg_descs=[ArgSlot(TYPE_NUMBER), ConstantArg(nombloque)]))
            special_block_colors[nombloque] = COLOR_NOTPRESENT

    ################################ Functions ################################

    def prim_refresh(self):
        """Refresh the palette and start the auto-refresh poll loop once."""
        self.check_for_device_change(True)
        if not(self._auto_refresh):
            self._auto_refresh = True
            self.refresh()

    def prim_enggen(self):
        """Estimated renewable generation (MW) for the next hour, or ERROR."""
        try:
            archivo = urllib.urlopen('https://www.fing.edu.uy/cluster/eolica/pron_pot_parques/GUASU.txt')
            dato = float (archivo.read())
            dato = round(dato, 2)
            archivo.close()
        except:
            dato = ERROR
        return dato

    def prim_engmax(self):
        """Nominal renewable capacity (MW) in Uruguay, or ERROR."""
        try:
            archivo = urllib.urlopen('https://www.fing.edu.uy/cluster/eolica/pron_pot_parques/GUASUnom.txt')
            dato = float (archivo.read())
            dato = round(dato, 2)
            archivo.close()
        except:
            dato = ERROR
        return dato

    def prim_engrec(self):
        """Recommended renewable energy value (MW) for use, or ERROR."""
        try:
            archivo = urllib.urlopen('https://www.fing.edu.uy/cluster/eolica/pron_pot_parques/EOLO.txt')
            dato = float (archivo.read())
            archivo.close()
            dato = round(dato, 2)
        except:
            dato = ERROR
        return dato

    def prim_on(self):
        """Constant 1, the relay ON signal."""
        return 1

    def prim_off(self):
        """Constant 0, the relay OFF signal."""
        return 0

    def prim_control(self, valor, nom):
        """Turn relay block `nom` on/off: 1 means on, 0 means off."""
        port = RELAY_PORT[nom]
        try:
            valor = int(valor)
        except:
            pass
        if valor in [0, 1]:
            self.robot.setRelay(port, valor)
        else:
            msj = _('ERROR: Use 0 or 1, not %s')
            raise logoerror(msj % valor)

    ################################ Turtle calls ################################

    def quit(self):
        """Stop the polling loop when the activity shuts down."""
        self.actualizable = False
        if self.pollthread:
            self.pollthread.cancel()

    ################################  ################################

    def check_for_device_change(self, force_refresh=False):
        """ if there exists new devices connected or disconections to the butia IO board,
        then it change the color of the blocks corresponding to the device """
        old_list_connected_device_module = self._list_connected_device_module[:]
        self._list_connected_device_module = self.robot.getModulesList()
        if force_refresh:
            self.update_palette_colors(True)
        else:
            if not(old_list_connected_device_module == self._list_connected_device_module):
                self.update_palette_colors(False)

    def update_palette_colors(self, flag):
        # if there exists new RELAY connected or disconections to the butia IO board,
        # then it change the color of the blocks corresponding
        global RELAY_PORT
        regenerar_paleta = False
        cant_modulos_conectados = 0
        l = self._list_connected_device_module[:]
        modulos_nuevos = []
        self.modulos_conectados = []
        mods = []
        for e in l:
            t = self.robot._split_module(e)
            # t is like ('5', 'relay', '0'): (port, module name, extra)
            if t[1] == 'relay':
                self.modulos_conectados.append(t[0])
                modulos_nuevos.append(t[0])
                mods.append(t[1] + ":" + t[0])
        modulos_nuevos = self.modulos_conectados[:]
        # NOTE(review): live values are stubbed out with constants here;
        # restore prim_enggen()/prim_engrec() to use real data.
        #genera = self.prim_enggen()
        #valor = self.prim_engrec()
        genera = 68
        valor = 60
        cont_relay = 0
        for blk in self._parent.block_list.list:
            if blk.name.endswith('agh'):
                if blk.name == 'enggenagh':
                    # Green when generation meets the recommended value,
                    # red when below it, grey when data is unavailable.
                    if genera >= 0:
                        if valor >= 0:
                            if genera >= valor:
                                special_block_colors[blk.name] = COLOR_PRESENT
                            else:
                                special_block_colors[blk.name] = COLOR_RED
                        else:
                            special_block_colors[blk.name] = COLOR_NOTPRESENT
                    else:
                        special_block_colors[blk.name] = COLOR_NOTPRESENT
                elif blk.name == 'engrecagh':
                    if valor >= 0:
                        special_block_colors[blk.name] = COLOR_PRESENT
                    else:
                        special_block_colors[blk.name] = COLOR_NOTPRESENT
                elif blk.name == 'engmaxagh':
                    if valor >= 0:
                        special_block_colors[blk.name] = COLOR_PRESENT
                    else:
                        special_block_colors[blk.name] = COLOR_NOTPRESENT
                elif blk.name[:5] == 'relay':
                    # Assign the next detected relay port to this block.
                    if len(modulos_nuevos) > 0:
                        RELAY_PORT[blk.name] = modulos_nuevos[0]
                        tmp = modulos_nuevos[0]
                        modulos_nuevos.remove(tmp)
                    tmp = 'relay:' + str(RELAY_PORT[blk.name])
                    if tmp in mods:
                        cant_modulos_conectados += 1
                        if (blk.type == 'proto'):
                            blk.set_visibility(True)
                            regenerar_paleta = True
                        label = 'relay:' + str(RELAY_PORT[blk.name])
                        special_block_colors[blk.name] = COLOR_PRESENT
                    else:
                        label = 'relay'
                        if(blk.type == 'proto'):
                            regenerar_paleta = True
                            # Bug fix: `<>` (removed in Python 3) replaced
                            # by the equivalent `!=`.
                            if (RELAY_PORT[blk.name] != 0) | (blk.name == 'relay1agh'):
                                if not blk.get_visibility():
                                    blk.set_visibility(True)
                            else:
                                blk.set_visibility(False)
                        special_block_colors[blk.name] = COLOR_NOTPRESENT
                    blk.spr.set_label(label)
                    block_names[blk.name][0] = label
                    blk.refresh()
        if regenerar_paleta:
            index = palette_name_to_index('atyarandu')
            if index is not None:
                self._parent.regenerate_palette(index)

    def refresh(self):
        """Poll for device changes every 3 seconds until quit()."""
        if self._parent.get_init_complete():
            if self.actualizable:
                self.check_for_device_change()
                self.pollthread = threading.Timer(3, self.refresh)
                self.pollthread.start()
|
# Competitive-programming style solution: reads t test cases of (k, a, b)
# and prints an accumulated weighted count modulo 1e9+7.
from math import pow
mod=1000000007  # modulus for the printed answer
t=int(input())  # number of test cases
while(t):
    t-=1
    k,a,b=map(int,input().split())
    ans=0
    j=1
    # j walks the sequence 1, 1*k+1, (1*k+1)*k+1, ... while j <= b; each step
    # contributes (b - max(j, a) + 1) * j to the running sum.
    while(j<=b):
        ans+=((b-max(j,a)+1)*j)
        j*=(k)
        j+=1  # the +1 also guarantees progress when k == 1
    print(ans%mod)
#!/usr/bin/env python3
#
# $ env PYTHONPATH=.. python3 hello_world.py
# Tree: ['Hello', ',', 'World', '!']
#
import textparser
from textparser import Sequence
class Parser(textparser.Parser):
    """Grammar for sentences of the form 'WORD, WORD!'."""

    def token_specs(self):
        """Token definitions: skip whitespace; words, '!' and ',' are tokens."""
        specs = [
            ('SKIP', r'[ \r\n\t]+'),
            ('WORD', r'\w+'),
            ('EMARK', '!', r'!'),
            ('COMMA', ',', r','),
            ('MISMATCH', r'.'),
        ]
        return specs

    def grammar(self):
        """A sentence is WORD ',' WORD '!'."""
        return Sequence('WORD', ',', 'WORD', '!')
# Demo: parse a fixed sample sentence and show the resulting parse tree.
tree = Parser().parse('Hello, World!')
print('Tree:', tree)
|
from .utils import latest_version, validate
from .utils import SchemaNotFoundError, InvalidSchemaError, ValidationError
from .version import __version__
|
"""This module is a collection of classes and
operations for the arithmetic expressions that
will come as input from the end user.
"""
import pycalco
from pycalco.shell import PyCalcoShell
class PyCalco(object):
    """Application facade: binds the package version to an interactive shell
    and exposes one-shot and interactive-loop execution."""

    def __init__(self):
        self.version = pycalco.version  # package version (pycalco.version)
        # Shell instance; presumably cmd.Cmd-based (onecmd/cmdloop API) — confirm.
        self.shell = PyCalcoShell()

    def exec(self, cmd):
        """Run a single shell command line non-interactively."""
        self.shell.onecmd(cmd)

    def run(self):
        """Enter the interactive command loop (blocks until the shell exits)."""
        self.shell.cmdloop()
|
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.stats
import matplotlib.gridspec as gridspec
# mpl.use('TkAgg')
# create base data
np.random.seed(12345)  # fixed seed so the chart is reproducible
# Four normally-distributed samples of 3650 points, one row per year.
df = pd.DataFrame([np.random.normal(32000,200000,3650),
                   np.random.normal(43000,100000,3650),
                   np.random.normal(43500,140000,3650),
                   np.random.normal(48000,70000,3650)],
                  index=[1992,1993,1994,1995])
# transpose, so years become columns
dft = df.T
# function that returns mean, min value 0.95 confidence, max value 0.95 confidence
def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, lower bound, upper bound) of the t-based CI for `data`."""
    sample = 1.0 * np.array(data)
    count = len(sample)
    mean = np.mean(sample)
    sem = scipy.stats.sem(sample)
    half_width = sem * scipy.stats.t.ppf((1 + confidence) / 2., count - 1)
    return mean, mean - half_width, mean + half_width
#dft.describe(include='all')
# create a dataframe with rows = years and columns = mean, cimin, cimax
# values form dataframe can be used to feed to the bar chart
# first create a dict to feed the dataframe
years = ['1992', '1993', '1994', '1995']
yearLen = len(years)
index = np.arange(yearLen)  # x positions of the bars
# create a dict {'1993': pd.Series(all the values for 1993), '1994':... }
seriesPerYear = {}
for i in range(0, yearLen):
    seriesPerYear[years[i]] = dft.iloc[:,i]
# calculate mean, cimin, cimax and add to the dict
ciDict = {'mean':[],'ciMinIntv':[], 'ciMaxIntv':[]}
for s in seriesPerYear.values():
    mn, cimin, cimax = mean_confidence_interval(s)
    ciDict['mean'].append(mn)
    ciDict['ciMinIntv'].append(mn-cimin)  # distance from mean to lower bound
    ciDict['ciMaxIntv'].append(cimax-mn)  # distance from mean to upper bound
# create a dataframe form the dict
#              ciMaxIntv    ciMinIntv          mean
# 1992       6510.938018  6510.938018  33312.107476
# 1993       2264.561291  2264.561291  47743.550969
# 1994       3193.254844  3193.254844  41861.859541
# 1995       4555.329592  4555.329592  39493.304941
cidf = pd.DataFrame(ciDict, index=years)
ciArray = [cidf['ciMinIntv'], cidf['ciMaxIntv']]  # asymmetric yerr input
# create subplots
fig = plt.figure()
gspec = gridspec.GridSpec(7, 1) # 1 col, 5 rows / put colorbar in row 5
ax = plt.subplot(gspec[:5, 0])   # main bar chart
cbax = plt.subplot(gspec[6:, 0]) # colorbar strip at the bottom
colorDk = '#333333'
error_kw = {}  # NOTE(review): unused in the visible code
cmap = mpl.cm.get_cmap('bwr')
# create color bar
cb = mpl.colorbar.ColorbarBase(cbax, cmap=cmap, orientation='horizontal')
cb.set_label('Relation of the value of interest to the mean')
cb.set_ticks([0.0, 1.0])
cb.set_ticklabels(['Above mean', 'Below the mean'])
cb.ax.xaxis.set_tick_params(labelsize='small')
def updatePlot():
    """Redraw the bar chart, coloring each bar by where the global value of
    interest (voi) falls relative to that year's confidence interval."""
    global voi, ax
    ax.cla()
    print(voi)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.set_xticks(np.arange(4))
    ax.set_xticklabels(years)
    ax.set_title("Values per year (click to change value of interest)")
    ax.text(-0.35, voi+450, "Value of interest: {0:0.0f}".format(voi), fontsize=8)
    plotColors = []
    for y in years:
        # check if voi is under mean-cmin, over mean+xmax or interpollate between
        cimin = cidf.loc[y, 'mean'] - cidf.loc[y, 'ciMinIntv']
        cimax = cidf.loc[y, 'mean'] + cidf.loc[y, 'ciMaxIntv']
        r = (voi - cimin) / (cimax-cimin)  # position of voi inside the CI, 0..1
        if r < 0.0:
            rgba = cmap(1.0)   # voi below the interval -> full "below" color
            plotColors.append(rgba)
        elif r > 1.0:
            rgba = cmap(0.0)   # voi above the interval -> full "above" color
            plotColors.append(rgba)
        else: #interpollate
            rgba = cmap(1.0-r)
            plotColors.append(rgba)
        #ax.set_title("{}".format(r))
    print('plot colors:', plotColors)
    ax.bar(index, cidf['mean'], width=0.6, yerr=ciArray, label='',
           color= plotColors,
           edgecolor= colorDk,
           lw=0.5,
           capsize=5,
           ecolor=colorDk
           )
    ax.axhline(voi, color=colorDk)  # horizontal line marking the value of interest
def clear(event):
    """Mouse-press handler: wipe the axes before the release event redraws."""
    print('clearing')
    global voi, ax
    ax.cla()
def onclick(event):
    """Mouse-release handler: take the click's y-value as the new value of
    interest and redraw the chart."""
    global voi
    voi = event.ydata
    updatePlot()
# Wire mouse events: press clears, release picks the new value and redraws.
# NOTE(review): `cid` is overwritten by the second connect call, so the first
# connection id is lost (both callbacks stay registered).
cid = fig.canvas.mpl_connect('button_press_event', clear)
cid = fig.canvas.mpl_connect('button_release_event', onclick)
# set initial value of interest
voi = 40000
updatePlot()
plt.show()
|
import time
from datetime import timedelta
from typing import Union
def usec() -> int:
    """Current Unix time, truncated to whole microseconds."""
    now = time.time()
    return int(now * 1000000)


def msec() -> int:
    """Current Unix time, truncated to whole milliseconds."""
    micros = usec()
    return int(micros / 1000)


def sec() -> int:
    """Current Unix time, truncated to whole seconds."""
    return int(time.time())
def format_duration_us(t_us: Union[int, float]) -> str:
    """Formats the given microsecond duration as a string.

    Picks the two most significant units for durations of a minute or more
    (e.g. '1d 2h', '3m 4s'); below that, a single unit ('5 sec', '7 ms',
    '9 μs').
    """
    micros = int(t_us)
    millis = micros / 1000
    seconds = millis / 1000
    minutes = seconds / 60
    hours = minutes / 60
    days = hours / 24
    if days >= 1:
        return f"{int(days)}d {int(hours % 24)}h"
    if hours >= 1:
        return f"{int(hours)}h {int(minutes % 60)}m"
    if minutes >= 1:
        return f"{int(minutes)}m {int(seconds % 60)}s"
    if seconds >= 1:
        return f"{int(seconds)} sec"
    if millis >= 1:
        return f"{int(millis)} ms"
    return f"{micros} μs"
def format_duration_td(value: timedelta, precision: int = 0) -> str:
    """Format a timedelta as compact 'XdYhZmWs' pieces.

    Zero-valued pieces are omitted (except that a zero duration yields '0s').
    With precision > 0, only the first `precision` pieces are kept.
    """
    parts = []
    if value.days:
        parts.append(f"{value.days}d")
    hours, remainder = divmod(value.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours:
        parts.append(f"{hours}h")
    if minutes:
        parts.append(f"{minutes}m")
    if seconds > 0 or not parts:
        parts.append(f"{seconds}s")
    selected = parts if precision == 0 else parts[:precision]
    return "".join(selected)
import numpy as np
import re
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy.sql import exists
from flask import Flask, jsonify
# Database Setup
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Turn existing database into a new model
Base = automap_base()
# Create Table
Base.prepare(engine, reflect=True)  # reflect the sqlite schema into classes
# Save References to the Table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Set up Flask
app = Flask(__name__)
# Flask Routes
@app.route("/")
def welcome():
    """List all available api routes."""
    routes = [
        "Available Routes:",
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/start (enter YYYY-MM-DD)",
        "/api/v1.0/start/end (enter YYYY-MM-DD/YYYY-MM-DD)",
    ]
    return "<br/>".join(routes)
# Convert query results into a dictionary using `date` as the key and `tobs` as the value
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return all (date, tobs) pairs as a JSON list of dicts.

    NOTE(review): despite the route name, this queries `tobs` (temperature),
    not `prcp` — confirm intent against the assignment spec.
    """
    # Create a session from Python to the DB
    session = Session(engine)
    # Query Measurement
    results = (session.query(Measurement.date, Measurement.tobs)
               .order_by(Measurement.date))
    # Create a dictionary
    precipitation_date_tobs = []
    for each_row in results:
        dt_dict = {}
        dt_dict["date"] = each_row.date
        dt_dict["tobs"] = each_row.tobs
        precipitation_date_tobs.append(dt_dict)
    return jsonify(precipitation_date_tobs)
# Return a JSON List of Stations from Dataset
@app.route("/api/v1.0/stations")
def stations():
    """Return the station names as a flat JSON list."""
    # Create Session from Python to DB
    session = Session(engine)
    # Query Stations
    results = session.query(Station.name).all()
    # Convert List of Tuples into Normal list
    station_details = list(np.ravel(results))
    return jsonify(station_details)
# Query the Dates and Temperature for the Most Active Station for Last Year
@app.route("/api/v1.0/tobs")
def tobs():
    """Return a year of temperature observations for the most active station."""
    # Create our session from Python to DB
    session = Session(engine)
    # Query Measurements for Latest Data and Calculate Start Date
    latest_date = (session.query(Measurement.date)
                   .order_by(Measurement.date
                             .desc())
                   .first())
    # first() returns a 1-tuple; strip the quote/comma from its str() form
    # so it can be parsed with the '(%Y-%m-%d)' format below.
    latest_date_str = str(latest_date)
    latest_date_str = re.sub("'|,", "",latest_date_str)
    latest_date_obj = dt.datetime.strptime(latest_date_str, '(%Y-%m-%d)')
    query_start_date = dt.date(latest_date_obj.year, latest_date_obj.month, latest_date_obj.day) - dt.timedelta(days=366)
    # Query Station Names and Observation Counts. Descend Sort and Select Most Active.
    q_station_list = (session.query(Measurement.station, func.count(Measurement.station))
                      .group_by(Measurement.station)
                      .order_by(func.count(Measurement.station).desc())
                      .all())
    station_hno = q_station_list[0][0]  # station id with the most observations
    print(station_hno)
    # Return a list of tobs for Year prior to Last Day.
    results = (session.query(Measurement.station, Measurement.date, Measurement.tobs)
               .filter(Measurement.date >= query_start_date)
               .filter(Measurement.station == station_hno)
               .all())
    # Create JSON results
    tobs_list = []
    for result in results:
        line = {}
        line["Date"] = result[1]
        line["Station"] = result[0]
        line["Temperature"] = int(result[2])
        tobs_list.append(line)
    return jsonify(tobs_list)
# Calculate `TMIN`, `TAVG`, and `TMAX` for all dates >= to Start Date
@app.route("/api/v1.0/<start>")
def start_only(start):
    """Return min/avg/max temperature from `start` (YYYY-MM-DD) onward,
    or a 404 JSON error with the valid date range."""
    # Create session from Python to DB
    session = Session(engine)
    # Date Range
    date_range_max = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
    date_range_max_str = str(date_range_max)
    date_range_max_str = re.sub("'|,", "",date_range_max_str)
    print (date_range_max_str)
    # NOTE(review): no order_by here — relies on table order for the minimum date.
    date_range_min = session.query(Measurement.date).first()
    date_range_min_str = str(date_range_min)
    date_range_min_str = re.sub("'|,", "",date_range_min_str)
    print (date_range_min_str)
    # Check for Valid Start Date (date must literally exist in the table)
    valid_entry = session.query(exists().where(Measurement.date == start)).scalar()
    if valid_entry:
        results = (session.query(func.min(Measurement.tobs)
                                 ,func.avg(Measurement.tobs)
                                 ,func.max(Measurement.tobs))
                   .filter(Measurement.date >= start).all())
        tmin =results[0][0]
        tavg ='{0:.4}'.format(results[0][1])
        tmax =results[0][2]
        result_printout =( ['Start Date: ' + start,
                            'The Lowest Temperature was: ' + str(tmin) + ' F',
                            'The Average Temperature was: ' + str(tavg) + ' F',
                            'The Highest Temperature was: ' + str(tmax) + ' F'])
        return jsonify(result_printout)
    return jsonify({"error": f"Date {start} not valid. Date Range is {date_range_min_str} to {date_range_max_str}"}), 404
# Calculate the `TMIN`, `TAVG`, and `TMAX` for Dates between Start Date/End Date.
@app.route("/api/v1.0/<start>/<end>")
def start_end(start, end):
    """Return min/avg/max temperature between `start` and `end` (inclusive,
    YYYY-MM-DD), or a 404 JSON error naming the invalid date(s)."""
    # Create session from Python to DB
    session = Session(engine)
    # Date Range
    date_range_max = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
    date_range_max_str = str(date_range_max)
    date_range_max_str = re.sub("'|,", "",date_range_max_str)
    print (date_range_max_str)
    # NOTE(review): no order_by here — relies on table order for the minimum date.
    date_range_min = session.query(Measurement.date).first()
    date_range_min_str = str(date_range_min)
    date_range_min_str = re.sub("'|,", "",date_range_min_str)
    print (date_range_min_str)
    # Check for valid Start Date
    valid_entry_start = session.query(exists().where(Measurement.date == start)).scalar()
    # Check for valid End Date
    valid_entry_end = session.query(exists().where(Measurement.date == end)).scalar()
    if valid_entry_start and valid_entry_end:
        results = (session.query(func.min(Measurement.tobs)
                                 ,func.avg(Measurement.tobs)
                                 ,func.max(Measurement.tobs))
                   .filter(Measurement.date >= start)
                   .filter(Measurement.date <= end).all())
        tmin =results[0][0]
        tavg ='{0:.4}'.format(results[0][1])
        tmax =results[0][2]
        result_printout =( ['Start Date: ' + start,
                            'End Date: ' + end,
                            'The Lowest Temperature was: ' + str(tmin) + ' F',
                            'The Average Temperature was: ' + str(tavg) + ' F',
                            'The Highest Temperature was: ' + str(tmax) + ' F'])
        return jsonify(result_printout)
    if not valid_entry_start and not valid_entry_end:
        return jsonify({"error": f"Start {start} and End Date {end} not valid. Date Range is {date_range_min_str} to {date_range_max_str}"}), 404
    if not valid_entry_start:
        return jsonify({"error": f"Start Date {start} not valid. Date Range is {date_range_min_str} to {date_range_max_str}"}), 404
    if not valid_entry_end:
        return jsonify({"error": f"End Date {end} not valid. Date Range is {date_range_min_str} to {date_range_max_str}"}), 404
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
# coding: utf-8
from __future__ import unicode_literals
from .ard import ARDMediathekBaseIE
from ..utils import (
ExtractorError,
get_element_by_attribute,
)
class SRMediathekIE(ARDMediathekBaseIE):
    """Extractor for sr-mediathek.de (Saarländischer Rundfunk) videos."""

    IE_NAME = "sr:mediathek"
    IE_DESC = "Saarländischer Rundfunk"
    # Video id is the numeric `id` query parameter.
    _VALID_URL = (
        r"https?://sr-mediathek(?:\.sr-online)?\.de/index\.php\?.*?&id=(?P<id>[0-9]+)"
    )
    _TESTS = [
        {
            "url": "http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455",
            "info_dict": {
                "id": "28455",
                "ext": "mp4",
                "title": "sportarena (26.10.2014)",
                "description": "Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ",
                "thumbnail": r"re:^https?://.*\.jpg$",
            },
            "skip": "no longer available",
        },
        {
            "url": "http://sr-mediathek.sr-online.de/index.php?seite=7&id=37682",
            "info_dict": {
                "id": "37682",
                "ext": "mp4",
                "title": "Love, Cakes and Rock'n'Roll",
                "description": "md5:18bf9763631c7d326c22603681e1123d",
            },
            "params": {
                # m3u8 download
                "skip_download": True,
            },
        },
        {
            "url": "http://sr-mediathek.de/index.php?seite=7&id=7480",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Site-specific marker for removed/expired content.
        if (
            ">Der gewünschte Beitrag ist leider nicht mehr verfügbar.<"
            in webpage
        ):
            raise ExtractorError(
                "Video %s is no longer available" % video_id, expected=True
            )
        # The ARD player's media-collection URL carries the actual formats.
        media_collection_url = self._search_regex(
            r'data-mediacollection-ardplayer="([^"]+)"', webpage, "media collection url"
        )
        info = self._extract_media_info(media_collection_url, webpage, video_id)
        info.update(
            {
                "id": video_id,
                "title": get_element_by_attribute("class", "ardplayer-title", webpage),
                "description": self._og_search_description(webpage),
                "thumbnail": self._og_search_thumbnail(webpage),
            }
        )
        return info
|
# Particles und Confetti
# Nach dem Processing (Java) Sketch von Daniel Shiffmann
# aus: The Nature of Code, o.O., 2012, Seite 166ff
# Dieses Mal mit externer Bibliothek
size(500, 500)   # canvas size (NodeBox builtin)
speed(30)        # animation frame rate
colormode(RGB)
from pvector import PVector
pt = ximport ("particle")  # NodeBox-style module import
particles = []  # plain particles currently alive
confetti = []   # rotating confetti particles currently alive
def setup():
    # Emitter location: horizontally centered, near the top of the canvas.
    global loc
    loc = PVector(WIDTH/2, 50)
def draw():
    """Emit one particle or confetto per frame, then advance and cull all
    live particles."""
    global loc
    background("#1f2838")
    r = random()
    if r < 0.5:
        particles.append(pt.Particle(loc))
    else:
        confetti.append(pt.Confetti(loc))
    # Iterate backwards down to index 0 (stop of -1) so pop(i) never shifts
    # an index still to be visited. Bug fix: the original used a stop of 0,
    # which skipped the element at index 0 — it was never run nor removed.
    for i in range(len(particles) - 1, -1, -1):
        particles[i].run()
        if particles[i].isDead():
            particles.pop(i)
    # print("Particels: ", len(particles))
    for i in range(len(confetti) - 1, -1, -1):
        confetti[i].run()
        confetti[i].theta += 0.7
        if confetti[i].theta >= 360:
            confetti[i].theta = 0.0
        if confetti[i].isDead():
            confetti.pop(i)
    # print("Konfetti: ", len(confetti))
|
import m1
# Exercise m1.getdata with the argument 10.
m1.getdata(10)
|
"""Joint MAP state-path and parameter estimator."""
import itertools
import numpy as np
from ceacoest import optim, rk, utils
class Problem(optim.Problem):
    """Joint MAP state-path and parameter estimation problem."""

    def __init__(self, model, t, y, u):
        self.model = model
        """Underlying model."""

        self.collocation = col = rk.LGLCollocation(model.collocation_order)
        """Collocation method."""

        assert np.ndim(t) == 1
        self.piece_len = np.diff(t)
        """Normalized length of each collocation piece."""

        assert np.all(self.piece_len > 0)
        self.npieces = npieces = len(self.piece_len)
        """Number of collocation pieces."""

        self.tc = self.collocation.grid(t)
        """Collocation time grid."""

        npoints = self.tc.size
        ncoarse = len(t)
        self.npoints = npoints
        """Total number of collocation points."""

        super().__init__()
        # Decision variables: states at every collocation point, process
        # noise per piece, and the model parameters.
        x = self.register_decision('x', model.nx, npoints)
        wp = self.register_decision('wp', (col.n, model.nw), npieces)
        p = self.register_decision('p', model.np)

        # 'xp' is the piece-ravelled view of x used by the defect constraint.
        self.register_derived('xp', PieceRavelledVariable(self, 'x'))
        self.register_constraint(
            'e', model.e, ('xp','wp','up','p', 'piece_len'), model.ne, npieces
        )

        assert isinstance(y, np.ndarray)
        assert y.shape == (ncoarse, self.model.ny)
        # Keep only coarse-grid rows with at least one unmasked measurement.
        ymask = np.ma.getmaskarray(y)
        kmeas_coarse, = np.nonzero(np.any(~ymask, axis=1))
        self.kmeas = kmeas_coarse * self.collocation.ninterv
        """Collocation time indices with active measurements."""

        self.y = y[kmeas_coarse]
        """Measurements at the time indices with active measurements."""

        self.nmeas = np.size(self.kmeas)
        """Number of measurement indices."""

        # The input may be given as a callable of time or as an array.
        if callable(u):
            u = u(self.tc)
        assert isinstance(u, np.ndarray)
        assert u.shape == (self.npoints, model.nu)
        self.u = u
        """The inputs at the fine grid points."""

        self.um = self.u[self.kmeas]
        """The inputs at the measurement points."""

        # Ravel inputs per piece; interior piece boundaries are duplicated.
        up = np.zeros((npieces, self.collocation.n, model.nu))
        up[:, :-1].flat = u[:-1, :].flat
        up[:-1, -1] = up[1:, 0]
        up[-1, -1] = u[-1]
        self.up = up
        """Piece-ravelled inputs."""

        self.register_derived('xm', XMVariable(self))
        self._register_model_constraint_derivatives('e', ('xp','wp','p'))
        # Merits: measurement log-likelihood L and path prior J.
        self.register_merit('L', model.L, ('y','xm','um','p'), self.nmeas)
        self.register_merit(
            'J', model.J, ('xp', 'wp', 'up', 'p', 'piece_len'), npieces
        )
        self._register_model_merit_derivatives('L', ('xm', 'p'))
        self._register_model_merit_derivatives('J', ('xp', 'wp', 'p'))

    def _register_model_merit_gradient(self, merit_name, wrt_name):
        """Register the model-supplied gradient d<merit>/d<wrt>."""
        grad = getattr(self.model, f'd{merit_name}_d{wrt_name}')
        self.register_merit_gradient(merit_name, wrt_name, grad)

    def _register_model_merit_hessian(self, merit_name, wrt_names):
        """Register the model-supplied sparse Hessian of a merit."""
        hess_name = utils.double_deriv_name(merit_name, wrt_names)
        val = getattr(self.model, f'{hess_name}_val')
        ind = getattr(self.model, f'{hess_name}_ind')
        self.register_merit_hessian(merit_name, wrt_names, val, ind)

    def _register_model_merit_derivatives(self, merit_name, wrt_names):
        """Register gradients and all Hessian combinations for a merit."""
        for wrt_name in wrt_names:
            self._register_model_merit_gradient(merit_name, wrt_name)
        for comb in itertools.combinations_with_replacement(wrt_names, 2):
            self._register_model_merit_hessian(merit_name, comb)

    def _register_model_constraint_jacobian(self, constraint_name, wrt_name):
        """Register the model-supplied sparse Jacobian of a constraint."""
        val = getattr(self.model, f'd{constraint_name}_d{wrt_name}_val')
        ind = getattr(self.model, f'd{constraint_name}_d{wrt_name}_ind')
        self.register_constraint_jacobian(constraint_name, wrt_name, val, ind)

    def _register_model_constraint_hessian(self, constraint_name, wrt_names):
        """Register the model-supplied sparse Hessian of a constraint."""
        hess_name = utils.double_deriv_name(constraint_name, wrt_names)
        val = getattr(self.model, f'{hess_name}_val')
        ind = getattr(self.model, f'{hess_name}_ind')
        self.register_constraint_hessian(constraint_name, wrt_names, val, ind)

    def _register_model_constraint_derivatives(self, cons_name, wrt_names):
        """Register Jacobians and all Hessian combinations for a constraint."""
        for wrt_name in wrt_names:
            self._register_model_constraint_jacobian(cons_name, wrt_name)
        for comb in itertools.combinations_with_replacement(wrt_names, 2):
            self._register_model_constraint_hessian(cons_name, comb)

    def variables(self, dvec):
        """Get all variables needed to evaluate problem functions."""
        return {'y': self.y, 'um': self.um, 'u': self.u, 'up': self.up,
                'piece_len': self.piece_len, **super().variables(dvec)}

    def set_decision_item(self, name, value, dvec):
        """Set a named decision-vector component via the model's symbol map."""
        self._set_decision_item(name, value, self.model.symbol_index_map, dvec)

    def set_defect_scale(self, name, value, cvec):
        """Set the scale of the defect constraint for one state component."""
        component_name, index = self.model.symbol_index_map[name]
        if component_name != 'x':
            raise ValueError(f"'{name}' is not a component of the state vector")
        e = self.constraints['e'].unpack_from(cvec)
        e = e.reshape((self.npieces, self.collocation.ninterv, self.model.nx))
        e[(..., *index)] = value
class PieceRavelledVariable:
    """View of a gridded decision variable as (npieces, col.n, nvar), with
    piece-boundary points shared between consecutive pieces."""

    def __init__(self, problem, var_name):
        self.p = problem
        self.var_name = var_name

    @property
    def var(self):
        # Underlying decision-variable descriptor.
        return self.p.decision[self.var_name]

    @property
    def nvar(self):
        # Number of components per grid point.
        return self.var.shape[1]

    @property
    def shape(self):
        return (self.p.npieces, self.p.collocation.n, self.nvar)

    @property
    def tiling(self):
        return self.p.npieces

    def build(self, variables):
        """Ravel the flat grid values into per-piece blocks (boundaries duplicated)."""
        v = variables[self.var_name]
        assert v.shape == self.var.shape
        vp = np.zeros(self.shape)
        vp[:, :-1].flat = v[:-1, :].flat
        vp[:-1, -1] = vp[1:, 0]
        vp[-1, -1] = v[-1]
        return vp

    def add_to(self, destination, value):
        """Accumulate per-piece values back onto the flat grid; shared
        boundary points receive contributions from both adjacent pieces."""
        vp = np.asarray(value)
        assert vp.shape == self.shape
        v = np.zeros(self.var.shape)
        v[:-1].flat = vp[:, :-1].flatten()
        v[self.p.collocation.n-1::self.p.collocation.n-1] += vp[:, -1]
        self.var.add_to(destination, v)

    def expand_indices(self, ind):
        """Map within-piece indices to flat decision-vector indices."""
        npieces = self.p.npieces
        increments = self.p.collocation.ninterv * self.nvar
        return ind + np.arange(npieces)[:, None] * increments + self.var.offset
class XMVariable:
    """View of the states at the measurement time indices (kmeas)."""

    def __init__(self, problem):
        self.p = problem

    @property
    def tiling(self):
        return self.p.nmeas

    @property
    def shape(self):
        return (self.p.nmeas, self.p.model.nx)

    def build(self, variables):
        # Select the state rows at the measurement indices.
        x = variables['x']
        return x[self.p.kmeas]

    def add_to(self, destination, value):
        # Accumulate contributions back onto the measured state rows.
        assert np.shape(value) == self.shape
        x = self.p.decision['x'].unpack_from(destination)
        x[self.p.kmeas] += value

    def expand_indices(self, ind):
        # Map measurement-local indices to flat decision-vector indices.
        x_offset = self.p.decision['x'].offset
        ind = np.asarray(ind, dtype=int)
        return ind + x_offset + self.p.kmeas[:, None] * self.p.model.nx
|
import os
from domainbed.lib import misc
import numpy as np
import torch
import torch.nn.functional as F
import json
def argmax(iterable):
    """Index of the first maximal element of `iterable`."""
    indexed = enumerate(iterable)
    best = max(indexed, key=lambda pair: pair[1])
    return best[0]
def argmin(iterable):
    """Index of the first minimal element of `iterable`."""
    indexed = enumerate(iterable)
    best = min(indexed, key=lambda pair: pair[1])
    return best[0]
def percent(number):
    """Format a fraction as a LaTeX-escaped percentage, e.g. 0.1234 -> '12.34\\%'."""
    scaled = round(number, 4) * 100
    return f"{scaled:.2f}\\%"
def percent_no(number):
    """Format a fraction as a percentage without the '%' suffix."""
    scaled = round(number, 4) * 100
    return f"{scaled:.2f}"
def round_l(num_list, digit=4):
    """Round every number in `num_list` to `digit` decimal places."""
    rounded = []
    for value in num_list:
        rounded.append(round(value, digit))
    return rounded
def loss_gap(num_domains, env_loaders, model, device, whole=False):
    ''' compute gap = max_i loss_i(h) - min_j loss_j(h), return i, j, and the gap for the whole dataset'''
    ''' model = h, index are from 1 ... n'''
    max_env_loss, min_env_loss = -np.inf, np.inf
    max_index = min_index = 0
    for index, loader in enumerate(env_loaders):
        # Bug fix: the original called misc.loss(algorithm, ...) but no name
        # `algorithm` exists in this scope — the parameter is `model`.
        loss = misc.loss(model, loader, device)
        # print("index: ", index, "loss: ", loss)
        if index == 0 and not whole:
            # Skip domain 0 (held out) unless evaluating the whole dataset.
            continue
        if loss > max_env_loss:
            max_env_loss = loss
            max_index = index
        if loss < min_env_loss:
            min_env_loss = loss
            min_index = index
    return max_index, min_index, max_env_loss, min_env_loss
def loss_gap_batch(num_domains, minibatches, model, device):
    """Per-batch loss gap across domains.

    Computes max_i loss_i(h) and min_j loss_j(h) over the minibatches and
    returns min - max (negated, since the caller maximizes over h').
    """
    highest, lowest = -np.inf, np.inf
    for inputs, targets in minibatches:
        inputs = inputs.to(device)
        targets = targets.to(device)
        logits = model.predict(inputs)
        weights = torch.ones(len(inputs)).to(device)
        count = weights.sum().item()
        domain_loss = F.cross_entropy(logits, targets) * len(targets) / count
        if domain_loss > highest:
            highest = domain_loss
        if domain_loss < lowest:
            lowest = domain_loss
    return lowest - highest
def distance(h1, h2):
    """Frobenius distance between the parameters of two networks h1 and h2."""
    state1 = h1.state_dict()
    state2 = h2.state_dict()
    total = 0.
    for name in state1:
        diff = state1[name] - state2[name]
        total += torch.norm(diff) ** 2
    return torch.sqrt(total)
def proj(delta, adv_h, h):
    ''' return proj_{B(h, \delta)}(adv_h), Euclidean projection to Euclidean ball'''
    ''' adv_h and h are two classifiers'''
    dist = distance(adv_h, h)
    if dist <= delta:
        # Already inside the ball: return adv_h unchanged.
        return adv_h
    else:
        # Shrink adv_h toward h so the resulting distance equals delta.
        # Note: mutates adv_h's parameters in place via .data (bypasses autograd).
        ratio = delta / dist
        for param_h, param_adv_h in zip(h.parameters(), adv_h.parameters()):
            param_adv_h.data = param_h + ratio * (param_adv_h - param_h)
        # print("distance: ", distance(adv_h, h))
        return adv_h
def loss_acc(num_domains, env_loaders, model, device):
    '''evaluate a tuple of losses (of each domain) and a tuple of accs (of each domain)'''
    losses = []
    accs = []
    for i in range(num_domains):
        print(f'domain {i}')
        loader = env_loaders[i]
        losses.append(misc.loss(model, loader, device))
        accs.append(misc.accuracy(model, loader, None, device))
    return losses, accs
|
# Generated by Django 3.2.6 on 2021-11-30 20:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes FiiEdit.data_base optional: the DateField now allows blank
    # form input and NULL database values.

    dependencies = [
        ('offers', '0008_auto_20211130_1718'),
    ]

    operations = [
        migrations.AlterField(
            model_name='fiiedit',
            name='data_base',
            field=models.DateField(blank=True, null=True, verbose_name='Data-base'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.