lista = list()
def l(): print(30 * '-')
l()
print('type s at the name prompt to quit')
while 1:
    nome = str(input('name: ').lower())
    if nome == 's':
        break
    nota1 = float(input('grade 1: '))
    nota2 = float(input('grade 2: '))
    media = (nota1 + nota2) / 2
    lista.append([nome, [nota1, nota2], media])
l()
print(f'{"No.":<5}{"name":<10}{"average":>10}')
for i, p in enumerate(lista):
    print(f'{i:<5}{p[0]:<10}{p[2]:>10}')
l()
print('type s to quit\nwhich student do you want to see the grades for?')
while 1:
    op = input('n: ')
    if op == 's':
        break
    l()
    print(f'name: {lista[int(op)][0].title()}\ngrades: {lista[int(op)][1]}')
|
# -*- coding: utf-8 -*-
"""
.. module:: byroapi.cli
:synopsis: CLI interface
.. moduleauthor:: "Josef Nevrly <josef.nevrly@gmail.com>"
"""
import sys
import pkg_resources
import asyncio
import logging
import click
from onacol import ConfigManager, ConfigValidationError
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from . import __version__
from .byroapi import ByroApi
DEFAULT_CONFIG_FILE = pkg_resources.resource_filename(
"byroapi", "default_config.yaml")
logger = logging.getLogger("byroapi")
def global_exception_handler(loop, context):
msg = f"{context.get('message', '')} : {context.get('exception', '')} @ " \
f"{context.get('future','')}"
logger.error("Exception caught at global level: %s", msg)
@click.command(context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True
))
@click.option("--config", type=click.Path(exists=True), default=None,
help="Path to the configuration file.")
@click.option("--get-config-template", type=click.File("w"), default=None,
help="Write default configuration template to the file.")
@click.option("--fill-form", type=click.File("rb"), default=None,
help="Fill form using command line. Input is a yaml file.")
@click.option("--output", type=click.File("wb"), default="-")
@click.version_option(version=__version__)
@click.pass_context
def main(ctx, config, get_config_template, fill_form, output):
"""Console script for byroapi."""
# Instantiate config_manager
config_manager = ConfigManager(
DEFAULT_CONFIG_FILE,
env_var_prefix="byroapi",
optional_files=[config] if config else []
)
# Generate configuration for the --get-config-template option
# Then finish the application
if get_config_template:
config_manager.generate_config_example(get_config_template)
sys.exit(0)
# Load (implicit) environment variables
config_manager.config_from_env_vars()
# Parse all extra command line options
config_manager.config_from_cli_args(ctx.args)
# Validate the config
try:
config_manager.validate()
except ConfigValidationError as cve:
click.secho("<----------------Configuration problem---------------->",
fg='red')
# Logging is not yet configured at this point.
click.secho(str(cve), fg='red', err=True)
sys.exit(1)
# Asyncio loop setup
loop = asyncio.get_event_loop()
loop.set_exception_handler(global_exception_handler)
logging.basicConfig(level=getattr(
logging, config_manager.config['general']['log_level']),
format="%(asctime)s.%(msecs)03d [%(name)s][%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
stream=sys.stdout)
logging.getLogger("aiohttp").setLevel(logging.WARNING)
# Setup your main classes here
byroapi = ByroApi(config_manager.config, loop=loop)
# Run statically for CLI fill-form
if fill_form is not None:
try:
form_payload = yaml.load(fill_form, Loader=Loader)
byroapi.fill_form_to_file(form_payload, output)
sys.exit(0)
except Exception as e:
logger.exception(e)
sys.exit(1)
try:
click.secho("Running byroapi application ..", fg='green')
# Start the server
byroapi.start()
loop.run_forever()
except KeyboardInterrupt:
click.secho("<--------------- Shutting down ------------------->",
fg='red')
except Exception as e:
logger.exception(e)
finally:
try:
# Stop and cleanup your app here
byroapi.stop()
loop.run_until_complete(asyncio.sleep(1.0))
loop.close()
except Exception as e:
            logger.exception("Error occurred during shutdown : %s", e)
click.secho("<--------------- Stopped ------------------->", fg='red')
sys.exit(0)
if __name__ == "__main__":
main() # pragma: no cover
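# Example invocations (illustrative; file names and the `byroapi` console entry
# point are assumptions, not taken from the project metadata):
#   byroapi --get-config-template config.yaml
#   byroapi --config config.yaml
#   byroapi --config config.yaml --fill-form form.yaml --output filled_form.pdf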
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from model.model import create_model, load_model, save_model
from model.data_parallel import DataParallel
from logger import Logger
from dataset.dataset_factory import get_dataset
from trainer import Trainer
from main import get_optimizer
if __name__ == '__main__':
opt = opts().parse()
torch.manual_seed(opt.seed)
Dataset = get_dataset(opt.dataset)
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
path_1 = '/mnt/3dvision-cpfs/zhuoyu/CenterTrack/exp/ddd/nu_3d_det_uni/model_last.pth'
path_2 = '/mnt/3dvision-cpfs/zhuoyu/CenterTrack/exp/ddd/nu_3d_det_fix_param/model_last.pth'
model_1 = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
model_2 = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
optimizer = get_optimizer(opt, model_1)
model_1, _, _ = load_model(
model_1, path_1, opt, optimizer)
model_2, _, _ = load_model(
model_2, path_2, opt, optimizer)
for p1, p2 in zip(model_1.parameters(), model_2.parameters()):
if p1.data.ne(p2.data).sum() > 0:
print(False)
else:
print(True)
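# Equivalent aggregate check (sketch, not in the original script): True only if
# every corresponding parameter tensor is identical in the two checkpoints.
#   all_equal = all(torch.equal(p1, p2) for p1, p2 in
#                   zip(model_1.parameters(), model_2.parameters()))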
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.securitycenter.v1',
manifest={
'Folder',
},
)
class Folder(proto.Message):
r"""Message that contains the resource name and display name of a
folder resource.
Attributes:
resource_folder (str):
Full resource name of this folder. See:
https://cloud.google.com/apis/design/resource_names#full_resource_name
resource_folder_display_name (str):
The user defined display name for this
folder.
"""
resource_folder = proto.Field(
proto.STRING,
number=1,
)
resource_folder_display_name = proto.Field(
proto.STRING,
number=2,
)
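# Hypothetical usage sketch (not part of the generated module): proto-plus
# messages are constructed with keyword arguments, e.g.
#   folder = Folder(
#       resource_folder="//cloudresourcemanager.googleapis.com/folders/123",
#       resource_folder_display_name="my-folder",
#   )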
__all__ = tuple(sorted(__protobuf__.manifest))
|
import re
import secrets
import string
import unicodedata
from flask import request
import boto3
from .errors import ITSInvalidImageFileError
from .settings import MIME_TYPES, NAMESPACES
def get_redirect_location(namespace, query, filename):
config = NAMESPACES[namespace]
redirect_url = "{url}?{query_param}={scheme}://{host}/{namespace}/{path}".format(
url=config["url"],
query_param=config["query-param"],
scheme=request.scheme,
host=request.host,
namespace=namespace,
path=filename,
)
ext = query.pop("format", None)
for key, val in query.items():
redirect_url = redirect_url + ".{key}.{val}".format(key=key, val=val)
if ext:
redirect_url = redirect_url + "." + ext
return redirect_url
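# Illustrative result (hypothetical namespace config and request context):
#   get_redirect_location("thumbs", {"format": "png", "width": "200"}, "cat.jpg")
#   -> "<config url>?<query-param>=https://example.org/thumbs/cat.jpg.width.200.png"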
def validate_image_type(image):
if image.format.upper() not in MIME_TYPES:
raise ITSInvalidImageFileError("invalid image file")
return image
def upload_to_s3(image_file, bucket_name, key):
client = boto3.client('s3')
client.put_object(
Body=image_file,
Bucket=bucket_name,
Key=key,
ACL='public-read',
)
def slugify_filename(filename: str) -> str:
slug = unicodedata.normalize('NFKD', filename).encode('ascii', 'ignore')\
.decode('ascii')
name, extension = slug.strip().lower().rsplit('.', 1)
name = re.sub(r'[^\w\s-]', '', name)
name = re.sub(r'[-\s]+', '-', name)
return f"{name}-{get_random_string(6)}.{extension.lower()}"
def get_random_string(length):
return ''.join(secrets.choice(string.ascii_lowercase + string.digits)
for _ in range(length))
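# Hypothetical usage sketch (not part of the original module): unicode is folded
# to ASCII, whitespace becomes hyphens, and a 6-character random suffix is added
# before the lower-cased extension.
def _demo_slugify_filename():
    slug = slugify_filename("Café Photo 2021.JPG")  # e.g. "cafe-photo-2021-a1b2c3.jpg"
    assert slug.startswith("cafe-photo-2021-") and slug.endswith(".jpg")
    return slug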
|
from mamba import description, before, context, it, after
from doublex import Spy
from doublex_expects import have_been_called_with
from expects import expect, have_keys, be_a, have_len, be_above_or_equal
from os import getpid
from infcommon import logger
from infrabbitmq import factory
from infrabbitmq.rabbitmq import (
RabbitMQQueueEventProcessor,
TOPIC_EXCHANGE_TYPE,
)
# --------------------------------------------------
# Avoid pika logging
factory.configure_pika_logger_to_error()
# --------------------------------------------------
A_TOPIC_EXCHANGE_NAME = 'a_topic_exchange_name'
A_QUEUE_NAME = 'a_queue_name_{}'.format(getpid())
A_LIST_OF_TOPICS = '#'
A_NETWORK = 'a_network'
AN_EVENT_NAME = 'an_event_name'
AN_EVENT_DATA = 'an_event_data'
with description('RabbitMQEventPublisher integration test: Feature publish') as self:
with before.each:
self.sut_event_publisher = factory.rabbitmq_event_publisher(exchange=A_TOPIC_EXCHANGE_NAME)
self.rabbitmq_client = factory.no_singleton_rabbitmq_client()
self.event_processor = Spy()
self.event_builder = factory.raw_event_builder()
self.logger = logger
self.sut_event_processor = RabbitMQQueueEventProcessor(queue_name=A_QUEUE_NAME,
event_processor=self.event_processor,
rabbitmq_client=self.rabbitmq_client,
exchange=A_TOPIC_EXCHANGE_NAME,
list_of_topics=A_LIST_OF_TOPICS,
event_builder=self.event_builder,
logger=self.logger,
exchange_type=TOPIC_EXCHANGE_TYPE,
queue_options={},
exchange_options={}
)
with after.each:
self.rabbitmq_client.queue_unbind(queue_name=A_QUEUE_NAME,
exchange=A_TOPIC_EXCHANGE_NAME,
routing_key=A_LIST_OF_TOPICS[0])
self.rabbitmq_client.queue_delete(queue_name=A_QUEUE_NAME)
self.rabbitmq_client.exchange_delete(exchange=A_TOPIC_EXCHANGE_NAME)
with context('publish and processing an event'):
with it('calls the processor with event object data'):
self.sut_event_publisher.publish(AN_EVENT_NAME, A_NETWORK, data=AN_EVENT_DATA)
self.sut_event_publisher.publish(AN_EVENT_NAME, A_NETWORK, data=AN_EVENT_DATA)
self.sut_event_processor.process_body(max_iterations=1)
expect(self.event_processor.process).to(have_been_called_with(have_keys(name=AN_EVENT_NAME,
network=A_NETWORK,
data=AN_EVENT_DATA,
timestamp=be_a(float),
timestamp_str=have_len(be_above_or_equal(1))
)
).once
)
|
import numpy as np
import numpy.random as npr
import collections.abc
from itertools import product
from scipy.stats import multivariate_normal
def split_train_test_multiple(
datas, datas2, chunk=5000, train_frac=0.7, val_frac=0.15, seed=0, verbose=True
):
"""
    Split elements of lists (datas and datas2) into chunks along the first
    dimension and assign the chunks randomly to the train, validation,
    and test sets.
The first dimensions of datas and datas2 should be the same for each
element of the lists.
Input:
______
:param datas: list of arrays [T, D]
D can be any dimension
:param datas2: list of arrays [T, L]
L can be any dimension
:param chunk: int
length of chunks to split the elements of datas and datas2 in.
:param train_frac: float
fraction of chunks to be used in training set
:param val_frac: float
fraction of chunks to be used in validation set
:param seed: int
seed to pass to numpy for random shuffle
:return:
(train_ys, train_xs): tuple of lists of arrays
train_ys: list of arrays
each array is a chunk of datas assigned to the training set
train_xs: list of arrays
each array is a chunk of datas2 assigned to the training set
(val_ys, val_xs): tuple of lists of arrays
val_ys: list of arrays
each array is a chunk of datas assigned to the validation set
val_xs: list of arrays
each array is a chunk of datas2 assigned to the validation set
(test_ys, test_xs): tuple of lists of arrays
test_ys: list of arrays
each array is a chunk of datas assigned to the testing set
test_xs: list of arrays
each array is a chunk of datas2 assigned to the testing set
"""
# datas T x D
# datas2 T x N
npr.seed(seed)
all_ys = []
all_xs = []
all_choices = []
for y, x in zip(datas, datas2):
T = y.shape[0]
C = 0
for start in range(0, T, chunk):
stop = min(start + chunk, T)
all_ys.append(y[start:stop])
all_xs.append(x[start:stop])
C += 1
# assign some of the data to train, val, and test
choices = -1 * np.ones(C)
choices[: int(train_frac * C)] = 0
choices[int(train_frac * C) : int((train_frac + val_frac) * C)] = 1
choices[int((train_frac + val_frac) * C) :] = 2
# shuffle around the choices
choices = choices[npr.permutation(C)]
all_choices.append(choices)
all_choices = np.concatenate(all_choices)
get_arr = lambda arr, chc: [x for x, c in zip(arr, all_choices) if c == chc]
train_ys = get_arr(all_ys, 0)
train_xs = get_arr(all_xs, 0)
val_ys = get_arr(all_ys, 1)
val_xs = get_arr(all_xs, 1)
test_ys = get_arr(all_ys, 2)
test_xs = get_arr(all_xs, 2)
if verbose:
print("Len of train data is {}".format(len(train_ys)))
print("Len of val data is {}".format(len(val_ys)))
print("Len of test data is {}".format(len(test_ys)))
print(list(map(len, train_ys)))
print(list(map(len, val_ys)))
print(list(map(len, test_ys)))
assert len(train_ys) >= 1
assert (len(val_ys) >= 1) | (len(test_ys) >= 1)
return (train_ys, train_xs), (val_ys, val_xs), (test_ys, test_xs)
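# Hypothetical usage sketch (not part of the original module): split one pair of
# aligned arrays into 1000-sample chunks and check that every chunk lands in
# exactly one of the train/val/test sets.
def _demo_split_train_test_multiple():
    ys = [npr.randn(5000, 3)]
    xs = [npr.randn(5000, 2)]
    (train_ys, _), (val_ys, _), (test_ys, _) = split_train_test_multiple(
        ys, xs, chunk=1000, train_frac=0.6, val_frac=0.2, seed=0, verbose=False
    )
    assert len(train_ys) + len(val_ys) + len(test_ys) == 5  # 5000 / 1000 chunks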
def create_schedule(param_ranges, verbose=False):
"""
Create schedule for experiment given dictionary of
parameters. Each configuration in the schedule is
a combination of the parameters (keys) and their values
Inputs:
_______
:param param_ranges: dictionary of parameters
{'param1': range(0, 10, 2), 'param2': 1, ...}
The value of each key can be an int, float, list or array.
:param verbose: bool
Flag to print each configuration in schedule
:return:
schedule: list of configuration
each configuration is an experiment to run
"""
#Args:
# param_ranges: dict
#Returns:
# Schedule containing all possible combinations of passed parameter values.
param_lists = []
# for each parameter-range pair ('p': range(x)),
# create a list of the form [('p', 0), ('p', 1), ..., ('p', x)]
for param, vals in param_ranges.items():
if isinstance(vals, str):
vals = [vals]
# if a single value is passed for param...
        elif not isinstance(vals, collections.abc.Iterable):
vals = [vals]
param_lists.append([(param, v) for v in vals])
# permute the parameter lists
schedule = [dict(config) for config in product(*param_lists)]
print('Created schedule containing {} configurations.'.format(len(schedule)))
if verbose:
for config in schedule:
print(config)
print('-----------------------------------------------')
return schedule
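# Hypothetical usage sketch (not part of the original module): the schedule is the
# cartesian product of the per-parameter value lists (scalars and strings count as
# a single value), so 2 * 2 * 1 = 4 configurations here.
def _demo_create_schedule():
    schedule = create_schedule({'lr': [0.1, 0.01], 'num_states': range(2, 4), 'dataset': 'toy'})
    assert len(schedule) == 4
    return schedule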
def multivariate_gaussian_fit(data, return_log=True):
"""
Fit Multivariate Gaussian distribution to data
Outputs MV class
:param data: list of N arrays of dimensions T x D
note arrays can have different first dimension (T)
but must have the same second dimension (D)
    :return: MV class from scipy
"""
# singular matrix will not be good w different datasets
# test_data = (# series x # Dobs) x T
# test_aus = np.vstack(test_data).T # D x TN
test_aus = np.concatenate(data, 0) # TN x D
# calculate data mean
mus = test_aus.mean(0) # D
# calculate data covariance
stds = np.cov(test_aus, rowvar=False) # D x D
# Fit multivariate normal
Y2 = multivariate_normal(mean=mus, cov=stds, allow_singular=False)
if return_log:
return Y2.logpdf(test_aus)
else:
return Y2
def state_transition_probabilities_ngram(state_seq, K, ngram=1):
from itertools import product
combinations_ = list(product(np.arange(K), repeat=ngram))
num_combination = len(combinations_)
state_transition_counts = np.zeros((K, num_combination))
for k in range(K):
# do not include last n states in seq
idx_ = np.argwhere(state_seq[:-ngram] == k)
# search in all combination pair
for jj, combination_ in enumerate(combinations_):
# test each combination gram
for ii, comb_ in enumerate(combination_):
# test for each index
for local_idx in idx_:
if state_seq[local_idx + ii + 1] == comb_:
state_transition_counts[k, jj] += 1
state_transition_counts /= state_transition_counts.sum(1, keepdims=True)
return state_transition_counts
def state_transition_probabilities(state_seq, num_states, normalize=True):
"""
# bigram probabilities: state transition probabilities
"""
state_transition_counts = np.zeros((num_states, num_states))
for k in range(num_states):
# do not include last state seq
idx_ = np.argwhere(state_seq[:-1] == k)
        #next_state, next_state_count = np.unique(state_seq[idx_ + 1], return_counts=True)
        #state_transition_counts[k, next_state] = next_state_count
        state_transition_counts[k] = np.bincount(state_seq[idx_ + 1].flatten(),
                                                 minlength=num_states)
#state_transition_counts /= state_transition_counts.sum(1, keepdims=True)
if normalize:
# Normalize according to state transitions
#state_transition_counts /= state_transition_counts.sum(1, keepdims=True)
# Hack to ignore 0s
num_transitions = state_transition_counts.sum(1)
for k in range(num_states):
if num_transitions[k] > 0:
state_transition_counts[k] /= num_transitions[k]
return state_transition_counts
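# Hypothetical usage sketch (not part of the original module): bigram transition
# probabilities for a short two-state sequence; each row with at least one
# outgoing transition sums to 1 after normalization.
def _demo_state_transition_probabilities():
    seq = np.array([0, 0, 1, 1, 0, 1])
    probs = state_transition_probabilities(seq, num_states=2)
    assert np.allclose(probs.sum(axis=1), 1.0)
    return probs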
def multiple_state_transition_probabilities(state_seq_list, num_states, normalize=True):
"""
# bigram probabilities: state transition probabilities
"""
state_transition_counts = np.zeros((num_states, num_states))
for k in range(num_states):
# do not include last state seq
for state_seq in state_seq_list:
# find where state k is
idx_k = np.argwhere(state_seq[:-1] == k)
# count #s of other states
state_transition_counts[k] += np.bincount(state_seq[idx_k + 1].flatten(),
minlength = num_states)
if normalize:
# Normalize according to state transitions
#state_transition_counts /= state_transition_counts.sum(1, keepdims=True)
# Hack to ignore 0s
num_transitions = state_transition_counts.sum(1)
for k in range(num_states):
if num_transitions[k] > 0:
state_transition_counts[k] /= num_transitions[k]
return state_transition_counts
|
"""
Parse method tests for pyubx2.UBXMessage
Created on 3 Oct 2020
*** NB: must be saved in UTF-8 format ***
@author: semuadmin
"""
# pylint: disable=line-too-long, invalid-name, missing-docstring, no-member
import unittest
from pyubx2 import UBXMessage, UBXReader, VALCKSUM, VALNONE, SET
class ParseTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.ack_ack = b"\xb5b\x05\x01\x02\x00\x06\x01\x0f\x38"
self.ack_ack_badck = b"\xb5b\x05\x01\x02\x00\x06\x01\x0f\x37"
self.cfg_msg = b"\xb5b\x06\x01\x08\x00\xf0\x01\x00\x01\x01\x01\x00\x00\x036"
self.cfg_prt = b"\xb5b\x06\x00\x00\x00\x06\x18"
self.nav_velned = b"\xb5b\x01\x12$\x000D\n\x18\xfd\xff\xff\xff\xf1\xff\xff\xff\xfc\xff\xff\xff\x10\x00\x00\x00\x0f\x00\x00\x00\x83\xf5\x01\x00A\x00\x00\x00\xf0\xdfz\x00\xd0\xa6"
self.nav_svinfo = b""
self.cfg_nmeavx = b"\xb5b\x06\x17\x04\x00\x00\x00\x00\x00\x21\xe9"
self.cfg_nmeav0 = b"\xb5b\x06\x17\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x29\x61"
self.mga_dbd = b"\xb5b\x13\x80\x0e\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x01\x02\xf2\xc2"
self.mga_flash_ack = b"\xb5b\x13\x21\x06\x00\x03\x01\x02\x00\x00\x04\x44\x3a"
self.cfg_valget = b"\xb5b\x06\x8b\x0c\x00\x00\x00\x00\x00\x01\x00\x52\x40\x80\x25\x00\x00\xd5\xd0"
self.cfg_valget2 = (
b"\xb5b\x06\x8b\x09\x00\x00\x00\x00\x00\x01\x00\x51\x20\x55\x61\xc2"
)
self.cfg_valget3 = b"\xb5b\x06\x8b\x16\x00\x00\x00\x00\x00\x01\x00\x51\x20\x55\x01\x00\x52\x40\x80\x25\x00\x00\x02\x00\x21\x30\x23\x1c\x92"
self.cfg_valget4 = b"\xb5b\x06\x8b\x0c\x00\x00\x00\x00\x00\x68\x00\x11\x40\xb6\xf3\x9d\x3f\xdb\x3d"
self.esf_meas = b"\xb5b\x10\x02\x10\x00\x01\x02\x03\x04\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\x50\x8a"
self.esf_measct = b"\xb5b\x10\x02\x14\x00\x01\x02\x03\x04\x00\x08\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\x01\x02\x03\x04\x66\xae"
self.esf_meas_log = [
b"\xb5b\x10\x02\x18\x00\x72\xd8\x07\x00\x18\x18\x00\x00\x4b\xfd\xff\x10\x40\x02\x00\x11\x23\x28\x00\x12\x72\xd8\x07\x00\x03\x9c",
b"\xb5b\x10\x02\x1c\x00\x6d\xd8\x07\x00\x18\x20\x00\x00\xcd\x06\x00\x0e\xe4\xfe\xff\x0d\x03\xfa\xff\x05\x09\x0b\x00\x0c\x6d\xd8\x07\x00\xee\x51",
b"\xb5b\x10\x02\x18\x00\xd5\xd8\x07\x00\x18\x18\x00\x00\x4d\xfd\xff\x10\x45\x02\x00\x11\x1f\x28\x00\x12\xd5\xd8\x07\x00\xcc\xac",
b"\xb5b\x10\x02\x1c\x00\xd0\xd8\x07\x00\x18\x20\x00\x00\x7c\x06\x00\x0e\xcb\xfe\xff\x0d\xac\xf9\xff\x05\x09\x0b\x00\x0c\xd0\xd8\x07\x00\xf2\xae",
b"\xb5b\x10\x02\x18\x00\x38\xd9\x07\x00\x18\x18\x00\x00\x4a\xfd\xff\x10\x41\x02\x00\x11\x27\x28\x00\x12\x38\xd9\x07\x00\x95\x7a",
b"\xb5b\x10\x02\x1c\x00\x33\xd9\x07\x00\x18\x20\x00\x00\x0f\x06\x00\x0e\x16\xfe\xff\x0d\x5b\xfa\xff\x05\x0a\x0b\x00\x0c\x33\xd9\x07\x00\x49\x9f",
b"\xb5b\x10\x02\x18\x00\x9c\xd9\x07\x00\x18\x18\x00\x00\x4e\xfd\xff\x10\x4e\x02\x00\x11\x20\x28\x00\x12\x9c\xd9\x07\x00\x67\x0e",
b"\xb5b\x10\x02\x1c\x00\x97\xd9\x07\x00\x18\x20\x00\x00\x85\x06\x00\x0e\x77\xfe\xff\x0d\xe1\xf9\xff\x05\x0a\x0b\x00\x0c\x97\xd9\x07\x00\x6d\xa4",
b"\xb5b\x10\x02\x18\x00\xff\xd9\x07\x00\x18\x18\x00\x00\x4c\xfd\xff\x10\x3e\x02\x00\x11\x24\x28\x00\x12\xff\xd9\x07\x00\x1f\x22",
b"\xb5b\x10\x02\x1c\x00\xfa\xd9\x07\x00\x18\x20\x00\x00\x92\x06\x00\x0e\x61\xfe\xff\x0d\x9f\xf9\xff\x05\x0a\x0b\x00\x0c\xfa\xd9\x07\x00\xe8\x90",
b"\xb5b\x10\x02\x18\x00\x63\xda\x07\x00\x18\x18\x00\x00\x47\xfd\xff\x10\x44\x02\x00\x11\x1c\x28\x00\x12\x63\xda\x07\x00\xe2\xe4",
b"\xb5b\x10\x02\x1c\x00\x5e\xda\x07\x00\x18\x20\x00\x00\xef\x06\x00\x0e\xb8\xfe\xff\x0d\xc8\xf9\xff\x05\x0a\x0b\x00\x0c\x5e\xda\x07\x00\x8f\xce",
b"\xb5b\x10\x02\x18\x00\xc6\xda\x07\x00\x18\x18\x00\x00\x4a\xfd\xff\x10\x4e\x02\x00\x11\x21\x28\x00\x12\xc6\xda\x07\x00\xba\x88",
b"\xb5b\x10\x02\x1c\x00\xc1\xda\x07\x00\x18\x20\x00\x00\x82\x06\x00\x0e\x5b\xfe\xff\x0d\xc8\xf9\xff\x05\x09\x0b\x00\x0c\xc1\xda\x07\x00\x8a\xd2",
b"\xb5b\x10\x02\x18\x00\x2a\xdb\x07\x00\x18\x18\x00\x00\x48\xfd\xff\x10\x47\x02\x00\x11\x27\x28\x00\x12\x2a\xdb\x07\x00\x81\x4e",
b"\xb5b\x10\x02\x1c\x00\x25\xdb\x07\x00\x18\x20\x00\x00\x1b\x07\x00\x0e\xed\xfe\xff\x0d\xfa\xf9\xff\x05\x09\x0b\x00\x0c\x25\xdb\x07\x00\xb2\xef",
b"\xb5b\x10\x02\x1c\x00\xdb\x25\x01\x00\x18\x20\x00\x00\x76\x02\x00\x0e\x06\xf8\xff\x0d\xde\xf7\xff\x05\x54\x0a\x00\x0c\xdb\x25\x01\x00\x3b\x91",
b"\xb5b\x10\x02\x18\x00\xee\x23\x01\x00\x18\x18\x00\x00\xe8\x11\x00\x10\xfa\x07\x00\x11\xa1\x22\x00\x12\xee\x23\x01\x00\x6e\xf9",
b"\xb5b\x10\x02\x1c\x00\x94\x21\x01\x00\x18\x20\x00\x00\xff\x05\x00\x0e\xf3\xfe\xff\x0d\x4d\x0b\x00\x05\x51\x0a\x00\x0c\x94\x21\x01\x00\xa5\x52",
]
self.mga_ini1 = b"\xb5b\x13\x40\x14\x00\x01\x00\x01\x02\x01\x02\x03\x04\x01\x02\x03\x04\x01\x02\x03\x04\x01\x02\x03\x04\x93\xc8"
self.mon_span = b"\xb5b\n1\x14\x01\x00\x01\x00\x00-+-,+-.,-.+,+.-..-,..//./00203017?9398:L]<@C;H<>=A@BDCGJNQRVY[_cgpqyz\x7f\x84\x8c\x90\x99\xa0\xa7\xae\xb0\xae\xaa\xa7\xa2\x9b\x97\x96\x94\x91\x90\x8e\x8c\x8c\x8c\x8b\x8b\x89\x88\x89\x89\x89\x8b\x88\x89\x8a\x89\x8a\x8a\x89\x8a\x8b\x8a\x8a\x8b\x8b\x8c\x8a\x8a\x8a\x8b\x88\x88\x87\x87\x86\x85\x85\x85\x84\x89\x84\x85\x83\x84\x84\x84\x85\x88\x87\x87\x88\x8a\x8a\x8a\x8a\x8b\x8e\x8c\x8d\x8d\x8f\x8e\x8d\x8f\x8e\x8f\x8f\x8e\x8f\x8f\x90\x91\x92\x93\x93\x93\x95\x94\x94\x94\x94\x95\x94\x95\x93\x93\x91\x92\x93\x92\x94\x95\x94\x95\x97\x97\x98\x97\x94\x90\x8d\x86\x82\x7fyupmg`]VRLEB?=;99665422202101///-//.-0-.-/..--,.-+-,--+.,,--,,-*\x00 \xa1\x07 \xa1\x07\x00@\xc4`^\x0c\x00\x00\x00\x15j"
def tearDown(self):
pass
def testAck(self):
res = UBXReader.parse(self.ack_ack, validate=VALCKSUM)
self.assertIsInstance(res, UBXMessage)
def testAckID(self):
res = UBXReader.parse(self.ack_ack)
self.assertEqual(res.identity, "ACK-ACK")
def testAckStr(self):
res = UBXReader.parse(self.ack_ack, validate=VALCKSUM)
self.assertEqual(str(res), "<UBX(ACK-ACK, clsID=CFG, msgID=CFG-MSG)>")
def testAckRepr(self):
res = UBXReader.parse(self.ack_ack)
self.assertEqual(
repr(res), "UBXMessage(b'\\x05', b'\\x01', 0, payload=b'\\x06\\x01')"
)
def testAckCkF(self):
UBXReader.parse(self.ack_ack_badck, validate=VALNONE)
def testCfg(self):
res = UBXReader.parse(self.ack_ack)
self.assertIsInstance(res, UBXMessage)
def testCfgID(self):
res = UBXReader.parse(self.cfg_msg, validate=VALCKSUM)
self.assertEqual(res.identity, "CFG-MSG")
def testCfgStr(self):
res = UBXReader.parse(self.cfg_msg, validate=VALCKSUM)
self.assertEqual(
str(res),
"<UBX(CFG-MSG, msgClass=NMEA-Standard, msgID=GLL, rateDDC=0, rateUART1=1, rateUART2=1, rateUSB=1, rateSPI=0, reserved=0)>",
)
def testCfgRepr(self):
res = UBXReader.parse(self.cfg_msg)
self.assertEqual(
repr(res),
"UBXMessage(b'\\x06', b'\\x01', 0, payload=b'\\xf0\\x01\\x00\\x01\\x01\\x01\\x00\\x00')",
)
def testCfgProp1(self):
res = UBXReader.parse(self.cfg_msg, validate=VALCKSUM)
self.assertEqual(res.rateUART1, 1)
def testCfgProp2(self):
res = UBXReader.parse(self.cfg_msg)
self.assertEqual(res.rateSPI, 0)
def testNavVelNed(self):
res = UBXReader.parse(self.nav_velned, validate=VALCKSUM)
self.assertIsInstance(res, UBXMessage)
def testNavVelNedID(self):
res = UBXReader.parse(self.nav_velned)
self.assertEqual(res.identity, "NAV-VELNED")
def testNavVelNedStr(self):
res = UBXReader.parse(self.nav_velned)
self.assertEqual(
str(res),
"<UBX(NAV-VELNED, iTOW=16:01:50, velN=-3, velE=-15, velD=-4, speed=16, gSpeed=15, heading=128387, sAcc=65, cAcc=8052720)>",
)
def testNavVelNedRepr(self):
res = UBXReader.parse(self.nav_velned)
self.assertEqual(
repr(res),
"UBXMessage(b'\\x01', b'\\x12', 0, payload=b'0D\\n\\x18\\xfd\\xff\\xff\\xff\\xf1\\xff\\xff\\xff\\xfc\\xff\\xff\\xff\\x10\\x00\\x00\\x00\\x0f\\x00\\x00\\x00\\x83\\xf5\\x01\\x00A\\x00\\x00\\x00\\xf0\\xdfz\\x00')",
)
def testNavVelNedProp1(self):
res = UBXReader.parse(self.nav_velned, validate=VALCKSUM)
self.assertEqual(res.iTOW, 403326000)
def testNavVelNedProp2(self):
res = UBXReader.parse(self.nav_velned)
self.assertEqual(res.cAcc, 8052720)
def testCfgPrt(self): # POLL example with null payload
res = UBXReader.parse(self.cfg_prt)
self.assertIsInstance(res, UBXMessage)
def testCfgPrtID(self):
res = UBXReader.parse(self.cfg_prt)
self.assertEqual(res.identity, "CFG-PRT")
def testCfgPrtStr(self):
res = UBXReader.parse(self.cfg_prt, validate=VALCKSUM)
self.assertEqual(str(res), "<UBX(CFG-PRT)>")
def testCfgPrtRepr(self):
res = UBXReader.parse(self.cfg_prt)
self.assertEqual(repr(res), "UBXMessage(b'\\x06', b'\\x00', 0)")
def testCfgNmeaVx(self): # test older NMEA message parse
res = UBXReader.parse(self.cfg_nmeavx)
self.assertEqual(
str(res),
"<UBX(CFG-NMEA, filter=b'\\x00', nmeaVersion=0, numSV=0, flags=b'\\x00')>",
)
def testCfgNmeaV0(self): # test older NMEA message parse
res = UBXReader.parse(self.cfg_nmeav0)
self.assertEqual(
str(res),
"<UBX(CFG-NMEA, filter=b'\\x00', nmeaVersion=0, numSV=0, flags=b'\\x00', gnssToFilter=b'\\x00\\x00\\x00\\x00', svNumbering=0, mainTalkerId=0, gsvTalkerId=0, version=0)>",
)
def testMgaDbd(self):
res = UBXReader.parse(self.mga_dbd)
self.assertEqual(
str(res),
"<UBX(MGA-DBD, reserved1=3727165692135864801209549313, data_01=1, data_02=2)>",
)
def testMgaFlashAck(self):
res = UBXReader.parse(self.mga_flash_ack)
self.assertEqual(
str(res),
"<UBX(MGA-FLASH-ACK, type=3, version=1, ack=2, reserved1=0, sequence=1024)>",
)
def testCFGVALGET(self): # test parser of CFG-VALGET CFG-UART1-BAUDRATE
res = UBXReader.parse(self.cfg_valget)
self.assertEqual(
str(res),
"<UBX(CFG-VALGET, version=0, layer=0, position=0, CFG_UART1_BAUDRATE=9600)>",
)
def testCFGVALGET2(self): # test parse of CFG-VALGET CFG-I2C-ADDRESS
res = UBXReader.parse(self.cfg_valget2)
self.assertEqual(
str(res),
"<UBX(CFG-VALGET, version=0, layer=0, position=0, CFG_I2C_ADDRESS=85)>",
)
def testCFGVALGET3(
self,
): # test parse of CFG-VALGET CFG-I2C-ADDRESS, CFG-UART1-BAUDRATE, CFG-RATE-NAV
res = UBXReader.parse(self.cfg_valget3)
self.assertEqual(
str(res),
"<UBX(CFG-VALGET, version=0, layer=0, position=0, CFG_I2C_ADDRESS=85, CFG_UART1_BAUDRATE=9600, CFG_RATE_NAV=35)>",
)
def testCFGVALGET4(self): # test parser of CFG-VALGET CFG-NAVSPG-USRDAT_ROTY
res = UBXReader.parse(self.cfg_valget4)
self.assertAlmostEqual(res.CFG_NAVSPG_USRDAT_ROTY, 1.23, 2)
def testESFMEAS(self): # test parser of ESF-MEAS without calibTtag data
res = UBXReader.parse(self.esf_meas)
self.assertEqual(
str(res),
"<UBX(ESF-MEAS, timeTag=67305985, flags=b'\\x00\\x00', id=0, data_01=b'\\x01\\x02\\x03\\x04', data_02=b'\\x05\\x06\\x07\\x08')>",
)
def testESFMEASCT(self): # test parser of ESF-MEAS with calibTtag data
res = UBXReader.parse(self.esf_measct)
self.assertEqual(
str(res),
"<UBX(ESF-MEAS, timeTag=67305985, flags=b'\\x00\\x08', id=0, data_01=b'\\x01\\x02\\x03\\x04', data_02=b'\\x05\\x06\\x07\\x08', calibTtag=67305985)>",
)
def testESFMEASLOG(
self,
): # test parse of actual ESF-MEAS log - thanks to tgalecki for log
EXPECTED_RESULT = [
"<UBX(ESF-MEAS, timeTag=514162, flags=b'\\x18\\x18', id=0, data_01=b'K\\xfd\\xff\\x10', data_02=b'@\\x02\\x00\\x11', data_03=b'#(\\x00\\x12', calibTtag=514162)>",
"<UBX(ESF-MEAS, timeTag=514157, flags=b'\\x18 ', id=0, data_01=b'\\xcd\\x06\\x00\\x0e', data_02=b'\\xe4\\xfe\\xff\\r', data_03=b'\\x03\\xfa\\xff\\x05', data_04=b'\\t\\x0b\\x00\\x0c', data_05=b'm\\xd8\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=514261, flags=b'\\x18\\x18', id=0, data_01=b'M\\xfd\\xff\\x10', data_02=b'E\\x02\\x00\\x11', data_03=b'\\x1f(\\x00\\x12', calibTtag=514261)>",
"<UBX(ESF-MEAS, timeTag=514256, flags=b'\\x18 ', id=0, data_01=b'|\\x06\\x00\\x0e', data_02=b'\\xcb\\xfe\\xff\\r', data_03=b'\\xac\\xf9\\xff\\x05', data_04=b'\\t\\x0b\\x00\\x0c', data_05=b'\\xd0\\xd8\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=514360, flags=b'\\x18\\x18', id=0, data_01=b'J\\xfd\\xff\\x10', data_02=b'A\\x02\\x00\\x11', data_03=b\"'(\\x00\\x12\", calibTtag=514360)>",
"<UBX(ESF-MEAS, timeTag=514355, flags=b'\\x18 ', id=0, data_01=b'\\x0f\\x06\\x00\\x0e', data_02=b'\\x16\\xfe\\xff\\r', data_03=b'[\\xfa\\xff\\x05', data_04=b'\\n\\x0b\\x00\\x0c', data_05=b'3\\xd9\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=514460, flags=b'\\x18\\x18', id=0, data_01=b'N\\xfd\\xff\\x10', data_02=b'N\\x02\\x00\\x11', data_03=b' (\\x00\\x12', calibTtag=514460)>",
"<UBX(ESF-MEAS, timeTag=514455, flags=b'\\x18 ', id=0, data_01=b'\\x85\\x06\\x00\\x0e', data_02=b'w\\xfe\\xff\\r', data_03=b'\\xe1\\xf9\\xff\\x05', data_04=b'\\n\\x0b\\x00\\x0c', data_05=b'\\x97\\xd9\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=514559, flags=b'\\x18\\x18', id=0, data_01=b'L\\xfd\\xff\\x10', data_02=b'>\\x02\\x00\\x11', data_03=b'$(\\x00\\x12', calibTtag=514559)>",
"<UBX(ESF-MEAS, timeTag=514554, flags=b'\\x18 ', id=0, data_01=b'\\x92\\x06\\x00\\x0e', data_02=b'a\\xfe\\xff\\r', data_03=b'\\x9f\\xf9\\xff\\x05', data_04=b'\\n\\x0b\\x00\\x0c', data_05=b'\\xfa\\xd9\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=514659, flags=b'\\x18\\x18', id=0, data_01=b'G\\xfd\\xff\\x10', data_02=b'D\\x02\\x00\\x11', data_03=b'\\x1c(\\x00\\x12', calibTtag=514659)>",
"<UBX(ESF-MEAS, timeTag=514654, flags=b'\\x18 ', id=0, data_01=b'\\xef\\x06\\x00\\x0e', data_02=b'\\xb8\\xfe\\xff\\r', data_03=b'\\xc8\\xf9\\xff\\x05', data_04=b'\\n\\x0b\\x00\\x0c', data_05=b'^\\xda\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=514758, flags=b'\\x18\\x18', id=0, data_01=b'J\\xfd\\xff\\x10', data_02=b'N\\x02\\x00\\x11', data_03=b'!(\\x00\\x12', calibTtag=514758)>",
"<UBX(ESF-MEAS, timeTag=514753, flags=b'\\x18 ', id=0, data_01=b'\\x82\\x06\\x00\\x0e', data_02=b'[\\xfe\\xff\\r', data_03=b'\\xc8\\xf9\\xff\\x05', data_04=b'\\t\\x0b\\x00\\x0c', data_05=b'\\xc1\\xda\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=514858, flags=b'\\x18\\x18', id=0, data_01=b'H\\xfd\\xff\\x10', data_02=b'G\\x02\\x00\\x11', data_03=b\"'(\\x00\\x12\", calibTtag=514858)>",
"<UBX(ESF-MEAS, timeTag=514853, flags=b'\\x18 ', id=0, data_01=b'\\x1b\\x07\\x00\\x0e', data_02=b'\\xed\\xfe\\xff\\r', data_03=b'\\xfa\\xf9\\xff\\x05', data_04=b'\\t\\x0b\\x00\\x0c', data_05=b'%\\xdb\\x07\\x00')>",
"<UBX(ESF-MEAS, timeTag=75227, flags=b'\\x18 ', id=0, data_01=b'v\\x02\\x00\\x0e', data_02=b'\\x06\\xf8\\xff\\r', data_03=b'\\xde\\xf7\\xff\\x05', data_04=b'T\\n\\x00\\x0c', data_05=b'\\xdb%\\x01\\x00')>",
"<UBX(ESF-MEAS, timeTag=74734, flags=b'\\x18\\x18', id=0, data_01=b'\\xe8\\x11\\x00\\x10', data_02=b'\\xfa\\x07\\x00\\x11', data_03=b'\\xa1\"\\x00\\x12', calibTtag=74734)>",
"<UBX(ESF-MEAS, timeTag=74132, flags=b'\\x18 ', id=0, data_01=b'\\xff\\x05\\x00\\x0e', data_02=b'\\xf3\\xfe\\xff\\r', data_03=b'M\\x0b\\x00\\x05', data_04=b'Q\\n\\x00\\x0c', data_05=b'\\x94!\\x01\\x00')>",
]
for i, msg in enumerate(self.esf_meas_log):
res = UBXReader.parse(msg, validate=VALCKSUM)
self.assertEqual(str(res), EXPECTED_RESULT[i])
def testRXMPMPV0(self): # test parser of RXM-PMP v0 message
rxm_pmpv0 = b"\xb5b\x02\x72\x0e\x02\x00\x00\x01\x02\x03\x04\x01\x02\x03\x04\x01\x02\x03\x04\x01\x02\x01\x01"
for i in range(504):
n = i % 256
rxm_pmpv0 += n.to_bytes(1, "little", signed=False)
rxm_pmpv0 += b"\x01\x02\x01\x00\xcf\xa9"
res = UBXReader.parse(rxm_pmpv0, validate=VALCKSUM)
self.assertEqual(
str(res),
"<UBX(RXM-PMP, version=0, reserved0=0, timeTag=67305985, uniqueWord1=67305985, uniqueWord2=67305985, serviceIdentifier=513, spare=1, uniqueWordBitErrors=1, userData_01=0, userData_02=1, userData_03=2, userData_04=3, userData_05=4, userData_06=5, userData_07=6, userData_08=7, userData_09=8, userData_10=9, userData_11=10, userData_12=11, userData_13=12, userData_14=13, userData_15=14, userData_16=15, userData_17=16, userData_18=17, userData_19=18, userData_20=19, userData_21=20, userData_22=21, userData_23=22, userData_24=23, userData_25=24, userData_26=25, userData_27=26, userData_28=27, userData_29=28, userData_30=29, userData_31=30, userData_32=31, userData_33=32, userData_34=33, userData_35=34, userData_36=35, userData_37=36, userData_38=37, userData_39=38, userData_40=39, userData_41=40, userData_42=41, userData_43=42, userData_44=43, userData_45=44, userData_46=45, userData_47=46, userData_48=47, userData_49=48, userData_50=49, userData_51=50, userData_52=51, userData_53=52, userData_54=53, userData_55=54, userData_56=55, userData_57=56, userData_58=57, userData_59=58, userData_60=59, userData_61=60, userData_62=61, userData_63=62, userData_64=63, userData_65=64, userData_66=65, userData_67=66, userData_68=67, userData_69=68, userData_70=69, userData_71=70, userData_72=71, userData_73=72, userData_74=73, userData_75=74, userData_76=75, userData_77=76, userData_78=77, userData_79=78, userData_80=79, userData_81=80, userData_82=81, userData_83=82, userData_84=83, userData_85=84, userData_86=85, userData_87=86, userData_88=87, userData_89=88, userData_90=89, userData_91=90, userData_92=91, userData_93=92, userData_94=93, userData_95=94, userData_96=95, userData_97=96, userData_98=97, userData_99=98, userData_100=99, userData_101=100, userData_102=101, userData_103=102, userData_104=103, userData_105=104, userData_106=105, userData_107=106, userData_108=107, userData_109=108, userData_110=109, userData_111=110, userData_112=111, userData_113=112, userData_114=113, userData_115=114, userData_116=115, userData_117=116, userData_118=117, userData_119=118, userData_120=119, userData_121=120, userData_122=121, userData_123=122, userData_124=123, userData_125=124, userData_126=125, userData_127=126, userData_128=127, userData_129=128, userData_130=129, userData_131=130, userData_132=131, userData_133=132, userData_134=133, userData_135=134, userData_136=135, userData_137=136, userData_138=137, userData_139=138, userData_140=139, userData_141=140, userData_142=141, userData_143=142, userData_144=143, userData_145=144, userData_146=145, userData_147=146, userData_148=147, userData_149=148, userData_150=149, userData_151=150, userData_152=151, userData_153=152, userData_154=153, userData_155=154, userData_156=155, userData_157=156, userData_158=157, userData_159=158, userData_160=159, userData_161=160, userData_162=161, userData_163=162, userData_164=163, userData_165=164, userData_166=165, userData_167=166, userData_168=167, userData_169=168, userData_170=169, userData_171=170, userData_172=171, userData_173=172, userData_174=173, userData_175=174, userData_176=175, userData_177=176, userData_178=177, userData_179=178, userData_180=179, userData_181=180, userData_182=181, userData_183=182, userData_184=183, userData_185=184, userData_186=185, userData_187=186, userData_188=187, userData_189=188, userData_190=189, userData_191=190, userData_192=191, userData_193=192, userData_194=193, userData_195=194, userData_196=195, userData_197=196, userData_198=197, userData_199=198, userData_200=199, 
userData_201=200, userData_202=201, userData_203=202, userData_204=203, userData_205=204, userData_206=205, userData_207=206, userData_208=207, userData_209=208, userData_210=209, userData_211=210, userData_212=211, userData_213=212, userData_214=213, userData_215=214, userData_216=215, userData_217=216, userData_218=217, userData_219=218, userData_220=219, userData_221=220, userData_222=221, userData_223=222, userData_224=223, userData_225=224, userData_226=225, userData_227=226, userData_228=227, userData_229=228, userData_230=229, userData_231=230, userData_232=231, userData_233=232, userData_234=233, userData_235=234, userData_236=235, userData_237=236, userData_238=237, userData_239=238, userData_240=239, userData_241=240, userData_242=241, userData_243=242, userData_244=243, userData_245=244, userData_246=245, userData_247=246, userData_248=247, userData_249=248, userData_250=249, userData_251=250, userData_252=251, userData_253=252, userData_254=253, userData_255=254, userData_256=255, userData_257=0, userData_258=1, userData_259=2, userData_260=3, userData_261=4, userData_262=5, userData_263=6, userData_264=7, userData_265=8, userData_266=9, userData_267=10, userData_268=11, userData_269=12, userData_270=13, userData_271=14, userData_272=15, userData_273=16, userData_274=17, userData_275=18, userData_276=19, userData_277=20, userData_278=21, userData_279=22, userData_280=23, userData_281=24, userData_282=25, userData_283=26, userData_284=27, userData_285=28, userData_286=29, userData_287=30, userData_288=31, userData_289=32, userData_290=33, userData_291=34, userData_292=35, userData_293=36, userData_294=37, userData_295=38, userData_296=39, userData_297=40, userData_298=41, userData_299=42, userData_300=43, userData_301=44, userData_302=45, userData_303=46, userData_304=47, userData_305=48, userData_306=49, userData_307=50, userData_308=51, userData_309=52, userData_310=53, userData_311=54, userData_312=55, userData_313=56, userData_314=57, userData_315=58, userData_316=59, userData_317=60, userData_318=61, userData_319=62, userData_320=63, userData_321=64, userData_322=65, userData_323=66, userData_324=67, userData_325=68, userData_326=69, userData_327=70, userData_328=71, userData_329=72, userData_330=73, userData_331=74, userData_332=75, userData_333=76, userData_334=77, userData_335=78, userData_336=79, userData_337=80, userData_338=81, userData_339=82, userData_340=83, userData_341=84, userData_342=85, userData_343=86, userData_344=87, userData_345=88, userData_346=89, userData_347=90, userData_348=91, userData_349=92, userData_350=93, userData_351=94, userData_352=95, userData_353=96, userData_354=97, userData_355=98, userData_356=99, userData_357=100, userData_358=101, userData_359=102, userData_360=103, userData_361=104, userData_362=105, userData_363=106, userData_364=107, userData_365=108, userData_366=109, userData_367=110, userData_368=111, userData_369=112, userData_370=113, userData_371=114, userData_372=115, userData_373=116, userData_374=117, userData_375=118, userData_376=119, userData_377=120, userData_378=121, userData_379=122, userData_380=123, userData_381=124, userData_382=125, userData_383=126, userData_384=127, userData_385=128, userData_386=129, userData_387=130, userData_388=131, userData_389=132, userData_390=133, userData_391=134, userData_392=135, userData_393=136, userData_394=137, userData_395=138, userData_396=139, userData_397=140, userData_398=141, userData_399=142, userData_400=143, userData_401=144, userData_402=145, userData_403=146, 
userData_404=147, userData_405=148, userData_406=149, userData_407=150, userData_408=151, userData_409=152, userData_410=153, userData_411=154, userData_412=155, userData_413=156, userData_414=157, userData_415=158, userData_416=159, userData_417=160, userData_418=161, userData_419=162, userData_420=163, userData_421=164, userData_422=165, userData_423=166, userData_424=167, userData_425=168, userData_426=169, userData_427=170, userData_428=171, userData_429=172, userData_430=173, userData_431=174, userData_432=175, userData_433=176, userData_434=177, userData_435=178, userData_436=179, userData_437=180, userData_438=181, userData_439=182, userData_440=183, userData_441=184, userData_442=185, userData_443=186, userData_444=187, userData_445=188, userData_446=189, userData_447=190, userData_448=191, userData_449=192, userData_450=193, userData_451=194, userData_452=195, userData_453=196, userData_454=197, userData_455=198, userData_456=199, userData_457=200, userData_458=201, userData_459=202, userData_460=203, userData_461=204, userData_462=205, userData_463=206, userData_464=207, userData_465=208, userData_466=209, userData_467=210, userData_468=211, userData_469=212, userData_470=213, userData_471=214, userData_472=215, userData_473=216, userData_474=217, userData_475=218, userData_476=219, userData_477=220, userData_478=221, userData_479=222, userData_480=223, userData_481=224, userData_482=225, userData_483=226, userData_484=227, userData_485=228, userData_486=229, userData_487=230, userData_488=231, userData_489=232, userData_490=233, userData_491=234, userData_492=235, userData_493=236, userData_494=237, userData_495=238, userData_496=239, userData_497=240, userData_498=241, userData_499=242, userData_500=243, userData_501=244, userData_502=245, userData_503=246, userData_504=247, fecBits=513, ebno=1, reserved1=0)>",
)
def testRXMPMPV1(self): # test parser of RXM-PMP v1 message
rxm_pmpv1 = b"\xb5b\x02\x72\x23\x00\x01\x00\x0b\x00\x01\x02\x03\x04\x01\x02\x03\x04\x01\x02\x03\x04\x01\x02\x00\x01\x01\x02\x01\x00"
for i in range(11):
rxm_pmpv1 += i.to_bytes(1, "little", signed=False)
rxm_pmpv1 += b"\x00\x20"
res = UBXReader.parse(rxm_pmpv1, True)
self.assertEqual(
str(res),
"<UBX(RXM-PMP, version=1, reserved0=0, numBytesUserData=11, timeTag=67305985, uniqueWord1=67305985, uniqueWord2=67305985, serviceIdentifier=513, spare=0, uniqueWordBitErrors=1, fecBits=513, ebno=1, reserved1=0, userData_01=0, userData_02=1, userData_03=2, userData_04=3, userData_05=4, userData_06=5, userData_07=6, userData_08=7, userData_09=8, userData_10=9, userData_11=10)>",
)
def testRXMRLMS(self): # test parser of RXM-RLM-S message
rxm_rlms = b"\xb5b\x02\x59\x10\x00\x00\x01\x00\x00"
for i in range(8):
rxm_rlms += i.to_bytes(1, "little", signed=False)
rxm_rlms += b"\x00\x01\x02\x00\x8b\xbd"
res = UBXReader.parse(rxm_rlms)
self.assertEqual(
str(res),
"<UBX(RXM-RLM, version=0, type=1, svId=0, reserved0=0, beacon_01=0, beacon_02=1, beacon_03=2, beacon_04=3, beacon_05=4, beacon_06=5, beacon_07=6, beacon_08=7, message=0, params1=1, params2=2, reserved1=0)>",
)
def testRXMRLML(self): # test parser of RXM-RLM-L message
rxm_rlms = b"\xb5b\x02\x59\x1c\x00\x00\x02\x00\x00"
for i in range(8):
rxm_rlms += i.to_bytes(1, "little", signed=False)
rxm_rlms += b"\x00"
for i in range(12):
rxm_rlms += i.to_bytes(1, "little", signed=False)
rxm_rlms += b"\x00\x01\x02\xda\x81"
res = UBXReader.parse(rxm_rlms)
self.assertEqual(
str(res),
"<UBX(RXM-RLM, version=0, type=2, svId=0, reserved0=0, beacon_01=0, beacon_02=1, beacon_03=2, beacon_04=3, beacon_05=4, beacon_06=5, beacon_07=6, beacon_08=7, message=0, params_01=0, params_02=1, params_03=2, params_04=3, params_05=4, params_06=5, params_07=6, params_08=7, params_09=8, params_10=9, params_11=10, params_12=11, reserved1=131328)>",
)
def testMONSPAN(self): # test parser of MON-SPAN message
res = UBXReader.parse(self.mon_span)
self.assertEqual(
str(res),
"<UBX(MON-SPAN, version=0, numRfBlocks=1, reserved0=0, spectrum_01=[45, 43, 45, 44, 43, 45, 46, 44, 45, 46, 43, 44, 43, 46, 45, 46, 46, 45, 44, 46, 46, 47, 47, 46, 47, 48, 48, 50, 48, 51, 48, 49, 55, 63, 57, 51, 57, 56, 58, 76, 93, 60, 64, 67, 59, 72, 60, 62, 61, 65, 64, 66, 68, 67, 71, 74, 78, 81, 82, 86, 89, 91, 95, 99, 103, 112, 113, 121, 122, 127, 132, 140, 144, 153, 160, 167, 174, 176, 174, 170, 167, 162, 155, 151, 150, 148, 145, 144, 142, 140, 140, 140, 139, 139, 137, 136, 137, 137, 137, 139, 136, 137, 138, 137, 138, 138, 137, 138, 139, 138, 138, 139, 139, 140, 138, 138, 138, 139, 136, 136, 135, 135, 134, 133, 133, 133, 132, 137, 132, 133, 131, 132, 132, 132, 133, 136, 135, 135, 136, 138, 138, 138, 138, 139, 142, 140, 141, 141, 143, 142, 141, 143, 142, 143, 143, 142, 143, 143, 144, 145, 146, 147, 147, 147, 149, 148, 148, 148, 148, 149, 148, 149, 147, 147, 145, 146, 147, 146, 148, 149, 148, 149, 151, 151, 152, 151, 148, 144, 141, 134, 130, 127, 121, 117, 112, 109, 103, 96, 93, 86, 82, 76, 69, 66, 63, 61, 59, 57, 57, 54, 54, 53, 52, 50, 50, 50, 48, 50, 49, 48, 49, 47, 47, 47, 45, 47, 47, 46, 45, 48, 45, 46, 45, 47, 46, 46, 45, 45, 44, 46, 45, 43, 45, 44, 45, 45, 43, 46, 44, 44, 45, 45, 44, 44, 45, 42], span_01=128000000, res_01=500000, center_01=1583400000, pga_01=12, reserved1_01=0)>",
)
def testMONSPAN2(self): # test parser of MON-SPAN message (repeating groups empty)
mon_span = b"\xb5b\x0a\x31\x04\x00\x00\x00\x01\x02"
mon_span += b"\x42\xc3"
res = UBXReader.parse(mon_span)
self.assertEqual(
str(res), "<UBX(MON-SPAN, version=0, numRfBlocks=0, reserved0=513)>"
)
def testMGAINI1(self): # test parser of MGA-INI input message with kwargs
res = UBXReader.parse(self.mga_ini1, msgmode=SET)
self.assertEqual(
str(res),
"<UBX(MGA-INI-POS_LLH, type=1, version=0, reserved1=513, lat=67305985, lon=67305985, alt=67305985, posAcc=67305985)>",
)
def testMGAINI2(self): # test parser of MGA-INI input message with args
res = UBXReader.parse(self.mga_ini1, True, SET)
self.assertEqual(
str(res),
"<UBX(MGA-INI-POS_LLH, type=1, version=0, reserved1=513, lat=67305985, lon=67305985, alt=67305985, posAcc=67305985)>",
)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
from utils import get_embeddings
import os
import torch
import pickle
import argparse
def create_database(in_path, out_path):
images_list = os.listdir(in_path)
embeddings_set = torch.rand(len(images_list), 1, 512)
id_to_name = {}
for i, image in enumerate(images_list):
embeddings, name = get_embeddings(os.path.join(in_path, image))
if embeddings is not None:
embeddings_set[i] = embeddings
id_to_name[i] = name
database = [embeddings_set, id_to_name]
with open(out_path,"wb") as pkl_out:
pickle.dump(database, pkl_out)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-in", "--in_path",
required=True,
help="path of the input images")
ap.add_argument("-o", "--out_path",
default = "database.pkl",
help="path of database to be saved")
args = vars(ap.parse_args())
create_database(args["in_path"], args["out_path"])
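# Example invocation (paths are hypothetical):
#   python create_database.py --in_path ./known_faces --out_path database.pkl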
|
#This script demonstrates developing a program with the Tkinter library
#Source: https://www.udemy.com/the-python-mega-course/learn/v4/t/lecture/4775342?start=0
from tkinter import *
window = Tk()
#Everything goes between this line and window.mainloop()
b1=Button(window, text="Execute")
#b1.pack()
b1.grid(row=1, column=1)
#Adding entry
e1=Entry(window)
e1.grid(row=1, column=2)
#Adding text
t1=Text(window, height=2, width=20)
t1.grid(row=1,column=3)
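#Hypothetical extension (not in the original lecture code): wire the button so a
#click copies whatever was typed in the entry into the text box.
def on_execute():
    t1.delete("1.0", END)
    t1.insert(END, e1.get())
b1.config(command=on_execute)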
window.mainloop() #Without this line the window would open and immediately close
|
# -*- coding: utf-8 -*-
"""
Python Slack Bot docker parser class for use with the HB Bot
"""
import os
import re
DOCKER_SUPPORTED = ["image", "container", "help"]
SUBCOMMAND_SUPPORTED = ["ls",]
def docker_usage_message():
    return ("I'm sorry. I don't understand your docker command. "
            "I understand docker [%s] if you would like to try one of those." % "|".join(DOCKER_SUPPORTED))
def parse_command(incoming_text):
"""
incoming_text: A text string to parse for docker commands
returns: a fully validated docker command
"""
docker_action = ''
parse1 = re.compile(r"(?<=\bdocker\s)(\w+)")
match_obj = parse1.search(incoming_text)
if match_obj:
docker_action = match_obj.group()
print("Got docker action %s" % (docker_action,))
if docker_action and docker_action in DOCKER_SUPPORTED:
# Use this type of code if we want to limit the docker commands
#parse2 = re.compile(r"(?<=\b%s\s)(\w+)" % docker_action)
#match_obj = parse2.search(incoming_text)
#if match_obj:
# docker_subcommand = match_obj.group()
# if docker_subcommand in SUBCOMMAND_SUPPORTED:
# return "docker %s %s" % (docker_action, docker_subcommand)
# Otherwise let it fly and return help if it pumps mud.
        print("returning docker %s%s" % (docker_action, incoming_text[match_obj.end():]))
return "docker %s%s" % (docker_action, incoming_text[match_obj.end():])
return docker_usage_message()
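# Hypothetical usage sketch (not part of the original module): a supported action
# is echoed back as a docker command, an unsupported one gets the usage message.
def _demo_parse_command():
    assert parse_command("please run docker image ls") == "docker image ls"
    assert parse_command("docker destroy everything") == docker_usage_message()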
|
import pandas as pd
import glob
import csv
import os
import seaborn as sns
import matplotlib.pyplot as plt
from builtins import any
class CrystalBall:
def __init__(self, list_of_csvs:list, csvname_to_colnames_list:dict, csvname_to_IDs:dict, csvname_to_nonIDs:dict, all_IDs:list, all_nonIDs:list, csvname_to_one_ID:list):
# get list of all files in current directory that end in .csv
self.list_of_csvs = list_of_csvs
# create dictionary where csvname maps to colnames
self.csvname_to_colnames_list = csvname_to_colnames_list
# create dictionary where csvname maps to colnames that have the substring "ID"
self.csvname_to_IDs = csvname_to_IDs
# create dictionary where csvname maps to colnames that do not have the substring "ID"
self.csvname_to_nonIDs = csvname_to_nonIDs
# create list of only unique IDs
self.all_IDs = all_IDs
# create list of only unique nonIDs
self.all_nonIDs = all_nonIDs
# create list of all column names (IDs + nonIDs)
self.all_colnames = list(all_IDs.union(all_nonIDs))
# create dictionary that maps out relationship, one csvname to one ID
self.csvname_to_one_ID = csvname_to_one_ID
@classmethod
def run(self, rel_dir):
""" Initialize the Crystal Ball object for a given directory that contains the CSVs.
Parameters
----------
rel_dir : str
- A string that contains the relative directory, which contains the CSVs to analyze.
Returns
--------
CrystalBall
- CrystalBall that has all class variables initialized by this run script.
Examples
--------
.. code-block:: python
relative_directory = './folder1/folder2'
crystalBall = CrystalBall.run(relative_directory)
"""
rel_dir = rel_dir + '/*.csv'
list_of_csvs = sorted(glob.glob(rel_dir))
csvname_to_colnames_list = {}
csvname_to_IDs = {}
csvname_to_nonIDs = {}
all_IDs = set()
all_nonIDs = set()
csvname_to_one_ID = []
for csv_name in list_of_csvs:
with open(csv_name, "rt") as f:
reader = csv.reader(f)
try:
col_names = next(reader)
csvname_to_colnames_list[csv_name] = col_names
ids = []
non_ids = []
for col_name in col_names:
if 'ID' in col_name or 'Id' in col_name:
csvname_to_one_ID.append([os.path.split(csv_name)[1], col_name])
ids.append(col_name)
else:
non_ids.append(col_name)
csvname_to_IDs[csv_name] = ids
csvname_to_nonIDs[csv_name] = non_ids
all_IDs.update(ids)
all_nonIDs.update(non_ids)
continue
except StopIteration:
continue
except:
continue
return CrystalBall(list_of_csvs, csvname_to_colnames_list, csvname_to_IDs, csvname_to_nonIDs, all_IDs, all_nonIDs, csvname_to_one_ID)
def contains(self, keywords: list, all_colnames: list=None) -> list:
""" Check if keywords exist in all_colnames.
- Determine whether a keyword (substring) exists in a given list of column names (strings).
- Note: This search is case sensitive!
Parameters
----------
keywords : list[str]
- List of key words that the user is interested in
all_colnames : list[str]
- List of column names of a table, or for many tables.
- If no argument is provided, this function will use the column names generated when the run method was called.
Returns
-------
list
- Each index corresponds to a keyword.
- For each index, True if substring exists in list of strings, otherwise False.
Examples
--------
>>> colnames = ['id', 'name', 'title']
>>> cb.contains(['name'], colnames)
[True]
>>> cb.contains(['Name'], colnames)
[False]
>>> cb.contains(['name', 'Name'], colnames)
[True, False]
"""
if all_colnames is None:
return [any(keyword in colname for colname in self.all_colnames) for keyword in keywords]
else:
return [any(keyword in colname for colname in all_colnames) for keyword in keywords]
def featureSearch(self, keywords: list, all_colnames: list=None, mode: str='UNION') -> list:
""" Find the columns that contain the keywords.
- Find features (column names) that contain the substrings specified in keywords.
- Note: This search is case sensitive!
Parameters
----------
keywords : list[str]
- List of key words that the user is interested in
colnames : list[str]
- List of column names of a table, or for many tables.
- If no argument is provided, this function will use the column names generated when the run method was called.
Returns
--------
DataFrame
- DataFrame will contain all features (column names) that contains one/all substrings found in keywords.
- DataFrame will be sorted in alphabetical order.
        Examples (outputs shown as lists of matching names for brevity; the actual return value is a DataFrame)
--------
>>> colnames = ['id', 'name', 'nameType', 'subSpeciesName', 'title']
>>> cb.featureSearch(['name'], colnames)
['name', 'nameType']
>>> cb.featureSearch(['Name'], colnames)
['subSpeciesName']
>>> cb.featureSearch(['name', 'Name'], colnames)
['name', 'nameType', 'subSpeciesName']
"""
##implement INTERSECTION mode later
def search(keywords, colnames):
suggested_colnames = set()
for colname in colnames:
for keyword in keywords:
if keyword in colname:
suggested_colnames.add(colname)
return pd.DataFrame( {'featureName': sorted(list(suggested_colnames))})
if type(keywords) is not list:
raise Exception('keywords argument expects a list')
        if mode == 'UNION':
if all_colnames is None:
return search(keywords, self.all_colnames)
else:
return search(keywords, all_colnames)
        elif mode == "INTERSECTION":
print('to implement later')
def tableSearch(self, keywords, csvname_to_colnames_list=None, mode='UNION'):
""" Find the tables that contain the keywords.
- Find tables that contain column names which have the substrings specified in keywords.
- Note: This search is case sensitive!
Parameters
----------
keywords : list[str]
- List of key words that the user is interested in
csvname_to_colnames_list : dict[str] = list
- Dictionary that maps a string (table name) to a list of column names it contains.
- If no argument is provided, this function will use the dictionary generated when the run method was called.
mode : str
- If mode is UNION, then return all tables that contain at least one keyword.
- If mode is INTERSECTION, then return all tables that contain all the keywords.
Returns
--------
list[str]
- List will contain all tables that contain a match with keywords.
- List will be sorted in alphabetical order.
Examples
--------
>>> csvname_to_colnames_list = {'table1': ['colName1', 'colName2'], 'table2':['colName3', 'colName4']}
>>> cb.tableSearch(['colName1'], csvname_to_colnames_list)
['table1']
>>> cb.tableSearch(['colName3'], csvname_to_colnames_list)
['table2']
>>> cb.tableSearch(['colName1', 'colName2'], csvname_to_colnames_list)
['table1', 'table2']
"""
def columnNamesContainKeyword(keyword, colname_list):
return any(keyword in colname for colname in colname_list)
        if mode == 'UNION':
if csvname_to_colnames_list is None:
return list(filter(lambda x: x is not None, [key if False not in [True if any(keyword in colname for colname in self.csvname_to_colnames_list[key]) else False for keyword in keywords] else None for key in self.csvname_to_colnames_list]))
else:
return list(filter(lambda x: x is not None, [key if False not in [True if any(keyword in colname for colname in csvname_to_colnames_list[key]) else False for keyword in keywords] else None for key in csvname_to_colnames_list]))
        elif mode == 'INTERSECTION':
csv_matches = []
if csvname_to_colnames_list is None:
for csvname in self.csvname_to_colnames_list:
keyword_checklist = []
for keyword in keywords:
keyword_checklist.append(columnNamesContainKeyword(keyword, self.csvname_to_colnames_list[csvname]))
if False not in keyword_checklist:
csv_matches.append(csvname)
return sorted(csv_matches)
else:
print("implement later")
def openTable(self, rel_dir, indices=None, encoding='utf-8'):
""" Open the csv that is referenced by the given relative directory.
Parameters
----------
rel_dir : str
- A path to the table that is relative to where the user is running Crystal Ball.
indices : list[int]
- Sets the (multi)index by columns represented by their numerical integer-locations.
Returns
--------
DataFrame
- The DataFrame containing the contents of the csv.
Examples
--------
        (link jupyter notebook)
"""
df = pd.read_csv(rel_dir, engine='python', encoding=encoding , error_bad_lines=False)
if indices is not None:
df.set_index(list(df.columns[indices]), inplace=True)
return df
def subTable(self, supertable, chosen_index:list, chosen_columns:list):
""" Create a subtable from a supertable.
Parameters
----------
supertable : DataFrame
- Table from which to select chosen_columns from in order to form a subtable
chosen_index : list[str]
- The column names that will form the new (multi)index for the subtable.
chosen_columns : list[str]
- The column names that will form the new columns for the subtable.
Returns
--------
DataFrame
- DataFrame (the newly-formed subtable) that will have the (multi)index and columns specified in the arguments.
Examples
--------
        (link jupyter notebook)
"""
## chosen_columns should default to empty list
# if len(chosen_columns) == 0:
# use all the columns from supertable
combined = chosen_index.copy()
combined.extend(chosen_columns)
subtable = supertable[combined].set_index(chosen_index)
return subtable
def mergeTables(self, tables:list):
""" Sequentially merge a list of tables that all share a common index.
- Merge defaults to using inner joins over the index.
Parameters
----------
tables : list[DataFrame]
- Contains a list of DataFrames that will be merged sequentially.
Returns
--------
DataFrame
- Table that results from sequentially merging the DataFrames given in the argument.
Examples
--------
        (link jupyter notebook)
"""
        # replace sequential merging with concat...
# TO IMPLEMENT LATER: other types of joins, merging by non-index
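        # A possible concat-based alternative to the sequential merge below (sketch only;
        # assumes every table in `tables` shares the same index and that an inner join is wanted):
        #     merged = pd.concat(tables, axis=1, join='inner')
        # The sequential merge is kept for now because it also reports the number of dropped rows.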
def chooseLargestString(string_list):
largest_string = string_list[0]
for string in string_list:
if len(string) > len(largest_string):
largest_string = string
return largest_string
if len(tables) < 2:
raise Exception("need at least two tables in order to merge")
num_of_dropped_rows = 0
max_num_of_rows = max(len(tables[0]), len(tables[1]))
current_merge = tables[0].merge(tables[1], how='inner', left_index=True, right_index=True)
diff = max_num_of_rows - len(current_merge)
max_num_of_rows = len(current_merge)
num_of_dropped_rows += diff
index_names = [tables[0].index.name, tables[1].index.name]
if len(tables) - 2 > 0:
for i in range(2, len(tables)):
                current_merge = current_merge.merge(tables[i], how='inner', left_index=True, right_index=True)
diff = max_num_of_rows - len(current_merge)
max_num_of_rows = len(current_merge)
num_of_dropped_rows += diff
index_names.append(tables[i].index.name)
print('Number of Dropped Rows: ',num_of_dropped_rows)
current_merge.index.name = chooseLargestString(index_names)
# CHECK FOR MULTI INDEX CASE, WHETHER THE ABOVE LINE BREAKS
return current_merge
def analyzeRelationships(self, to_analyze:list, visualize=True):
""" Analyze basic stats of one or more different indexes.
By comparing boxplots, you should be able to determine which indices are related.
Parameters
----------
to_analyze : list[list[str, Series]]
            - A list of lists. The latter should be of length two: the 0th element stores the table name and the 1st element contains a Series.
- The Series should contain the values of the column derived from the table associated with the name stored in the 0th index.
Returns
--------
DataFrame
- Table that contains basic stats about each given Series.
Examples
--------
        (link jupyter notebook)
"""
descriptions = []
boxplot_data = []
boxplot_xtick_labels = []
for pair in to_analyze:
new_name = pair[1].name + ' from ' + pair[0]
descriptions.append(pair[1].describe().rename(new_name))
boxplot_data.append(pair[1])
boxplot_xtick_labels.append(new_name)
        # add labels to the quartile ranges for exact measurement.
if visualize:
g = sns.boxplot(data=boxplot_data)
g.set(
title='Relationship Analysis',
xlabel='Features',
ylabel='Numerical Values',
xticklabels=boxplot_xtick_labels
)
plt.xticks(rotation=-10)
description_table = pd.concat(descriptions, axis=1)
return description_table
def compareRelationship(self, to_analyze1, to_analyze2, visualize=False):
""" Compare and contrast the difference between two Series.
By comparing boxplots, you should be able to determine which indices are related.
Parameters
----------
to_analyze1 : list[str, Series]
            - A list that contains the name of the first table, and the contents of a specific column from that table as a Series.
        to_analyze2 : list[str, Series]
            - A list that contains the name of the second table, and the contents of a specific column from that table as a Series.
Returns
--------
DataFrame
- Table that contains basic stats about each given Series, as well as a third column that contains the difference between the stats.
Examples
--------
        (link jupyter notebook)
"""
descriptions = []
boxplot_data = []
boxplot_xtick_labels = []
new_name = to_analyze1[1].name + ' from ' + to_analyze1[0]
description1 = to_analyze1[1].describe().rename(new_name)
descriptions.append(description1)
boxplot_data.append(to_analyze1[1])
boxplot_xtick_labels.append(new_name)
new_name = to_analyze2[1].name + ' from ' + to_analyze2[0]
description2 = to_analyze2[1].describe().rename(new_name)
descriptions.append(description2)
boxplot_data.append(to_analyze2[1])
boxplot_xtick_labels.append(new_name)
if visualize:
g = sns.boxplot(data=boxplot_data)
g.set(
title='Relationship Analysis',
xlabel='Features',
ylabel='Numerical Values',
xticklabels=boxplot_xtick_labels
)
plt.xticks(rotation=-10)
diff_description = abs(description1 - description2)
diff_description.name = "Difference"
descriptions.append(diff_description)
description_table = pd.concat(descriptions, axis=1)
return description_table
def export(self, df_to_export, write_to, export_type="CSV"):
""" Exports contents of dataframe to relative location specified by write_to parameter.
- Default export type is CSV
Parameters
----------
df_to_export : DataFrame
- DataFrame whose contents will be exported into a specifed location.
write_to : str
- Relative location (including file) that you will write into.
export_type : str
- Format that contents of df_to_export will be exported as.
Returns
--------
None
Examples
--------
        (link jupyter notebook)
"""
if export_type is "CSV":
df_to_export.to_csv(write_to, encoding='utf-8', index=True, index_label=df_to_export.index.name)
else:
print('implemnt sql format')
# to implement later
# featureSearch should return a dictionary, where key is the index and value is the name of the feature
# this makes it easier for people to select the feature they want
# search function should also have an 'is_exact' option, to make search more precise.
# check if a lower case letter surrounds either side of the keyword, implies that it is an interjection
# create a function that let's you index into a python list with another list. useful for selecting many names at once
# from featureSearch result
# format output of lists better... can actually use DataFrame for formatting
# Print out "You have found the following column names/table names/etc." to make it easier for people to
# understand what they are seeing.
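# Example workflow (sketch only; `cb` stands for an already-constructed instance whose run
# method has been called, as in the docstring examples above, and the file paths are hypothetical):
#     matches = cb.featureSearch(['colName'])                 # DataFrame of matching column names
#     tables = cb.tableSearch(['colName1'], mode='UNION')     # tables containing at least one match
#     df1 = cb.openTable('data/table1.csv', indices=[0])
#     df2 = cb.openTable('data/table2.csv', indices=[0])
#     merged = cb.mergeTables([df1, df2])                     # inner join on the shared index
#     cb.export(merged, 'output/merged.csv')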
|
import unittest
from flask import session
from flask.globals import request
from flask_login import current_user
from app import create_app
from app.models import User, db
# data for register / login
FULL_NAME = 'Gibran Abdillah'
USERNAME = 'hadehbang'
PASSWORD = 'cumansegini'
EMAIL = 'gatauiniisiapa@gmail.com'
class TestClient(unittest.TestCase):
def setUp(self) -> None:
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_client = self.app.test_client()
self.app_context.push()
db.create_all(app=self.app)
def tearDown(self) -> None:
self.app_context.pop()
def test_main_route(self):
response = self.app_client.get('/')
self.assertEqual(200, response.status_code)
def auth_register(self):
response = self.app_client.post('/auth/register',
data={
'name':FULL_NAME,
'username':USERNAME,
'email':EMAIL,
'password':PASSWORD,
'password_confirmation':PASSWORD,
'submit':'Submit'
}, follow_redirects=True)
return response
def auth_login(self, username, password, r='response'):
with self.app_client as client:
response = client.post('/auth/login',
data={
'username':username,
'password':password
}, follow_redirects=True
)
if r == 'cookies':
return response.request.cookies.items()
return response
def test_auth_register_and_login(self):
resp_register = self.auth_register()
self.assertEqual(resp_register.request.path, '/auth/login')
resp_login = self.auth_login(USERNAME,PASSWORD)
self.assertEqual(resp_login.request.path, '/dashboard/welcome')
def test_auth_redirects(self):
response = self.app_client.get('/auth', follow_redirects=True)
self.assertEqual(response.request.path, '/')
def test_404_page(self):
response = self.app_client.get('/asdsadhsadasdf')
self.assertEqual(404, response.status_code)
    def test_unauthenticated_dashboard(self):
response = self.app_client.get('/dashboard/welcome', follow_redirects=True)
self.assertTrue(response.request.path != '/dashboard/welcome')
def test_auth_wrong_password(self):
response = self.auth_login(username=USERNAME, password='invalidpassword')
self.assertTrue(response.request.path != '/dashboard/welcome')
def test_api_user(self):
response = self.app_client.get('/api/users')
self.assertTrue(response.is_json)
def test_api_blogs(self):
response = self.app_client.get('/api/blogs')
self.assertTrue(response.is_json)
def test_api_reset_password(self):
response = self.app_client.get('/api/reset-password')
self.assertTrue(response.is_json)
def test_api_recent_posts(self):
response = self.app_client.get('/api/recent-blogs')
self.assertTrue(response.is_json)
def create_user(self):
u = User(full_name='Asede hadeh', username='admin', email='gatauisiap@asd.com')
u.set_password('admin')
u.is_admin = True
u.save()
def test_dashboard_admin(self):
self.create_user()
login = self.auth_login('admin', 'admin', 'cookies')
with self.app_client as client:
for keys, values in login:
client.set_cookie(server_name='localhost', key=keys, value=values)
response = client.get('/dashboard/add-user', follow_redirects=True)
# when not admin access /dashboard/add-user, it will redirect to homepage
self.assertTrue(response.request.path != '/dashboard/welcome')
def test_not_valid_token(self):
c = User.check_reset_token('asdasdasd')
self.assertIsNone(c)
def get_token(self):
t = User.query.all()[0].get_reset_token()
return t
def test_get_token(self):
self.assertTrue(
type(self.get_token()) == str
)
def test_valid_token(self):
token = self.get_token()
self.assertTrue(User.check_reset_token(token))
|
from .engine import TemplateEngine, Template
from .nodes import UrlNode, TextNode, StaticNode
from .ast import replace_on_tree
class EngineExtension:
def before_compile(self, engine: TemplateEngine, template: Template):
pass
class ViboraNodes(EngineExtension):
def __init__(self, app):
super().__init__()
self.app = app
def before_prepare(self, engine: TemplateEngine, template: Template):
"""
Handling specific Vibora nodes.
The template engine is not tied to Vibora, the integration is done through extensions.
:param engine:
:param template:
:return:
"""
replace_on_tree(lambda node: isinstance(node, UrlNode),
lambda node: TextNode(self.app.url_for(node.url)),
current_node=template.ast)
def replace_static(node):
if not self.app.static:
msg = 'Please configure a static handler before using a {% static %} tag in your templates.'
raise NotImplementedError(msg)
url = self.app.static.url_for(node.url)
return TextNode(url)
replace_on_tree(lambda x: isinstance(x, StaticNode), replace_static, current_node=template.ast)
|
from phone.phone_interface import PhoneInterface
from util import constants
def read_input(input_file, phone: PhoneInterface) -> str:
input_text = ''
output_text = []
    # Map each command string to the corresponding phone method. The methods are stored
    # without being called here; a method is invoked once per matching input line below.
    use_phone = {
        constants.PRESS_BUTTON_CALL: phone.press_button_call,
        constants.PRESS_BUTTON_DISMISS: phone.press_button_dismiss,
        constants.FLAG_AVATAR_DISPLAYED: phone.flag_avatar_displayed,
        constants.FLAG_POPUP_NO_NETWORK: phone.flag_popup_no_network,
        constants.FLAG_POPUP_CALL_DISMISSED: phone.flag_popup_call_dismissed,
        constants.FLAG_POPUP_ENDING_CALL: phone.flag_popup_ending_call,
    }
with open(input_file, 'r') as i:
input_text = i.read()
for input_line in input_text.splitlines():
next_entry = input_line.lower()
if next_entry in use_phone:
            output_text.append(use_phone[next_entry]())
else:
output_text.append(constants.NONEXISTENT_INPUT)
output_text.append('\n')
return ''.join(output_text)
def write_output(output_file, output_text) -> None:
with open(output_file, 'w') as o:
o.truncate(0)
o.write(output_text)
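# Example usage (sketch only; assumes a concrete PhoneInterface implementation exists,
# e.g. a hypothetical Phone class, and that commands.txt holds one command per line):
#     phone = Phone()
#     result = read_input('commands.txt', phone)
#     write_output('results.txt', result)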
|
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@contact:yongguiluo@hotmail.com
@file: flask_wrapper.py
@time: 2019/3/8 18:20
"""
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'hello123'
if __name__ == '__main__':
app.run()
|
"""
Credited to https://stackoverflow.com/a/41318195, https://apple.stackexchange.com/a/115373
"""
import os
import subprocess
CMD = """
on run argv
display notification (item 2 of argv) with title (item 1 of argv)
end run
"""
async def notify(title, text='Restocked!! Go and check', sound='Funk'):
subprocess.call(['osascript', '-e', CMD, title, text])
os.system(f'say "Check notification!"')
os.system(f'afplay /System/Library/Sounds/{sound}.aiff')
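# Example usage (sketch only): the coroutine does not await anything, so it can simply be
# driven with asyncio, e.g.
#     import asyncio
#     asyncio.run(notify('Restock watcher', 'Item is back in stock', sound='Glass'))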
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", help="directory of relax running", type=str, default="matflow-running")
args = parser.parse_args()
# making traj: initial and current
os.chdir(args.directory)
dirs_int = []
for i in os.listdir():
        if i.isdecimal():
dirs_int.append(int(i))
total_images = max(dirs_int) + 1 # including initial and final
    for i in range(total_images):
        # POSCAR is converted for every image; CONTCAR only exists for the intermediate images.
        os.system("sflow convert -i %.2d/POSCAR -o %.2d/POSCAR.xyz" % (i, i))
        if i != 0 and i != (total_images-1):
            os.system("sflow convert -i %.2d/CONTCAR -o %.2d/CONTCAR.xyz" % (i, i))
os.system("mkdir -p post-processing")
for i in range(total_images):
# initial traj
if i == 0:
os.system("cat %.2d/POSCAR.xyz > post-processing/traj-initial.xyz" % (i))
else:
os.system("cat %.2d/POSCAR.xyz >> post-processing/traj-initial.xyz" % (i))
# current traj
if i == 0:
os.system("cat %.2d/POSCAR.xyz > post-processing/traj-current.xyz" % (i))
elif i == (total_images-1):
os.system("cat %.2d/POSCAR.xyz >> post-processing/traj-current.xyz" % (i))
else:
os.system("cat %.2d/CONTCAR.xyz >> post-processing/traj-current.xyz" % (i))
# end making the traj
# build the energy barrier plot
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as spinterpolate
os.system("nebresults.pl")
neb_dat = np.loadtxt("neb.dat")
# style: linear spline
fun_dat = spinterpolate.interp1d(neb_dat[:, 1], neb_dat[:, 2], kind="linear")
denser_x = np.linspace(neb_dat[:, 1].min(), neb_dat[:, 1].max(), 30*len(neb_dat))
plt.plot(denser_x, fun_dat(denser_x))
plt.scatter(neb_dat[:, 1], neb_dat[:, 2], marker="o")
plt.xlabel("Reaction Coordinate (Angstrom)")
plt.ylabel("Energy (eV)")
plt.savefig("post-processing/mep-style-linear-spline.png")
plt.close()
# style: slinear
fun_dat = spinterpolate.interp1d(neb_dat[:, 1], neb_dat[:, 2], kind="slinear")
denser_x = np.linspace(neb_dat[:, 1].min(), neb_dat[:, 1].max(), 30*len(neb_dat))
plt.plot(denser_x, fun_dat(denser_x))
plt.scatter(neb_dat[:, 1], neb_dat[:, 2], marker="o")
plt.xlabel("Reaction Coordinate (Angstrom)")
plt.ylabel("Energy (eV)")
plt.savefig("post-processing/mep-style-slinear-spline.png")
plt.close()
    # style: quadratic spline
fun_dat = spinterpolate.interp1d(neb_dat[:, 1], neb_dat[:, 2], kind="quadratic")
denser_x = np.linspace(neb_dat[:, 1].min(), neb_dat[:, 1].max(), 30*len(neb_dat))
plt.plot(denser_x, fun_dat(denser_x))
plt.scatter(neb_dat[:, 1], neb_dat[:, 2], marker="o")
plt.xlabel("Reaction Coordinate (Angstrom)")
plt.ylabel("Energy (eV)")
plt.savefig("post-processing/mep-style-quadratic-spline.png")
plt.close()
# style: cubic spline
fun_dat = spinterpolate.interp1d(neb_dat[:, 1], neb_dat[:, 2], kind="cubic")
denser_x = np.linspace(neb_dat[:, 1].min(), neb_dat[:, 1].max(), 30*len(neb_dat))
plt.plot(denser_x, fun_dat(denser_x))
plt.scatter(neb_dat[:, 1], neb_dat[:, 2], marker="o")
plt.xlabel("Reaction Coordinate (Angstrom)")
plt.ylabel("Energy (eV)")
plt.savefig("post-processing/mep-style-cubic-spline.png")
plt.close()
# style: 5 order spline
fun_dat = spinterpolate.interp1d(neb_dat[:, 1], neb_dat[:, 2], kind=5)
denser_x = np.linspace(neb_dat[:, 1].min(), neb_dat[:, 1].max(), 30*len(neb_dat))
plt.plot(denser_x, fun_dat(denser_x))
plt.scatter(neb_dat[:, 1], neb_dat[:, 2], marker="o")
plt.xlabel("Reaction Coordinate (Angstrom)")
plt.ylabel("Energy (eV)")
plt.savefig("post-processing/mep-style-5-order-spline.png")
plt.close()
# style: KroghInterpolator
fun_dat = spinterpolate.KroghInterpolator(neb_dat[:, 1], neb_dat[:, 2])
denser_x = np.linspace(neb_dat[:, 1].min(), neb_dat[:, 1].max(), 30*len(neb_dat))
plt.plot(denser_x, fun_dat(denser_x))
plt.scatter(neb_dat[:, 1], neb_dat[:, 2], marker="o")
plt.xlabel("Reaction Coordinate (Angstrom)")
plt.ylabel("Energy (eV)")
plt.savefig("post-processing/mep-style-kroghinterpolator.png")
plt.close()
if __name__ == "__main__":
main()
|
from abc import ABC, abstractmethod
class AbstractClassExample(ABC):
@abstractmethod
def do_something(self):
print("wilson: from super {}".format(type(self).__name__))
print("Some implementation!")
class AnotherSubclass(AbstractClassExample):
def do_something(self):
print("wilson from sub: {}".format(type(self).__name__))
super().do_something()
print("The enrichment from AnotherSubclass")
x = AnotherSubclass()
x.do_something()
|
# $Id:#
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILEFI')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_025pre2'),
annotation = cms.untracked.string('RECOSIM'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/relval/CMSSW_4_2_3/RelValProdTTbar/GEN-SIM-RECO/MC_42_V12-v2/0062/82A88DE4-FB7A-E011-B69E-001A928116CC.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*", "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'MC_44_V5::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillRECOSIM_cfi")
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.bambu_step = cms.Path(process.BambuFillRECOSIM)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
|
from numpy import arange
from scipy.interpolate import interp1d
class MakeWaveformSeries(object):
"""docstring for MakeWaveformSeries"""
def __init__(self, freqs, hptilde, hctilde, df=None):
"""
assuming uniformly spaced!
"""
self.f_min = freqs[0]
self.f_max = freqs[-1]
if df is None:
            # TODO: Add option to interpolate to uniformly spaced?
self.flist_Hz = freqs
self.hptilde = hptilde
self.hctilde = hctilde
self.df = self.flist_Hz[1] - self.f_min
self.npts = len(self.flist_Hz)
else:
self.df = df
interp_hptilde = interp1d(freqs, hptilde)
interp_hctilde = interp1d(freqs, hctilde)
self.flist_Hz = arange(self.f_min, self.f_max, self.df)
self.hptilde = interp_hptilde(self.flist_Hz)
self.hctilde = interp_hctilde(self.flist_Hz)
self.npts = len(self.flist_Hz)
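# Example usage (sketch only, with synthetic data; a real caller would pass frequency-domain
# waveform polarizations):
#     import numpy as np
#     freqs = np.linspace(20.0, 1024.0, 2049)
#     hp = np.exp(-1j * freqs)          # placeholder plus polarization
#     hc = 1j * hp                      # placeholder cross polarization
#     wf = MakeWaveformSeries(freqs, hp, hc, df=0.25)   # resampled onto a 0.25 Hz grid
#     wf.npts, wf.df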
|
import importlib
import inspect
import pkgutil
from typing import Any, Iterator
import returns
def _classes_in_module(module: Any) -> Iterator[type]:
yield from (
klass
for _, klass in inspect.getmembers(module, inspect.isclass)
if klass.__module__.startswith(module.__name__) # noqa: WPS609
)
try:
module_path = module.__path__[0] # noqa: WPS609
except AttributeError:
return # it's not a package with submodules
packages = pkgutil.walk_packages([module_path])
for finder, name, _ in packages:
if not getattr(finder, 'path', '').startswith(module_path):
continue
yield from _classes_in_module(
importlib.import_module(
'{0.__name__}.{1}'.format(module, name),
),
)
def _has_slots(klass: type) -> bool:
return '__slots__' in klass.__dict__ or klass is object # noqa: WPS609
def test_slots_defined():
"""Ensures __slots__ isn't forgotten anywhere."""
classes_without_slots = {
klass
for klass in _classes_in_module(returns)
if not _has_slots(klass) and
all(map(_has_slots, klass.__bases__)) and # noqa: WPS609
not klass.__module__.startswith('returns.contrib') # noqa: WPS609
}
assert not classes_without_slots
|
# -*- coding: utf-8 -*-
# Author: Jianghan LI
# File: li.py
# Create Date: 2017-02-03 10:09-10:11
class Solution(object):
def findPeakElement2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 1:
return 0
for i in range(len(nums) - 1):
if nums[i] > nums[i + 1]:
return i
return len(nums) - 1
def findPeakElement(self, nums):
nums.append(float('-inf'))
return next(i for i in range(len(nums) - 1) if nums[i] > nums[i + 1])
s = Solution()
print(s.findPeakElement([1, 2, 3, 4]))
# The algorithm is just a simple brute-force scan.
# The last line was originally meant to return i + 1, on the assumption that i == len - 1 there,
# but that raised "UnboundLocalError: local variable 'i' referenced before assignment".
# It turned out that when len == 2 the loop never reaches the i == len - 1 case, so i was left unassigned.
# The second version uses Python's next() to find the first matching element.
# Appending negative infinity to the end of the array also avoids the boundary checks.
# I add -infinity to the end of nums in order to avoid all boundary checks.
# Then I simply find the first element which is bigger than its next one.
# https://discuss.leetcode.com/topic/77703/two-lines-force-brute-search
|
import queue
import threading
import numba
import numpy as np
from ..utils import log_sum_exp
@numba.jit(nogil=True)
def _ctc_loss_np(logits, targets, blank_idx=0):
"""
http://www.cs.toronto.edu/~graves/icml_2006.pdf
:param logits: numpy array, sequence_len * num_labels
:param targets: numpy array, target labels
:param blank: blank index
:return: loss (float), gradient (same shape as logits)
"""
targets_len = targets.shape[0]
prediction_len = logits.shape[0]
num_labels = logits.shape[1]
extended_targets_len = targets_len * 2 + 1
extended_targets = np.ones(extended_targets_len, dtype=np.int64) * blank_idx
extended_targets[1::2] = targets
# alpha and beta computation
# forward - alpha
log_alpha = np.zeros((extended_targets_len, prediction_len))
log_alpha[:] = -np.inf # numba bugfix instead of log_alpha.fill(-np.inf)
if prediction_len > 1 or extended_targets_len == 1:
log_alpha[0, 0] = logits[0, extended_targets[0]]
if extended_targets_len > 1:
log_alpha[1, 0] = logits[0, extended_targets[1]]
for t in range(1, prediction_len): # timesteps
start = max(0, extended_targets_len - 2 * (prediction_len - t))
end = min(t * 2 + 2, extended_targets_len)
log_alpha[start:end, t] = log_alpha[start:end, t - 1]
for j in range(start, end):
current_label = extended_targets[j]
if j > 0:
log_alpha[j, t] = log_sum_exp(log_alpha[j, t], log_alpha[j - 1, t - 1])
if current_label != blank_idx and j - 2 >= 0 and extended_targets[j - 2] != current_label:
log_alpha[j, t] = log_sum_exp(log_alpha[j, t], log_alpha[j - 2, t - 1])
log_alpha[j, t] += logits[t, current_label]
if extended_targets_len > 1:
loss_forward = log_sum_exp(log_alpha[extended_targets_len - 1, prediction_len - 1],
log_alpha[extended_targets_len - 2, prediction_len - 1])
else:
loss_forward = log_alpha[extended_targets_len - 1, prediction_len - 1]
# backward - beta
log_beta = np.zeros((extended_targets_len, prediction_len))
log_beta[:] = -np.inf # numba bugfix instead of log_beta.fill(-np.inf)
if prediction_len > 1 or extended_targets_len == 1:
log_beta[extended_targets_len - 1, prediction_len - 1] = 0
if extended_targets_len > 1:
log_beta[extended_targets_len - 2, prediction_len - 1] = 0
for t in range(prediction_len - 2, -1, -1): # timesteps
start = max(0, extended_targets_len - 2 * (prediction_len - t))
end = min(t * 2 + 2, extended_targets_len)
for j in range(start, end):
current_label = extended_targets[j]
log_beta[j, t] = log_beta[j, t + 1] + logits[t + 1, extended_targets[j]]
if j < extended_targets_len - 1:
log_beta[j, t] = log_sum_exp(log_beta[j, t],
log_beta[j + 1, t + 1] + logits[t + 1, extended_targets[j + 1]])
if (current_label != blank_idx
and j + 2 < extended_targets_len
and extended_targets[j + 2] != current_label):
log_beta[j, t] = log_sum_exp(log_beta[j, t], log_beta[j + 2, t + 1] + logits[
t + 1, extended_targets[j + 2]])
alpha_beta = log_alpha + log_beta
prob_sum = np.zeros((prediction_len, num_labels))
prob_sum[:] = -np.inf
for i in range(extended_targets_len):
current_label = extended_targets[i]
prob_sum[:, current_label] = log_sum_exp(prob_sum[:, current_label], alpha_beta[i, :])
negative_term = prob_sum - loss_forward
grad = np.exp(logits) - np.exp(negative_term)
return -loss_forward, grad
def _ctc_3d_loss_np(logits, targets, logits_lengths, targets_length, blank_idx=0):
batch_size = len(targets_length)
grads = np.zeros_like(logits)
losses = np.zeros(batch_size)
# parallel computation, threading - because gil is released with numba.jit(nogil=True)
# equivalent iterative computation is:
# for i in range(batch_size):
# loss, grad = _ctc_loss_np(logits[:logits_lengths[i], i, :], targets[i, :targets_length[i]], blank_idx)
# grads[:logits_lengths[i], i, :] = grad
# losses[i] = loss
que = queue.Queue()
threads = []
for i in range(batch_size):
t = threading.Thread(target=lambda q, i, *args: q.put((i, _ctc_loss_np(*args))),
args=(que, i, logits[i, :logits_lengths[i], :],
targets[i, :targets_length[i]], blank_idx))
threads.append(t)
t.start()
for t in threads:
t.join()
while not que.empty():
i, (loss, grad) = que.get()
grads[i, :logits_lengths[i], :] = grad
losses[i] = loss
return losses, grads
# in Function use:
# tensor_type = logits.dtype
# loss, grads = _ctc_3d_loss_np(logits.detach().cpu().numpy(), targets.cpu().numpy(),
# logits_lengths.cpu().numpy(), targets_lengths.cpu().numpy(), blank_idx)
# loss = torch.tensor(loss, dtype=tensor_type)
# grads = torch.tensor(grads, dtype=tensor_type)  # saving this via save_for_backward does not work!
# if logits.is_cuda:
# loss = loss.cuda(logits.get_device())
|
import argparse
import os
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log_file', type=str, default='', help='path to log file')
parser.add_argument('--n', type=int, default=4800, help='how many in one iteration')
args=parser.parse_args()
log_file=args.log_file
N=args.n
log_file2='/tmp/nn_log.txt'
cmd_str='sed -n /neural/p '+log_file+' >'+log_file2
print(cmd_str)
os.system(cmd_str)
#print('stats from', log_file2)
res_dict={}
with open(log_file2, 'r') as f:
ite=0
cnt=0
s=0.0
sum_step=0
#print('#iteration average_winrate average_num_steps')
for line in f:
arr=line.split()
res=float(arr[6])
res=max(0,res)
sum_step +=int(arr[-1])
s +=res
cnt +=1
if cnt == N:
#print('%d %f %f'%(ite, s/cnt, sum_step/cnt))
res_dict[ite]=[s/cnt,sum_step/cnt]
cnt=0
ite +=1
s = 0.0
sum_step=0
os.system('rm '+log_file2)
log_file3='/tmp/nn_log_loss.txt'
cmd_str3='sed -n /Average/p '+log_file+' >'+log_file3
print(cmd_str3)
os.system(cmd_str3)
#print('stats from ', log_file3)
print('#iteration average_winrate average_num_steps average_loss')
with open(log_file3, 'r') as f:
#print('#iteration average_loss')
ite=0
for line in f:
arr=line.split()
res=float(arr[-1])
print('%d %f %f %f'%(ite, res_dict[ite][0], res_dict[ite][1], res))
ite +=1
os.system('rm '+log_file3)
|
def degrees_converter(celsius):
"""
    Converts degrees Celsius to degrees Fahrenheit.
"""
return celsius * 9/5 + 32
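# A quick sanity check of the formula above (celsius * 9/5 + 32):
#     degrees_converter(0)    # 32.0
#     degrees_converter(100)  # 212.0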
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: CAMELS
# language: python
# name: camels
# ---
# %%
import sys
# %%
sys.path
# %%
import camels_aus
# %%
import os
from camels_aus.repository import CamelsAus, download_camels_aus
# %% [markdown]
# `CamelsAus` is a repository object that takes care of loading the data from disk and gives the user access to it as a consolidated dataset, using xarray for most data.
# %%
repo = CamelsAus()
# %% [markdown]
# `download_camels_aus` streamlines downloading and extracting the files making up CAMELS-AUS 1.0.
# %%
camels_dir = os.path.join(os.getenv("HOME"), 'data/camels/aus')
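# %%
# If the CAMELS-AUS files are not yet on disk, they can be fetched with the helper imported
# above (call left commented out here; passing the target directory is an assumption about
# its signature):
# download_camels_aus(camels_dir)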
# %%
repo.load_from_text_files(camels_dir)
repo.data
# %%
import matplotlib
from ipywidgets import Output, HBox
from ipyleaflet_dashboard_tools.gv import *
import json
from ipyleaflet import Map, GeoJSON
# %%
# %%time
ds = repo.data
v = GeoViewer(ds, lat='lat_outlet', lon='long_outlet', key='station_id')
out = Output()
click_handler_plot_ts = v.mk_click_handler_plot_ts(out, variable="streamflow_mmd")
mapview = v.build_map(click_handler_plot_ts)
mapview.layout.height = '600px'
# %%
# %%time
gj = repo.boundaries.to_json()
d = json.loads(gj)
# %%
# %%time
geo_json = GeoJSON(data=d, style = {'color': 'Blue', 'opacity':1, 'weight':1.9, 'dashArray':'9', 'fillOpacity':0.1})
mapview.add_layer(geo_json)
# %%
HBox([mapview, out])
# %%
# %%
# %%
import requests
# %%
loci_mdb = "https://gds.loci.cat/geometry/geofabric2_1_1_awradrainagedivision/9400206?_format=application/json&_view=simplifiedgeom"
def random_color(feature):
return {
'color': 'black',
'fillColor': 'green',#random.choice(['red', 'yellow', 'green', 'orange']),
}
# %%
mdb = json.loads(requests.get(loci_mdb).text)
# %%
geo_json_mdb = GeoJSON(
data=mdb,
style={
'opacity': 1, 'dashArray': '9', 'fillOpacity': 0.1, 'weight': 1
},
hover_style={
'color': 'green', 'dashArray': '0', 'fillOpacity': 0.3
},
style_callback=random_color
)
# %%
mapview.add_layer(geo_json_mdb)
# m.add_layer(bbox_poly)
# m.add_layer(geo_json_stations)
# %%
from sidecar import Sidecar
# %%
from IPython.display import display, clear_output, HTML, JSON
# %%
sc = Sidecar(title='MDB sites')
with sc:
display(mapview)
# %%
# %%
get_cap = 'http://waterinfo1-cdc.it.csiro.au:8600/geoserver/wfs?service=wfs&version=2.0.0&request=GetCapabilities'
# %%
s = requests.get(get_cap).text
# %%
s
# %%
mdb = json.loads(requests.get(get_cap).text)
# %%
|
"""
This is basically Xingyi's code adapted for LVIS.
We only work with GT annotations. This script does not parse predictions.
"""
################################################################################
## Import. ##
################################################################################
# Import lvis
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import os
import sys
import cv2
import numpy as np
import pickle
import ipdb
np.random.seed(0) # Get the same colors for the same class every time.
################################################################################
## Define things. ##
################################################################################
DATA = 'coco'
OUT_TYPE = 'gt'
DEBUG_ = False
VIS_THR = 0.2
LABEL = False
OUT_NAME_SUFFIX = "{}_{}".format(OUT_TYPE, DATA)
IMG_PATH = '/scratch/cluster/ishann/data/lvis/val2017/'
OUT_PATH = '/scratch/cluster/ishann/data/cross/{}_{}_bboxes'.format(DATA, OUT_TYPE)
if not os.path.isdir(OUT_PATH): os.makedirs(OUT_PATH)
if DATA=='lvis':
ANN_PATH = '/scratch/cluster/ishann/data/lvis/annotations/lvis_v0.5_val.json'
elif DATA=='coco':
ANN_PATH = '/scratch/cluster/ishann/data/coco/annotations/instances_val2017.json'
else:
print("Gp home script, youre drunk.")
coco = coco.COCO(ANN_PATH)
COLOR = [ 78, 154, 6]
################################################################################
## Define functions. ##
################################################################################
def _coco_box_to_bbox(box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.int32)
return bbox
def add_box(image, bbox, sc):
BOX_OFFSET = 2
color = np.array(COLOR).astype(np.int32).tolist()
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
return image
################################################################################
## Define main. ##
################################################################################
def main():
print("\nDATA:{}\nOUT_TYPE:{}\n".format(DATA, OUT_TYPE))
dets = []
img_ids = coco.getImgIds()
num_images = len(img_ids)
# ipdb.set_trace()
for i, img_id in enumerate(img_ids):
if i%10==0: print("{}/{}".format(i, len(img_ids)), end="\r")
# ipdb.set_trace()
if DEBUG_ and i>DEBUG_: break
img_info = coco.loadImgs(ids=[img_id])[0]
if DATA=="lvis":
img_name = img_info['file_name']
elif DATA=="coco":
img_name = "COCO_val2014_"+img_info['file_name']
img_path = IMG_PATH + img_name
img = cv2.imread(img_path)
gt_ids = coco.getAnnIds(imgIds=[img_id])
gts = coco.loadAnns(gt_ids)
gt_img = img.copy()
for j, pred in enumerate(gts):
bbox = _coco_box_to_bbox(pred['bbox'])
gt_img = add_box(gt_img, bbox, 1.00)
img_name = '{}_{}.jpg'.format(str(img_id).zfill(12), OUT_NAME_SUFFIX )
cv2.imwrite(os.path.join(OUT_PATH, img_name), gt_img)
################################################################################
## Execute main. ##
################################################################################
if __name__ == '__main__':
main()
|
def encDec(messageDigest, hashDigest):
n = 0
xorMess = ""
for i in messageDigest:
if n == 7:
n = 0
rawBinMess = int(i, 2) ^ int(hashDigest[n], 2)
if rawBinMess <= 31 or rawBinMess == 127:
xorMess += " " + hex(rawBinMess) + " "
else:
xorMess += chr(rawBinMess)
n += 1
return xorMess
|
#coding: utf-8
from youey.util.webcolors import *
from youey.util.prop import *
class Color(list):
def __init__(self, *args, alpha=None):
value = False
if len(args) == 0:
value = [0, 0, 0, 1]
elif len(args) == 1:
arg = args[0]
if type(arg) is Color:
value = arg.copy()
elif type(arg) in (int, float):
value = (arg,)*3
elif type(arg) in (tuple, list):
value = list(args[0])
elif type(arg) is str:
if arg == 'transparent':
value = [0, 0, 0, 0]
elif arg.startswith('rgb'):
segments = arg[:-1].split('(')
value = [float(c) for c in segments[1].split(',')]
else:
rgb = html5_parse_legacy_color(arg)
value = [rgb.red, rgb.green, rgb.blue]
elif len(args) in [3,4]:
value = args
if len(value) == 3:
value = list(value)
value.append(alpha if alpha is not None else 1.0)
elif alpha is not None:
value[3] = alpha
if not all((component <= 1.0) for component in value):
for i in range(3):
value[i] /= 255.0
super().__init__(value)
@prop
def r(self, *args, base_prop):
if args:
self[0] = args[0]
else:
return self[0]
@prop
def g(self, *args, base_prop):
if args:
self[1] = args[0]
else:
return self[1]
@prop
def b(self, *args, base_prop):
if args:
self[2] = args[0]
else:
return self[2]
@prop
def a(self, *args, base_prop):
if args:
self[3] = args[0]
else:
return self[3]
red = r
green = g
blue = b
alpha = a
@prop
def ints(self, *args, base_prop):
if args:
self.css = args[0]
else:
return (int(self.r*255), int(self.g*255), int(self.b*255), self.a)
@prop
def css(self, *args, base_prop):
if args:
c = Color(args[0])
self.r = c.r
self.g = c.g
self.b = c.b
self.a = c.a
else:
return f'rgba({",".join([str(segment) for segment in self.ints])})'
@prop
def name(self, *args, base_prop):
if args:
self.css = args[0]
else:
try:
value = rgb_to_name(tuple(self.ints[:3]))
except ValueError:
value = None
return value
@prop
def hex(self, *args, base_prop):
if args:
self.css = args[0]
else:
try:
value = rgb_to_hex(tuple(self.ints[:3]))
except ValueError:
value = None
return value
@prop
def transparent(self, *args, base_prop):
if args:
self.alpha = 0.0 if args[0] else 1.0
else:
return self.alpha == 0
def contrast_color(self, low_brightness_color='black', high_brightness_color='white'):
r,g,b,a = self
return Color(low_brightness_color) if ((r * 255 * 299 + g * 255 * 587 + b * 255 * 114) / 1000) > 150 else Color(high_brightness_color)
def to_css_color(color, alpha=None):
if type(color) is Color:
return color.css
if type(color) is str:
return color
if type(color) == tuple and len(color) >= 3:
if alpha is None:
alpha = color[3] if len(color) == 4 else 1.0
if all((component <= 1.0) for component in color):
color_rgb = [int(component*255) for component in color[:3]]
            color_rgb.append(alpha)
color = tuple(color_rgb)
return f'rgba{str(color)}'
def from_css_color(css_color_str):
segments = css_color_str[:-1].split('(')
components = [float(c) for c in segments[1].split(',')]
if len(components) == 3:
components.append(1.0)
return components
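# Example usage (sketch only):
#     c = Color('red')                   # named CSS color -> [1.0, 0.0, 0.0, 1.0]
#     c.ints                             # (255, 0, 0, 1.0)
#     c.css                              # 'rgba(255,0,0,1.0)'
#     Color('transparent').transparent   # True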
|
# The URLs should really go here... but I can't do that at the moment because I don't know how :/
from flask.ext.classy import FlaskView, route
from flask import request, current_app, redirect
@route("/test")
def testSomething():
return "hello world"
|
from appJar import gui
import sqlite3
'''
CREATE TABLE USUARIO(ID INTEGER PRIMARY KEY AUTOINCREMENT, NAME TEXT NOT NULL, EMAIL TEXT NOT NULL, TYPE TEXT NOT NULL, DEPARTMENT TEXT NOT NULL, PROGRAM TEXT NOT NULL, CAMPUS TEXT NOT NULL, PERIODICITY TEXT NOT NULL);
CREATE TABLE RSS(ID INT PRIMARY KEY NOT NULL, NAME TEXT NOT NULL, TYPE TEXT NOT NULL, KNOWLEDGE_AREA TEXT NOT NULL, DATABASE_URL TEXT NOT NULL);
CREATE TABLE RSS_DB(ID INT PRIMARY KEY, BDD TEXT NOT NULL,URL TEXT NOT NULL, FOREIGN KEY(BDD) REFERENCES RSS(ID))
CREATE TABLE USUARIO_RSS(USUARIO_ID INT , RSS_ID INT, FOREIGN KEY(USUARIO_ID) REFERENCES USUARIO(ID));
'''
def reOrganize(var):
    print(app.getOptionBox(var))
def generate_reports():
    print("Generando Reportes")
def menuPress(mn):
    if mn == "Generar Reportes":
        generate_reports()
    else:
        conn.close()
        app.stop()
def usuarioPress(mn):
    # Values are ordered to match the USUARIO column order: NAME, EMAIL, TYPE, DEPARTMENT, PROGRAM, CAMPUS, PERIODICITY.
    values = [app.getEntry('usuario_name'), app.getEntry('usuario_email'), app.getOptionBox('Tipo'), app.getOptionBox('Departamento'), app.getOptionBox('Programa'), app.getOptionBox('Campus'), app.getOptionBox('Periodicidad')]
    if mn == "Guardar":
        c.execute('INSERT INTO USUARIO(NAME, EMAIL, TYPE, DEPARTMENT, PROGRAM, CAMPUS, PERIODICITY) VALUES (?, ?, ?, ?, ?, ?, ?)', values)
        conn.commit()
        print("Guardando Usuario")
    elif mn == "Editar":
        values.append('1')
        c.execute('UPDATE USUARIO SET NAME=?, EMAIL=?, TYPE=?, DEPARTMENT=?, PROGRAM=?, CAMPUS=?, PERIODICITY=? WHERE ID=?', values)
        conn.commit()
        print("Editando Usuario")
    else:
        id_usuario = 1
        c.execute('DELETE FROM USUARIO WHERE ID=?', (id_usuario,))
        conn.commit()
        print("Eliminando Usuario")
def rssPress(mn):
    if mn == "Guardar":
        print("Guardando RSS")
    else:
        print("Eliminando RSS")
conn = sqlite3.connect('example.db')
c = conn.cursor()
app = gui('Portal de Informacion')
app.addMenuList('File', ['Generar Reportes','-', 'Close'], menuPress)
app.addMenuList('Usuario', ['Guardar', 'Editar', 'Eliminar'], usuarioPress)
app.addMenuList('RSS', ['Guardar', 'Eliminar'], rssPress)
app.addLabel('usuarios', 'Usuarios', 0, 0, 4)
app.setLabelBg('usuarios', 'red')
app.addLabel('usuario_name', 'Nombre de Usuario', 1, 0, 1)
app.addEntry('usuario_name', 1, 1, 1)
app.addLabel('usuario_email', 'Correo', 1, 2, 1)
app.addEntry('usuario_email', 1, 3, 1)
app.addLabelOptionBox('Tipo', ['Maestro', 'Estudiante', 'Administrativo'], 2, 0, 2)
app.addLabelOptionBox('Campus', ['Mexicali', 'Tijuana', 'Ensenada'], 2, 2, 2)
app.addLabelOptionBox('Departamento', ['CIENCIAS SOCIALES', 'DERECHO', 'PSICOLOGIA', 'INGENIERIA', 'ADMINISTRACION Y NEGOCIO', 'ADMINISTRATIVO', 'SOCIEDAD', 'TECNOLOGIA', 'PROPIEDAD INTELECTUAL', 'CULTURA', 'INVESTIGACION'], 3, 0, 2)
app.addLabelOptionBox('Programa', ['LICENCIATURA EN CONTADOR PUBLICO', 'DOCTORADO EN ADMINISTRACION', 'DOCTADORADO EN INGENIERIA', 'DOCTADORADO EN EDUCACION', 'DOCTORADO EN PSICOLOGIA', 'INGENIERIA EN CIENCIAS COMPUTACIONALES', 'INGENIERIA EN CIBERNETICA ELECTRONICA', 'INGENIERIA EN DISENO GRAFICO DIGITAL', 'INGENIERIA EN ENERGIAS RENOVABLES', 'INGENIERIA INDUSTRIAL', 'INGENIERIA MECANICA', 'INGENIERIA MECATRONICA', 'INGENIERIA DE SOFTWARE', 'LICENCIATURA EN ADMINISTRACION DE EMPRESAS', 'LICENCIATURA EN ADMINISTRACION DE MERCADOTECNIA', 'LICENCIATURA EN ADMINISTRACION DE NEGOCIOS', 'LICENCIATURA EN DERECHO', 'LICENCIATURA EN DISENO GRAFICO', 'PSICOLOGIA INFANTIL', 'LICENCIATURA EN NEGOCIOS INTERNACINALES', 'PSICOLOGIA CLINICA', 'PSICOLOGIA EDUCATIVA', 'PSICOLOGIA ORGANIZACIONAL', 'PREPARATORIA'], 3, 2, 2)
app.setOptionBoxChangeFunction('Tipo', reOrganize)
app.addLabel('id_rss', 'ID RSS', 4, 0, 1)
app.addEntry('id_rss', 4, 2, 3)
app.addLabelOptionBox('Periodicidad', ['1 Semana', '2 Semanas', '1 mes', '2 meses', '3 meses'], 5, 0, 4)
app.addLabel('rss_bd', 'RSSs', 7, 0, 4)
app.setLabelBg('rss_bd', 'blue')
app.addLabel('rss_name', 'Nombre RSS', 8, 0, 1)
app.addEntry('rss_name', 8, 1,1)
app.addLabelOptionBox('Area_Conocimiento', ['DERECHO', 'PSICOLOGIA', 'EDUCACION', 'FILOSOFIA', 'CS. SOCIALES', 'INGENIERIA', 'CS. COMPUTACIONALES', 'ADMINISTRACION Y NEGOCIOS', 'OTROS'], 8, 2, 2)
app.go()
|
from is_core.auth.permissions import BasePermission
class RelatedCoreAllowed(BasePermission):
"""
Grant permission if core permission with the name grants access
"""
name = None
def __init__(self, name=None):
super().__init__()
if name:
self.name = name
def has_permission(self, name, request, view, obj=None):
return (
view.related_core.permission.has_permission(self.name or name, request, view, obj)
if view.related_core else True
)
|
from .alpha_spheres import get_alpha_spheres_set
from . import clustering as clustering
|
import pytest
from feeds.external_api.groups import (
get_user_groups,
get_group_names,
validate_group_id
)
from feeds.exceptions import GroupsError
def test_validate_group_id_valid(mock_valid_group):
g = "a_group"
mock_valid_group(g)
assert validate_group_id(g)
def test_validate_group_id_invalid(mock_invalid_group):
g = "another_group"
mock_invalid_group(g)
assert validate_group_id(g) is False
def test_validate_group_id_fail(mock_network_error):
with pytest.raises(GroupsError) as e:
validate_group_id("fail")
assert "Unable to fetch group information" in str(e)
def test_get_user_groups(mock_user_groups):
dummy_ret = [{"id": "g1", "name": "Group Name"}]
mock_user_groups(dummy_ret)
groups = get_user_groups("some_token")
assert groups == dummy_ret
def test_get_user_groups_fail(mock_network_error):
with pytest.raises(GroupsError) as e:
get_user_groups("some_token")
assert "Unable to fetch group information" in str(e)
def test_get_group_names(mock_group_names):
dummy_ret = [
{"id": "g1", "name": "Group 1"},
{"id": "g2", "name": "Group 2"}
]
std = {"g1": "Group 1", "g2": "Group 2"}
mock_group_names(dummy_ret)
names = get_group_names(["g1", "g2"], "some_token")
for n in ["g1", "g2"]:
assert n in names
assert names[n] == std[n]
|
from setuptools import setup
setup(name='local_factorySim',
version='0.2dev0',
packages=['factorySim',],
install_requires=['gym', 'pycairo', 'pandas', 'fabulous', 'Polygon3',]
)
|
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from main.models import Contest, Problem, Submission
class ContestAdmin(ModelAdmin):
list_display = ("ccode", "title", "can_view", "can_submit")
class ProblemAdmin(ModelAdmin):
list_display = ("pcode", "title", "contest")
class SubmissionAdmin(ModelAdmin):
list_display = ("id", "problem", "user", "lang", "get_status_str")
admin.site.register(Contest, ContestAdmin)
admin.site.register(Problem, ProblemAdmin)
admin.site.register(Submission, SubmissionAdmin)
|
print(f'\033[33m{"—"*30:^30}\033[m')
print(f'\033[36m{"EXERCÍCIO Nº 2":^30}\033[m')
print(f'\033[33m{"—"*30:^30}\033[m')
print("Confirme sua data de nascimento")
dia = input("Dia: ")
mes = input("Mês: ")
ano = input("Ano: ")
print(f"Você nasceu no dia {dia} de {mes} de {ano}. Correto?")
|
import random
import sys
import uuid
import datetime
from confluent_kafka import Producer
from faker import Faker
fake = Faker('en_US')
if __name__ == '__main__':
if len(sys.argv) != 4:
sys.stderr.write('Usage: %s <bootstrap-brokers> <topic> <num msgs>\n' % sys.argv[0])
sys.exit(1)
broker = sys.argv[1]
topic = sys.argv[2]
total_msg = sys.argv[3]
# Producer configuration
# See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
conf = {'bootstrap.servers': broker}
# Create Producer instance
p = Producer(**conf)
# Optional per-message delivery callback (triggered by poll() or flush())
# when a message has been successfully delivered or permanently
# failed delivery (after retries).
def delivery_callback(err, msg):
if err:
sys.stderr.write('%% Message failed delivery: %s\n' % err)
else:
pass
#sys.stderr.write('%% Message delivered to %s [%d] @ %d\n' %
# (msg.topic(), msg.partition(), msg.offset()))
# Read lines from stdin, produce each line to Kafka
num_msg = int(total_msg)
# Get current start date to measure data generation time
ct1 = datetime.datetime.today()
print("Start time:", ct1)
# Initialize variables
line1=0
# Generate Packet
fake_data = {}
for i in range(0, num_msg):
gender = fake.random_element(elements=('M', 'F'))
if(gender == 'M'):
first_name = fake.first_name_male()
else:
first_name = fake.first_name_female()
# fake_data={}
customer_id = str(uuid.uuid4())
first_name = first_name
last_name = fake.last_name()
address = fake.street_address()
city = fake.city()
state = fake.state_abbr()
zip = fake.zipcode()
email = fake.ascii_free_email()
gender = gender
pet_type = fake.random_element(elements=('Dog', 'Fish', 'Cat', 'Reptile', 'Ferret', 'Hamster', 'Bird', 'Frog', 'Rabbit', 'Spider'))
last_visit = fake.past_date().strftime("%Y%m%d")
last_store = fake.random_element(elements=('East Fabian', 'Gerholdchester', 'Wintontown', 'New Candishaven', 'Roobfurt', 'Simonemouth'))
number_items_purchased = str(fake.random_int(min=1, max=30))
lifetime_purchase_amount = str(random.uniform(1.0, 100.8))
grooming_services = str(fake.boolean())
organic_food = str(fake.boolean())
vet_referral = str(fake.boolean())
# debug
# print(col_data)
# Generate the payload
line = '{ "schema": { "type": "struct", "fields": [ { "type": "string", "optional": false, "field": "id" }, ' + \
'{ "type": "string", "optional": false, "field": "first_name" }, { "type": "string", "optional": false, "field": "last_name" }, ' + \
'{ "type": "string", "optional": false, "field": "address" }, { "type": "string", "optional": false, "field": "city" }, ' + \
'{ "type": "string", "optional": false, "field": "state" }, { "type": "string", "optional": false, "field": "zip" }, ' + \
'{ "type": "string", "optional": false, "field": "email" }, { "type": "string", "optional": false, "field": "gender" }, ' + \
'{ "type": "string", "optional": false, "field": "pet_type" }, { "type": "string", "optional": false, "field": "last_visit" }, ' + \
'{ "type": "string", "optional": false, "field": "last_store" }, { "type": "int32", "optional": false, "field": "number_items_purchased" }, ' + \
'{ "type": "float", "optional": false, "field": "lifetime_purchase_amount" }, { "type": "string", "optional": false, "field": "grooming_services" }, ' + \
'{ "type": "string", "optional": false, "field": "organic_food" }, { "type": "string", "optional": false, "field": "vet_referral" } ] }, ' + \
'"payload": { "id": "' + customer_id + '", "first_name": "' + first_name + '", "last_name": "' + last_name + '", ' + \
'"address": "' + address + '", "city": "' + city + '", "state": "' + state + '", ' + \
'"zip": "' + zip + '", "email": "' + email + '", "gender": "' + gender + '", "pet_type": "' + pet_type + '", ' + \
'"last_visit": "' + last_visit + '", "last_store": "' + last_store + '", "number_items_purchased": ' + number_items_purchased + \
', "lifetime_purchase_amount": ' + lifetime_purchase_amount + ', "grooming_services": "' + grooming_services + '", ' + \
'"organic_food": "' + organic_food + '", "vet_referral": "' + vet_referral + '" } }'
try:
# Produce line (without newline)
p.produce(topic, line.rstrip(), callback=delivery_callback)
except BufferError:
pass
p.poll(0)
print(str(i) + " " + customer_id)
ct2 = datetime.datetime.today()
print("Start time:", ct1)
print("End time:", ct2)
print("Total time taken to produce message:", ct2-ct1)
# Wait until all messages have been delivered
sys.stderr.write('%% Waiting for %d deliveries\n' % len(p))
p.flush()
|
"""
Given an array arr of 4 digits, find the latest 24-hour time that can be made using each digit exactly once.
24-hour times are formatted as "HH:MM", where HH is between 00 and 23, and MM is between 00 and 59. The earliest 24-hour time is 00:00, and the latest is 23:59.
Return the latest 24-hour time in "HH:MM" format. If no valid time can be made, return an empty string.
Example 1:
Input: A = [1,2,3,4]
Output: "23:41"
Explanation: The valid 24-hour times are "12:34", "12:43", "13:24", "13:42", "14:23", "14:32", "21:34", "21:43", "23:14", and "23:41". Of these times, "23:41" is the latest.
Example 2:
Input: A = [5,5,5,5]
Output: ""
Explanation: There are no valid 24-hour times as "55:55" is not valid.
Example 3:
Input: A = [0,0,0,0]
Output: "00:00"
Example 4:
Input: A = [0,0,1,0]
Output: "10:00"
Constraints:
arr.length == 4
0 <= arr[i] <= 9
"""
from typing import List
import itertools


class Solution:
def largestTimeFromDigits(self, A: List[int]) -> str:
largest = None
for a in itertools.permutations(A):
a = ''.join((str(i) for i in a))
if int(a[:2]) >= 24 or int(a[2:]) >= 60:
continue
if a == "2400":
continue
if largest is None or int(largest) < int(a):
largest = a
if largest is None:
return ""
return "{}:{}".format(largest[:2], largest[2:])
|
from django.core.management import BaseCommand
from django.utils.timezone import now
from django_freeradius.models import RadiusBatch
class Command(BaseCommand):
help = "Deactivating users added in batches which have expired"
def handle(self, *args, **options):
radbatches = RadiusBatch.objects.filter(expiration_date__lt=now())
for batch in radbatches:
batch.expire()
        self.stdout.write('Deactivated users of expired batches')
|
# Author
# Pedro Silva
import configparser
import argparse
import nexmo
import pandas
class Notify(object):
"""
Notify contacts
    Allows for pushing sms information to a set of contact people.
It is capable of populating the list of clients by
parsing the contents of an excel file
Attributes:
nexmo (obj) : provider's object
api_key (str) : Nexmo's API KEY
api_secret (str) : Nexmo's API secret
sender (str) : Sender's name
fromaddr (str) : email's sender
smtp_pwd (str) : email's user password
smtp_server (str) : smtp server location
smtp_port (int) : smtp port
clients ([]) : list of clients
"""
def __init__(self, sender, api_key, api_secret,
fromaddr, smtp_pwd, smtp_server, smtp_port):
super(Notify, self).__init__()
self.nexmo = nexmo.Client(key=api_key, secret=api_secret)
self.api_key = api_key
self.api_secret = api_secret
self.sender = sender
self.fromaddr = fromaddr
self.smtp_pwd = smtp_pwd
        self.smtp_server = smtp_server
self.smtp_port = smtp_port
self.clients = list()
def push_sms(self, content):
"""
Pushes sms to contact list
Args:
content (str) : SMS content
"""
if not self.clients:
return
for client in self.clients:
response = self.nexmo.send_message({'from': self.sender,
'to': '{0}'.format(client.phone),
'text': content})
client.sms = response
def build_release_content(self, title, body):
"""
Builds an sms based on the contents of the .ini file
Your password for TITLE is BODY
Args:
title (str) : sms title
body (str) : sms body
"""
return 'Your password for {0} is {1}'.format(title, body)
def build_simple_sms(self, content):
"""
Builds an sms based on the contents of the .ini file
Args:
content (str) : sms content
"""
return '{0}'.format(content)
def send_email_confirmation(self,
subject,
in_success,
in_error="We couldn't reach you by SMS, please get in touch with us!"):
"""
Send confirmation email
Loops through the SMS answers and sends out an email
Args:
subject (str) : email's subject
in_success (str) : email's body if SMS status is OK
in_error (str) : email's body if SMS status is NOK
"""
import smtplib
from email.message import EmailMessage
# Send the message via a remote SMTP server
server = smtplib.SMTP(self.smtp_server, self.smtp_port)
server.ehlo()
server.starttls()
server.login(self.fromaddr, self.smtp_pwd)
if self.clients:
for client in self.clients:
msg = EmailMessage()
msg['Subject'] = subject
msg['From'] = self.fromaddr
if client.sms is None:
body = in_success
elif client.sms['messages'][0]['status'] == 0:
body = in_success
else:
body = in_error
msg['To'] = client.email
msg.set_content(body)
server.send_message(msg)
server.quit()
def parser(self, filepath):
"""
Parse xlsx
Reads an excel list with the following format
        Name | Surname | Phone | Email
Creates and stores an object representation for each client
Args:
filepath (str) : filepath for xls file
"""
wb = pandas.read_excel(filepath)
for idx, row in wb.iterrows():
try:
self.clients.append(Client(row['Name'],
row['Surname'],
row['Phone'],
row['Email']))
except:
raise KeyError('Please check xlsx content')
def print_response(self):
"""
Print Responses
Informs the user about what happened with the
        latest messages that were sent
"""
for client in self.clients:
if client.sms:
response = client.sms['messages']
print('{{{{"client", "{0}"}},"status":{1}}}'.format(client, response))
else:
print('{{{{"client", "{0}"}},"status":{1}}}'.format(client, None))
class Client(object):
"""Creates a client object for each contact
Attributes:
firstname (str) : Contact's firstname
surname (str) : Contact's surname
        phone (str) : Contact's phone number
        email (str) : Contact's email address (optional)
"""
def __init__(self, firstname, surname, phone, email=None):
super(Client, self).__init__()
self.firstname = firstname
self.surname = surname
self.phone = phone
self.email = email
self.sms = None
def __str__(self):
        return ('{0} {1} {2} {3}'.format(self.firstname,
self.surname,
self.phone,
self.email))
def __repr__(self):
return self.__str__()
def user_inputs():
parser = argparse.ArgumentParser()
parser.add_argument('--configuration', default='details.ini', type=str)
parser.add_argument('--destination', default=None, type=str)
parser.add_argument('--email', default=None, type=str)
parser.add_argument('--skip_sms', default=None, action='store_true')
args = parser.parse_args()
return args
if __name__ == "__main__":
# parse user input
args = user_inputs()
# reads necessary details
config = configparser.ConfigParser()
config.read(args.configuration)
# validate fields
try:
sender = config.get('SMS','SENDER')
api_key = config.get('NEXMO','API_KEY')
api_secret = config.get('NEXMO','API_SECRET')
except:
raise KeyError('Missing NEXMO details')
try:
destination = config.get('SMS','DESTINATION')
except:
if args.destination is None:
raise KeyError('Please enter a valid destination')
try:
sms_content = config.get('SMS','CONTENT')
except:
raise KeyError('Missing SMS content')
try:
fromaddr = config.get('EMAIL','SENDER')
smtp_pwd = config.get('EMAIL','PASSWORD')
except:
raise KeyError('Missing EMAIL details (FROM and PASSWORD)')
try:
smtp_server = config.get('EMAIL','SMTP')
except:
smtp_server = 'smtp.office365.com'
try:
smtp_port = int(config.get('EMAIL','PORT'))
except:
smtp_port = 587
# email confirmation details
try:
subject = config.get('SUBJECT')
except:
subject = 'Email notification'
try:
in_success=config.get('SUCCESS')
except:
in_success = 'We have sent you an SMS, please check your phone!'
try:
in_error=config.get('ERROR')
except:
in_error = 'We could not reach you by SMS, please get in touch with us!'
# creates notifier object with NEXMO and MAIL details
notify = Notify(sender=sender,
api_key=api_key,
api_secret=api_secret,
fromaddr=fromaddr,
smtp_pwd=smtp_pwd,
smtp_server=smtp_server,
smtp_port=smtp_port)
# command line override sets only one client
if args.destination is None:
notify.parser(destination)
else:
notify.clients.append(Client('CMD', 'Line',
args.destination,
args.email))
# send out notifications to clients
if not args.skip_sms:
notify.push_sms(notify.build_simple_sms(sms_content))
notify.send_email_confirmation(subject, in_success, in_error)
# for reference
notify.print_response()
|
# process media objects returned by Twitter's APIs
|
#! python2
#coding:utf8
from abc import ABCMeta, abstractmethod
import time
import sys
import os
from Rule.Rules import *
from File.Files import *
from PktsParser import *
class IReplay:
__metaclass__ = ABCMeta
@abstractmethod
def initFilter(cls):
pass
@abstractmethod
def startReplay(cls):
pass
@abstractmethod
def stopReplay(cls):
pass
class Replay(IReplay):
dir = ''
__stop = 0
__onePkgTargetIp = ''
__onePkgTargetPort = ''
__onePkgTimeStamp = 0
__curTargetIp = ''
__curTargetPort = ''
__curOriSourceIp = ''
__curOriSourcePort = ''
__curStartTime = 0
__curEndTime = 0
__repFiles = []
__rule = None
__filecls = None
__pktsparser = None
@classmethod
def __init__(cls):
cls.initFilter()
cls.__filecls = PcapFile()
cls.__pktsparser = PktsParser()
@classmethod
def getCurTargetIp(cls):
return cls.__curTargetIp
@classmethod
def getCurTargetPort(cls):
return cls.__curTargetPort
@classmethod
def getCurOriSourceIp(cls):
return cls.__curOriSourceIp
@classmethod
def getCurOriSourcePort(cls):
return cls.__curOriSourcePort
@classmethod
def getCurStartTime(cls):
return cls.__curStartTime
@classmethod
def getCurEndTime(cls):
return cls.__curEndTime
@classmethod
def __convertUnixTimestamp(cls, tm):
listTime = tm.split("T")
ymd = listTime[0].split("-")
hms = listTime[1].split(":")
t = (int(ymd[0]), int(ymd[1]), int(ymd[2]), int(hms[0]), int(hms[1]), int(hms[2]), 0, 0, 0)
secs = time.mktime(t)
return int(secs)
@classmethod
def __isCapFileExist(cls):
ret = True
if False == os.path.exists(cls.dir):
print "target replay ip packages not exist!"
ret = False
cls.__repFiles = []
cls.__filecls.getDirFiles(cls.dir, cls.__repFiles)
if len(cls.__repFiles) <= 0:
print "target replay ip packages not exist!"
ret = False
return ret
@classmethod
def initFilter(cls):
cls.__rule = PcapRules.GetInstance()
cls.__curTargetIp = cls.__rule.getReplayTargetIp()
cls.__curTargetPort = cls.__rule.getReplayTargetPort()
if len(cls.__rule.getReplayStarttime()) > 0:
cls.__curStartTime = cls.__convertUnixTimestamp(cls.__rule.getReplayStarttime())
if len(cls.__rule.getReplayEndtime()) > 0:
cls.__curEndTime = cls.__convertUnixTimestamp(cls.__rule.getReplayEndtime())
cls.dir = '../file/'
cls.dir += cls.__curTargetIp + "/"
@classmethod
def __parseFilePath(cls, fpath):
cls.__onePkgTargetIp = ''
cls.__onePkgTargetPort = ''
arrs = fpath.split('/')
if len(arrs) > 3:
cls.__onePkgTargetIp = arrs[2]
if len(arrs) > 4:
cls.__onePkgTargetPort = arrs[3]
cls.__onePkgTimeStamp = arrs[len(arrs)-1].split('.')[0]
@classmethod
def __canReplay(cls):
if int(cls.__curStartTime) > 0 and int(cls.__curEndTime) > 0 and (int(cls.__onePkgTimeStamp) < int(cls.__curStartTime) or int(cls.__onePkgTimeStamp) > int(cls.__curEndTime)):
return False
if False == (cls.__onePkgTargetIp == cls.__curTargetIp and (cls.__onePkgTargetPort == cls.__curTargetPort or len(cls.__onePkgTargetPort) <= 0)):
return False
return True
@classmethod
def __replayOneCapFile(cls, fpath):
if cls.__canReplay() == True:
cls.__pktsparser.parseCapStream(fpath)
@classmethod
def __replayCapFiles(cls):
for idx, val in enumerate(cls.__repFiles):
if cls.__stop == 1:
return
cls.__parseFilePath(val)
cls.__replayOneCapFile(val)
@classmethod
def startReplay(cls):
if cls.__isCapFileExist() == False:
return
cls.__stop = 0
print "Begin Replay... : target ip " + cls.__curTargetIp + " port " + cls.__curTargetPort
cls.__replayCapFiles()
@classmethod
def stopReplay(cls):
cls.__stop = 1
print "Stop Replay! : target ip " + cls.__curTargetIp + " port " + cls.__curTargetPort
def UnitTest():
th = Replay()
if __name__=='__main__':
UnitTest()
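# Directory layout assumed by __parseFilePath (inferred from the path splitting
# above, shown here only as an illustration):
#
#   ../file/<target_ip>/<target_port>/<unix_timestamp>.pcap
#
# startReplay() walks this tree and replays every capture whose ip/port and
# timestamp pass the filter loaded from PcapRules.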
|
import pathlib
import time
import watchdog.events
import watchdog.observers
from GlobusTransfer import GlobusTransfer
from .log import logger
class Handler(watchdog.events.PatternMatchingEventHandler):
def __init__(self, args):
# Set the patterns for PatternMatchingEventHandler
watchdog.events.PatternMatchingEventHandler.__init__(
self, patterns=["*"], ignore_directories=True, case_sensitive=False
)
self.file_list = FileList()
self.expired_lists = []
self.globus = GlobusTransfer(
args.source,
args.destination,
args.destination_dir,
args.path,
notify=args.notify,
)
self.path = args.path
self.iteration = 0 # How many round trips have we made
def on_created(self, event):
logger.info("Watchdog received created event - % s." % event.src_path)
self.file_list.files[event.src_path] = time.time()
# Event is created, you can process it now
    def on_modified(self, event):
        logger.debug("Watchdog received modified event - % s." % event.src_path)
        # Event is modified, you can process it now
        now = time.time()
        if event.src_path in self.file_list.files:
            old_stamp = self.file_list.files[event.src_path]
            logger.debug(f"Old Stamp: {old_stamp} New Stamp {now}")
        else:
            logger.debug(f"Modified file not yet in list, inserting {now}")
        self.file_list.files[event.src_path] = now
def new_iteration(self, dwell_time):
"""
Start a new pass of the following.
Create new FileList()
For each file in main filelist where age > dwell_time move to file list
If FileList() not empty
Push onto list and create globus transfer
"""
new_list = FileList()
self.iteration += 1
for path, last_seen in self.file_list.files.items():
logger.debug(f"Path: {path} last seen: {last_seen}")
# is now - last_seen > dwell_time add to transfer list
now = time.time()
delta = now - last_seen
if delta > dwell_time:
logger.debug(f"File {path} Dwell Expired")
# add path to new list
new_list.files[path] = last_seen
# Submit to globus
logger.debug(
f"Adding {path} to transfer for iteration {self.iteration}"
)
self.globus.add_item(path, label=f"{self.iteration}")
        # delete from the original list
        # can't be done inside the loop above: the dictionary would change size during iteration
for path, last_seen in new_list.files.items():
del self.file_list.files[path]
if new_list.files:
taskid = self.globus.submit_pending_transfer()
logger.info(f"Submitted Globus TaskID: {taskid}")
new_list.taskid = taskid
logger.debug("New List is not empty adding to expired_lists")
self.expired_lists.append(new_list)
# TODO check for each expired_lists.taskid status
for filelist in self.expired_lists:
taskid = filelist.taskid
resp = self.globus.tc.get_task(taskid)
logger.debug(f"Status of {taskid} is {resp['status']}")
# Transfer complete
if resp["status"] == "SUCCEEDED":
for path in filelist.files:
logger.debug(f"Deleting {path}")
# TODO: make function and check if atime is younger than stored time
pathlib.Path(path).unlink()
# Delete entry from expired_lists
self.expired_lists.remove(filelist)
# not complete but not healthy
elif resp["nice_status"] != "Queued":
logger.error(
f"Globus TaskID {taskid} unhealthy {resp['nice_status']} : {resp['nice_status_short_description']}"
)
logger.error(resp)
def prepopulate(self):
"""Prepopulate the main filelist with existing files."""
# Walk the main path
logger.info(f"Prepopulated requested, walking {self.path}")
p = pathlib.Path(self.path).glob("**/*")
files = (x for x in p if x.is_file())
ts = time.time() # only call time once
for f in files:
logger.debug(f"Prepopulating {f} to FileList")
self.file_list.files[str(f)] = ts
def status(self, details=False):
"""Dump the status of the current handler."""
logger.info(f"Currently have {len(self.expired_lists)} expired lists in flight")
logger.info(
f"Currently watching {len(self.file_list.files)} files under their dwell-time"
)
if details:
for path, value in self.file_list.files.items():
logger.info(f"Watching File: {path} Last Seen: {value}")
for filelist in self.expired_lists:
logger.info(f"Dumping expired list TaskID: {filelist.taskid}")
for path in filelist.files:
logger.info(f"Expired Entry: {filelist.taskid} Path: {path}")
class FileList:
def __init__(self):
self.files = {}
self.taskid = None
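# A minimal sketch of how this handler could be wired to a watchdog Observer.
# The argparse namespace below is hypothetical; the real attribute values
# (source, destination, destination_dir, path, notify) come from whatever CLI
# drives this module, and the dwell/interval numbers are assumptions.
#
#   import argparse
#
#   args = argparse.Namespace(source="SRC_ENDPOINT", destination="DST_ENDPOINT",
#                             destination_dir="/data/in", path="/data/out",
#                             notify=None)
#   handler = Handler(args)
#   handler.prepopulate()
#   observer = watchdog.observers.Observer()
#   observer.schedule(handler, path=args.path, recursive=True)
#   observer.start()
#   try:
#       while True:
#           time.sleep(60)                      # check interval (assumption)
#           handler.new_iteration(dwell_time=300)
#           handler.status()
#   finally:
#       observer.stop()
#       observer.join()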
|
# from tracemalloc import start
from flair.data import Sentence
from flair.models import SequenceTagger
from flair.models.sequence_tagger_model import MultiTagger
from flair.data import Token
from tqdm import tqdm
import base as be
import mspacy as msp
class Flair:
"""Base class for Flair, reads in the basic parameters."""
def __init__(self, config):
self.outname = config["advanced_options"]["output_dir"] + config["corpus_name"]
self.input = config["input"]
self.lang = config["language"]
config = config["flair_dict"]
self.job = config["job"]
        # load a tagger appropriate for the configured job
class flair_pipe(Flair):
"""Pipeline class for Flair, build pipeline from config and apply it to data.
Inherits basic parameters from base class."""
# initialize flair NER by loading the tagger specified by lang
def __init__(self, lang):
super().__init__(lang)
# first, check for language
if self.lang == "de":
# set model for spacy senter
self.model = "de_core_news_md"
# then check for type of input, is it a str or a list of strings
if type(self.job) == str:
# if it is a string we will only need the normal sequence tagger
if self.job == "ner":
self.tagger = SequenceTagger.load("de-ner")
elif self.job == "pos":
self.tagger = SequenceTagger.load("de-pos")
# if it is not a str it should be a list of strings
elif type(self.job) == list:
# we will need the MultiTagger -> can not just use the passed arguments here
# as the keywords for german standard models are different
# -> maybe allow specification of certain models by the user later on?
self.tagger = MultiTagger.load(["de-pos", "de-ner"])
# same stuff for english
elif self.lang == "en":
# set model for spacy senter
self.model = "en_core_web_md"
if type(self.job) == str:
if self.job == "ner":
self.tagger = SequenceTagger.load("ner")
elif self.job == "pos":
self.tagger = SequenceTagger.load("pos")
elif type(self.job) == list:
self.tagger = MultiTagger.load(self.job)
# as flair requires sentencized text here is wrapper to a function in
# mspacy.py which sentencizes a data-string for English or German text
def senter_spacy(self, data: str) -> Flair:
"""Function to sentencize English or German text using the mspacy module.
Args:
data[str]: Input data string containing the text to be sentencized."""
self.sents = msp.MySpacy.sentencize_spacy(self.model, data)
return self
def apply_to(self) -> Flair:
"""Apply chosen Tagger to data after turning sentences list of strings into list of
Sentence objects."""
        # wrap the received sentence strings from the spacy senter into Sentence objects
        self.sentences = [Sentence(sent[0]) for sent in self.sents]
        # apply the tagger to the list of Sentences
self.tagger.predict(self.sentences)
return self
def get_out(self, ret=False, start=0) -> list or None:
"""Assemble output post-pipeline.
Args:
ret[bool]=False: Return output as list (True) or write to file (False).
start[int]=0: Start index for data. (Maybe not needed?)."""
out = out_object_flair(self.sentences, self.job, start=0).start_output().out
for sent in self.sentences:
out.append("<s>\n")
out = out_object_flair(sent, self.job, start=0).iterate_tokens(out)
out.append("</s>\n")
if ret:
return out
elif not ret:
be.OutObject.write_vrt(self.outname, out)
# be.encode_corpus.encode_vrt("test", self.outname, self.job, "flair")
def get_multiple(self, chunks: list, ret=False) -> list or None:
"""Iterate through a list of chunks generated by be.chunk_sample_text, tag the tokens
and create output for full corpus, either return as list or write to .vrt.
Args:
chunks[list[list[str]]]: List of chunks which are lists containing
[opening <>, text, closing <>].
            ret[bool]=False: Whether to return output as list (True) or write to file (False)."""
self.sentences = None
out = out_object_flair(self.sentences, self.job, start=0).start_output().out
# iterate through the chunks
for chunk in tqdm(
chunks,
total=len(chunks),
bar_format="{l_bar}{bar:20}{r_bar}{bar:-20b}",
unit="chunks",
position=0,
leave=True,
):
# get the "< >" opening statement
if chunk[0] != "":
out.append(chunk[0] + "\n")
tmp_ = self.senter_spacy(chunk[1]).apply_to()
for sent in tmp_.sentences:
out = out_object_flair(sent, tmp_.job, start=0).iterate_tokens(out)
# append the "< >" closing statement
if chunk[2] != "":
out.append(chunk[2] + "\n")
# either return or write to file
if ret:
return out
elif not ret:
            be.OutObject.write_vrt(self.outname, out)
# be.encode_corpus.encode_vrt("test_chunks", self.outname, self.job, "flair")
# I guess we will need these more or less for every module separately as the
# internal structure of the returned objects varies...
# def __call__(self, tokens):
# sentences = [Sentence(tokens)]
# self.tagger.predict(sentences)
# self.named_entities = defaultdict(list)
# for sentence in sentences:
# for entity in sentence.get_spans():
# self.named_entities[
# "Start {} End {}".format(
# entity.start_pos, str(entity.end_pos).split()[0]
# )
# ].append([entity.text, entity.labels[0]])
# return self.named_entities
class out_object_flair(be.OutObject):
"""Postprocessing class for Flair. Inherits base OutObject."""
def __init__(self, doc, jobs, start) -> None:
super().__init__(doc, jobs, start)
self.job = jobs
self.sentences = doc
self.out = []
    def start_output(self):
"""Initialize the output list."""
self.out.append("!")
if type(self.job) == str:
self.out[0] += " " + self.job
elif type(self.job) == list:
for job in self.job:
self.out[0] += " " + job
self.out[0] += "\n"
return self
def iterate_tokens(self, out: list) -> list:
"""Iterate through a sentence."""
for token in self.sentences:
self.assemble_output(token, out)
return out
def assemble_output(self, token: Token, out: list) -> list:
"""Build output line from a token.
Args:
token[Token]: Annotated token.
out[list]: Assembled output."""
out.append("{}".format(token.text))
for job in self.job:
label = token.get_label(job).value
if label != "O":
out[-1] += " " + label
elif label == "O":
out[-1] += " - "
out[-1] += "\n"
return out
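# Example of the configuration dict expected by Flair/flair_pipe, assembled
# from the keys read in __init__ above; the concrete values are assumptions for
# illustration only.
#
#   config = {
#       "advanced_options": {"output_dir": "out/"},
#       "corpus_name": "test_corpus",
#       "input": "input.txt",
#       "language": "en",
#       "flair_dict": {"job": ["pos", "ner"]},
#   }
#   pipe = flair_pipe(config)
#   pipe.senter_spacy("Some raw text to tag.").apply_to().get_out(ret=False)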
|
# MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Base classes for differential privacy mechanisms.
"""
import abc
from copy import copy, deepcopy
from numbers import Real
class DPMachine(abc.ABC):
"""
Parent class for :class:`.DPMechanism` and :class:`.DPTransformer`, providing and specifying basic functionality.
"""
@abc.abstractmethod
def randomise(self, value):
"""Randomise `value` with the mechanism.
Parameters
----------
value : int or float or str or method
The value to be randomised.
Returns
-------
int or float or str or method
The randomised value, same type as `value`.
"""
def copy(self):
"""Produces a copy of the class.
Returns
-------
self : class
Returns the copy.
"""
return copy(self)
def deepcopy(self):
"""Produces a deep copy of the class.
Returns
-------
self : class
Returns the deep copy.
"""
return deepcopy(self)
def set_epsilon(self, epsilon):
r"""Sets the value of epsilon to be used by the mechanism.
Parameters
----------
epsilon : float
The value of epsilon for achieving :math:`\epsilon`-differential privacy with the mechanism. Must have
`epsilon > 0`.
Returns
-------
self : class
"""
return self.set_epsilon_delta(epsilon, 0.0)
@abc.abstractmethod
def set_epsilon_delta(self, epsilon, delta):
r"""Sets the value of epsilon and delta to be used by the mechanism.
`epsilon` and `delta` cannot both be zero.
Parameters
----------
epsilon : float
The value of epsilon for achieving :math:`(\epsilon,\delta)`-differential privacy with the mechanism. Must
have `epsilon >= 0`.
delta : float
The value of delta for achieving :math:`(\epsilon,\delta)`-differential privacy with the mechanism.
Must have `0 <= delta <= 1`.
`delta=0` gives strict (pure) differential privacy (:math:`\epsilon`-differential privacy). `delta > 0`
gives relaxed (approximate) differential privacy.
Returns
-------
self : class
"""
class DPMechanism(DPMachine, abc.ABC):
r"""
Base class for all mechanisms. Instantiated from :class:`.DPMachine`.
Notes
-----
* Each `DPMechanism` must define a `randomise` method, to handle the application of differential privacy
* Mechanisms that only operate in a limited window of :math:`\epsilon` or :math:`\delta` must define a
`set_epsilon_delta` method. Error-checking, for example for non-zero :math:`\delta` should be done in
`set_epsilon_delta`; `set_epsilon` should be left unchanged.
* When new methods are added, `__repr__` should be updated accordingly in the mechanism.
* Each mechanism's
"""
def __init__(self):
self._epsilon = None
self._delta = None
def __repr__(self):
output = str(self.__module__) + "." + str(self.__class__.__name__) + "()"
if self._epsilon is not None and self._delta is not None and self._delta > 0.0:
output += ".set_epsilon_delta(" + str(self._epsilon) + "," + str(self._delta) + ")"
elif self._epsilon is not None:
output += ".set_epsilon(" + str(self._epsilon) + ")"
return output
@abc.abstractmethod
def randomise(self, value):
"""Randomise `value` with the mechanism.
Parameters
----------
value : int or float or str or method
The value to be randomised.
Returns
-------
int or float or str or method
The randomised value, same type as `value`.
"""
def get_bias(self, value):
"""Returns the bias of the mechanism at a given `value`.
Parameters
----------
value : int or float
The value at which the bias of the mechanism is sought.
Returns
-------
bias : float or None
The bias of the mechanism at `value` if defined, `None` otherwise.
"""
raise NotImplementedError
def get_variance(self, value):
"""Returns the variance of the mechanism at a given `value`.
Parameters
----------
value : int or float
The value at which the variance of the mechanism is sought.
Returns
-------
bias : float or None
The variance of the mechanism at `value` if defined, `None` otherwise.
"""
raise NotImplementedError
def get_mse(self, value):
"""Returns the mean squared error (MSE) of the mechanism at a given `value`.
Parameters
----------
value : int or float
The value at which the MSE of the mechanism is sought.
Returns
-------
bias : float or None
The MSE of the mechanism at `value` if defined, `None` otherwise.
"""
return self.get_variance(value) + (self.get_bias(value)) ** 2
def set_epsilon_delta(self, epsilon, delta):
r"""Sets the value of epsilon and delta to be used by the mechanism.
`epsilon` and `delta` cannot both be zero.
Parameters
----------
epsilon : float
The value of epsilon for achieving :math:`(\epsilon,\delta)`-differential privacy with the mechanism. Must
have `epsilon >= 0`.
delta : float
The value of delta for achieving :math:`(\epsilon,\delta)`-differential privacy with the mechanism.
Must have `0 <= delta <= 1`.
`delta=0` gives strict (pure) differential privacy (:math:`\epsilon`-differential privacy). `delta > 0`
gives relaxed (approximate) differential privacy.
Returns
-------
self : class
Raises
------
ValueError
If `epsilon` is negative, or if `delta` falls outside [0,1], or if `epsilon` and `delta` are both zero.
"""
if not isinstance(epsilon, Real) or not isinstance(delta, Real):
raise TypeError("Epsilon and delta must be numeric")
if epsilon < 0:
raise ValueError("Epsilon must be non-negative")
if not 0 <= delta <= 1:
raise ValueError("Delta must be in [0, 1]")
if epsilon + delta == 0:
raise ValueError("Epsilon and Delta cannot both be zero")
self._epsilon = float(epsilon)
self._delta = float(delta)
return self
def check_inputs(self, value):
"""Checks that all parameters of the mechanism have been initialised correctly, and that the mechanism is ready
to be used.
Parameters
----------
value : int or float or str or method
The value to be checked.
Returns
-------
True if the mechanism is ready to be used.
Raises
------
Exception
If parameters have not been set correctly, or if `value` falls outside the domain of the mechanism.
"""
del value
if self._epsilon is None:
raise ValueError("Epsilon must be set")
return True
class TruncationAndFoldingMixin:
"""
Mixin for truncating or folding the outputs of a mechanism. Must be instantiated with a :class:`.DPMechanism`.
"""
def __init__(self):
if not isinstance(self, DPMechanism):
raise TypeError("TruncationAndFoldingMachine must be implemented alongside a :class:`.DPMechanism`")
self._lower_bound = None
self._upper_bound = None
def __repr__(self):
output = ".set_bounds(" + str(self._lower_bound) + ", " + str(self._upper_bound) + ")" \
if self._lower_bound is not None else ""
return output
def set_bounds(self, lower, upper):
"""Sets the lower and upper bounds of the mechanism.
Must have lower <= upper.
Parameters
----------
lower : float
The lower bound of the mechanism.
upper : float
The upper bound of the mechanism.
Returns
-------
self : class
"""
if not isinstance(lower, Real) or not isinstance(upper, Real):
raise TypeError("Bounds must be numeric")
if lower > upper:
raise ValueError("Lower bound must not be greater than upper bound")
self._lower_bound = float(lower)
self._upper_bound = float(upper)
return self
def check_inputs(self, value):
"""Checks that all parameters of the mechanism have been initialised correctly, and that the mechanism is ready
to be used.
Parameters
----------
value : float
Returns
-------
True if the mechanism is ready to be used.
"""
del value
if (self._lower_bound is None) or (self._upper_bound is None):
raise ValueError("Upper and lower bounds must be set")
return True
def _truncate(self, value):
if value > self._upper_bound:
return self._upper_bound
if value < self._lower_bound:
return self._lower_bound
return value
def _fold(self, value):
if value < self._lower_bound:
return self._fold(2 * self._lower_bound - value)
if value > self._upper_bound:
return self._fold(2 * self._upper_bound - value)
return value
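# A minimal sketch of a concrete mechanism built on the classes above (not part
# of the library; the Laplace sampling and the set_sensitivity helper are only
# an illustration of the required interface: randomise() plus the inherited
# epsilon handling).
#
#   from numpy.random import laplace
#
#   class SketchLaplace(DPMechanism):
#       def __init__(self):
#           super().__init__()
#           self._sensitivity = None
#
#       def set_sensitivity(self, sensitivity):
#           self._sensitivity = float(sensitivity)
#           return self
#
#       def randomise(self, value):
#           self.check_inputs(value)
#           scale = self._sensitivity / self._epsilon
#           return value + laplace(loc=0.0, scale=scale)
#
#   mech = SketchLaplace().set_epsilon(0.5).set_sensitivity(1.0)
#   noisy = mech.randomise(10.0)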
|
import re
from typing import Any, Mapping, Sequence
import pytest
from snuba_sdk.aliased_expression import AliasedExpression
from snuba_sdk.column import Column
from snuba_sdk.conditions import Condition, InvalidConditionError, Op
from snuba_sdk.entity import Entity
from snuba_sdk.expressions import InvalidExpressionError
from snuba_sdk.function import Function
from snuba_sdk.query import Query
from snuba_sdk.query_visitors import InvalidQueryError
def test_invalid_query() -> None:
with pytest.raises(
InvalidQueryError, match=re.escape("queries must have a valid Entity")
):
Query(match="events") # type: ignore
with pytest.raises(
InvalidConditionError,
match=re.escape(
"invalid condition: LHS of a condition must be a Column, CurriedFunction or Function, not <class 'snuba_sdk.aliased_expression.AliasedExpression'>"
),
):
(
Query(Entity("events"))
.set_select([AliasedExpression(Column("transaction"), "tn")])
.set_where(
[Condition(AliasedExpression(Column("project_id"), "pi"), Op.IN, (1,))] # type: ignore
)
)
def test_invalid_query_set() -> None:
query = Query(Entity("events"))
tests: Mapping[str, Sequence[Any]] = {
"match": (0, "0 must be a valid Entity"),
"select": (
(0, [], [0]),
"select clause must be a non-empty list of SelectableExpression",
),
"groupby": ([0, [0]], "groupby clause must be a list of SelectableExpression"),
"where": ([0, [0]], "where clause must be a list of conditions"),
"having": ([0, [0]], "having clause must be a list of conditions"),
"orderby": ([0, [0]], "orderby clause must be a list of OrderBy"),
"limitby": ("a", "limitby clause must be a LimitBy"),
"limit": (100000, "limit '100000' is capped at 10,000"),
"offset": ("", "offset '' must be an integer"),
"granularity": (-1, "granularity '-1' must be at least 1"),
}
match, err = tests["match"]
with pytest.raises(InvalidQueryError, match=re.escape(err)):
query.set_match(match)
for val in tests["select"][0]:
with pytest.raises(InvalidQueryError, match=re.escape(tests["select"][1])):
query.set_select(val)
for val in tests["groupby"][0]:
with pytest.raises(InvalidQueryError, match=re.escape(tests["groupby"][1])):
query.set_groupby(val)
for val in tests["where"][0]:
with pytest.raises(InvalidQueryError, match=re.escape(tests["where"][1])):
query.set_where(val)
for val in tests["having"][0]:
with pytest.raises(InvalidQueryError, match=re.escape(tests["having"][1])):
query.set_having(val)
for val in tests["orderby"][0]:
with pytest.raises(InvalidQueryError, match=re.escape(tests["orderby"][1])):
query.set_orderby(val)
with pytest.raises(InvalidQueryError, match=re.escape(tests["limitby"][1])):
query.set_limitby(tests["limitby"][0])
with pytest.raises(InvalidExpressionError, match=re.escape(tests["limit"][1])):
query.set_limit(tests["limit"][0])
with pytest.raises(InvalidExpressionError, match=re.escape(tests["offset"][1])):
query.set_offset(tests["offset"][0])
with pytest.raises(
InvalidExpressionError, match=re.escape(tests["granularity"][1])
):
query.set_granularity(tests["granularity"][0])
def test_invalid_subquery() -> None:
with pytest.raises(
InvalidQueryError,
match=re.escape(
"inner query is invalid: query must have at least one expression in select"
),
):
Query(Query(match=Entity("events"))).set_select(
[Column("event_id"), Column("title")]
)
with pytest.raises(
InvalidQueryError,
match=re.escape(
"inner query is invalid: query must have at least one expression in select"
),
):
Query(
Query(
match=Entity("events"),
select=[Column("title"), Column("timestamp")],
),
).set_match(Query(match=Entity("events"))).set_select(
[Function("uniq", [Column("new_event")], "uniq_event"), Column("title")]
)
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from datamanage.lite.dmonitor.flow_views import (
DmonitorFlowViewSet,
DmonitorDatasetViewSet,
DmonitorDataOperationsViewSet,
)
from datamanage.lite.dmonitor.alert_views import (
AlertViewSet,
AlertDetailViewSet,
AlertShieldViewSet,
AlertNotifyWayViewSet,
AlertTargetViewSet,
)
from datamanage.lite.dmonitor.monitor_views import AlertConfigViewSet
from datamanage.lite.dmonitor.metric_views import DmonitorMetricsViewSet
from datamanage.lite.dmonitor.task_views import BatchExecutionsViewSet, BatchScheduleViewSet
from datamanage.lite.dmonitor.views import (
DmonitorResultTableViewSet,
)
router = DefaultRouter(trailing_slash=True)
router.register(r'metrics', DmonitorMetricsViewSet, basename='dmonitor_metrics')
router.register(r'result_tables', DmonitorResultTableViewSet, basename='dmonitor_result_tables')
router.register(r'batch_executions', BatchExecutionsViewSet, basename='dmonitor_batch_executions')
router.register(r'batch_schedules', BatchScheduleViewSet, basename='dmonitor_batch_schedules')
router.register(r'alert_configs', AlertConfigViewSet, basename='alert_configs')
router.register(r'alerts', AlertViewSet, basename='alerts')
router.register(r'alert_details', AlertDetailViewSet, basename='alert_details')
router.register(r'alert_targets', AlertTargetViewSet, basename='alert_targets')
router.register(r'alert_shields', AlertShieldViewSet, basename='alert_shields')
router.register(r'flows', DmonitorFlowViewSet, basename='dmonitor_flow')
router.register(r'data_sets', DmonitorDatasetViewSet, basename='dmonitor_data_set')
router.register(r'data_operations', DmonitorDataOperationsViewSet, basename='dmonitor_data_operation')
router.register(r'notify_ways', AlertNotifyWayViewSet, basename='dmonitor_notify_ways')
urlpatterns = [
url(r'', include(router.urls)),
]
|
"""empty message
Revision ID: 1c3f88dbccc3
Revises: 2e7b377cbc7b
Create Date: 2020-07-30 18:51:01.816284
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '1c3f88dbccc3'
down_revision = 'ab06a94e5d4c'
branch_labels = None
depends_on = None
def upgrade():
op.execute("CREATE TYPE irbstatus AS ENUM('incomplete_in_protocol_builder', 'completed_in_protocol_builder', 'hsr_assigned')")
op.execute("CREATE TYPE studystatus AS ENUM('in_progress', 'hold', 'open_for_enrollment', 'abandoned')")
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('study', sa.Column('irb_status', sa.Enum('incomplete_in_protocol_builder', 'completed_in_protocol_builder', 'hsr_assigned', name='irbstatus'), nullable=True))
op.add_column('study', sa.Column('status', sa.Enum('in_progress', 'hold', 'open_for_enrollment', 'abandoned', name='studystatus'), nullable=True))
op.drop_column('study', 'protocol_builder_status')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('study', sa.Column('protocol_builder_status', postgresql.ENUM('incomplete', 'active', 'hold', 'open', 'abandoned', name='protocolbuilderstatus'), autoincrement=False, nullable=True))
op.drop_column('study', 'status')
op.drop_column('study', 'irb_status')
# ### end Alembic commands ###
op.execute('DROP TYPE studystatus')
op.execute('DROP TYPE irbstatus')
|
import datetime
from dataclasses import dataclass
from typing import Optional
from aspen.database.models import PublicRepositoryType
@dataclass
class AccessionWorkflowDirective:
"""This is a way to encapsulate the accession workflows hanging off of an entity."""
repository_type: PublicRepositoryType
start_datetime: datetime.datetime
end_datetime: Optional[datetime.datetime]
"""Set to None if this workflow failed, otherwise this is the time the workflow
succeeded. If the workflow failed, the remaining fields in the dataclass are
ignored."""
public_identifier: Optional[str]
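# Illustrative instance (values and the enum member name are hypothetical): a
# failed workflow carries end_datetime=None, in which case the remaining fields
# are ignored.
#
#   failed = AccessionWorkflowDirective(
#       repository_type=PublicRepositoryType.GISAID,   # assumed member name
#       start_datetime=datetime.datetime(2021, 5, 1, 12, 0),
#       end_datetime=None,
#       public_identifier=None,
#   )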
|
# Given string S and a dictionary of words words, find the number of words[i] that is a subsequence of S.
#
# Example :
# Input:
# S = "abcde"
# words = ["a", "bb", "acd", "ace"]
# Output: 3
# Explanation: There are three words in words that are a subsequence of S: "a", "acd", "ace".
# Note:
#
# All words in words and S will only consist of lowercase letters.
# The length of S will be in the range of [1, 50000].
# The length of words will be in the range of [1, 5000].
# The length of words[i] will be in the range of [1, 50].
import collections
class Solution(object):
def numMatchingSubseq(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
        dic = collections.defaultdict(list)
res = 0
for word in words:
dic[word[0]].append(word)
for s in S:
if len(dic[s]) > 0:
for word in list(dic[s]):
if len(word) == 1:
res+=1
else:
dic[word[1]].append(word[1:])
dic[s].remove(word)
return res
s=Solution()
print(s.numMatchingSubseq("abcde",["a", "bb", "acd", "ace"]))
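# Approach used above: bucket every word under the next character it is waiting
# for; while scanning S once, each waiting word is either advanced (re-bucketed
# under its next character) or counted once it is fully consumed, so no word is
# compared against S more than character by character.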
|
import requests
from bs4 import BeautifulSoup
website_url = "https://www.azlyrics.com"
artist = "Bon Iver"
artist = ''.join(artist.lower().split(' '))
artist_url = "%s/%s/%s.html" % (website_url, artist[0], artist)
artist_html = requests.get(artist_url)
artist_soup = BeautifulSoup(artist_html.content, "html.parser")
#Extract all links to song lyrics
song_links = []
for link in artist_soup.find_all("a", href=True):
if artist in link["href"]:
song_links.append("%s/%s" % (website_url, str(link["href"]).replace("../", "")))
#Extract lyrics from each link
for song_url in song_links:
    song_html = requests.get(song_url)
    song_soup = BeautifulSoup(song_html.content, "html.parser")
    # heuristic (assumption): azlyrics keeps the lyrics in the first div with no class or id
    lyric_divs = song_soup.find_all("div", attrs={"class": None, "id": None})
    if lyric_divs:
        print(lyric_divs[0].get_text())
|
from django.shortcuts import render, redirect
from apps.inicio.models import DatosPersonales, OrdenRiego, Noticia, Parcela, AuthUser, Reparto, AuthUser, Caudal, Destajo
from apps.inicio.models import *
from apps.inicio.forms import PersonaForm
from apps.usuario.forms import OrdenRForm, AuthUserForm
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.views.generic import ListView, CreateView,UpdateView,DeleteView, TemplateView, View
import datetime
import time
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
def Pruebas(request):
print('--------------------------->> ',time.strftime("%A"),'/',time.strftime("%B"))
return render(request,'usuario.html')
from django.db import connection
def usuario(request):
dicc={}
#===============noticia===========
lst_not=Noticia.objects.filter(estado='Publicada').order_by('-pk')
dicc['pk_max']=0
for x in lst_not:
if x.pk > dicc['pk_max']:
dicc['pk_max']= x.pk
dicc['lst_noticias']=lst_not
# ===========caudal===================
cau=Caudal.objects.all().order_by("fecha")
p=0
for x in cau:
p+=1
cant_cau=cau.count()
dicc['fecha']=datetime.datetime.now()
dicc['caudales']=cau
dicc['cant_cau']=cant_cau
# ===========rep_reparto==============
cursor = connection.cursor()
cursor.execute("CALL sp_cant_por_reparto")
result = []
detalles = cursor.fetchall()
for row in detalles:
dic = dict(zip([col[0] for col in cursor.description], row))
result.append(dic)
cursor.close()
dicc['repartos']=result
return render(request,'usuario.html',dicc)
class PerfilEditar(UpdateView):
model=AuthUser
form_class=AuthUserForm
template_name='usuario/perfil_editar.html'
success_url=reverse_lazy('usuario')
class ApiTraerParc(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
rpta ="Err"
if userpk != "":
if Parcela.objects.filter(id_auth_user=userpk).exists():
pr=Parcela.objects.filter(id_auth_user=userpk)
rpta='{'
cont=0
for p in pr:
if cont == 0:
rpta+='"'+str(cont)+'":"'+p.nombre+'"'
else:
rpta+=',"'+str(cont)+'":"'+p.nombre+'"'
cont+=1
rpta+='}'
return HttpResponse(rpta)
class ApiTraerOrd(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
rpta ="Err"
if userpk != "":
if OrdenRiego.objects.filter(id_parcela__id_auth_user=userpk).exists():
pr=OrdenRiego.objects.filter(id_parcela__id_auth_user=userpk).order_by('-fecha_inicio')
rpta='{'
cont=0
fechaa = "sin fecha"
for p in pr:
if p.fecha_inicio != None:
fechaa = " "+str(p.fecha_inicio.day)+'/'+str(p.fecha_inicio.month)+'/'+str(p.fecha_inicio.year)+' - H: '
fechaa += str(p.fecha_inicio.hour)+':'+str(p.fecha_inicio.minute)
if cont == 0:
rpta+='"'+str(cont)+('":" F: '+fechaa+'_ Est: '+ p.estado+'"')
else:
rpta+=',"'+str(cont)+('":" F: '+fechaa+'_ Est: '+ p.estado+'"')
cont+=1
rpta+='}'
return HttpResponse(rpta)
class ApiAsamb(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
rpta ="Err"
if Asamblea.objects.filter(estado=1).exists():
lsta=Asamblea.objects.filter(estado=1)
rpta='{'
cant=0
for a in lsta:
if cant == 0:
rpta+='"0":" tipo: '+a.tipo+', el '+str(a.fecha_asamblea.day)+'/'+str(a.fecha_asamblea.month)+'/'+str(a.fecha_asamblea.year)+'_'+str(a.fecha_asamblea.hour)+':'+str(a.fecha_asamblea.minute)+'"'
else:
rpta+=',"'+cant+'":" tipo: '+a.tipo+', el '+str(a.fecha_asamblea.day)+'/'+str(a.fecha_asamblea.month)+'/'+str(a.fecha_asamblea.year)+'_'+str(a.fecha_asamblea.hour)+':'+str(a.fecha_asamblea.minute)+'"'
cant+=1
rpta+='}'
return HttpResponse(rpta)
class ApiTraerMul(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
rpta ='{"cant":'
cant=0
if MultaOrden.objects.filter(id_orden__id_parcela__id_auth_user=userpk).exists():
lmo=MultaOrden.objects.filter(id_orden__id_parcela__id_auth_user=userpk)
cant+=lmo.count()
if MultaAsistencia.objects.filter(id_hoja_asistencia__id_auth_user=userpk).exists():
lma=MultaAsistencia.objects.filter(id_hoja_asistencia__id_auth_user=userpk)
cant+=lma.count()
if MultaLimpia.objects.filter(id_det_limpia__id_destajo__id_parcela__id_auth_user=userpk).exists():
lmd=MultaLimpia.objects.filter(id_det_limpia__id_destajo__id_parcela__id_auth_user=userpk)
cant+=lmd.count()
rpta+=str(cant)+'}'
return HttpResponse(rpta)
class ApiTraerPerf(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
rpta ="Err"
if userpk != "":
if AuthUser.objects.get(pk=userpk).foto:
#print(" >> Si tiene foto: ["+str(AuthUser.objects.get(pk=userpk).foto)+"]")
rpta=str(AuthUser.objects.get(pk=userpk).foto)
else:
rpta="Inv"
#print(" >> No tiene foto")
return HttpResponse(rpta)
class ApiContra(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
ncon = self.request.GET.get('ncon')
rpta="Err"
from django.contrib.auth.models import User
if AuthUser.objects.filter(pk=userpk).exists():
u = User.objects.get(pk=userpk)
u.set_password(ncon)
u.save()
rpta="Ok"
return HttpResponse(rpta)
class ApiEdiSx(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
sx = self.request.GET.get('sx')
rpta="Err"
if AuthUser.objects.filter(pk=userpk).exists():
AuthUser.objects.filter(pk=userpk).update(sexo=sx)
rpta="Ok"
return HttpResponse(rpta)
class ApiEdiFn(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
fn = self.request.GET.get('fn')
rpta="Err"
if AuthUser.objects.filter(pk=userpk).exists():
AuthUser.objects.filter(pk=userpk).update(fecha_nacimiento=fn)
rpta="Ok"
return HttpResponse(rpta)
class FiltrarParcelas(ListView):
model = Parcela
template_name = 'parcela/u_misparcelas_lis.html'
def get_queryset(self):
queryset = super(FiltrarParcelas, self).get_queryset()
idauth = self.request.GET.get('id_auth')
queryset = Parcela.objects.filter(id_auth_user=idauth)
return queryset
class MisOrdenes(ListView):
model = Parcela
template_name = 'orden/u_mis_ordenes_lis.html'
def get_queryset(self):
queryset = super(MisOrdenes, self).get_queryset()
idauth = self.request.GET.get('id_auth')
queryset = OrdenRiego.objects.filter(id_parcela__id_auth_user=idauth).order_by('-pk')
return queryset
class VerRepartos(View):
def get(self, request, *args, **kwargs):
from django.db import connection
cursor = connection.cursor()
cursor.execute("CALL sp_rep_disponibles")
result = []
detalles = cursor.fetchall()
for row in detalles:
dic = dict(zip([col[0] for col in cursor.description], row))
result.append(dic)
cursor.close()
diccionario={}
diccionario['object_list']=result
for d in result:
print(' - - - >',d)
return render(request,'reparto/u_reparto_lis.html',diccionario)
class EliOrden(UpdateView):
model=OrdenRiego
form_class=OrdenRForm
template_name='orden/u_orden_eli.html'
success_url=reverse_lazy('usuario')
class SolOrdenList(TemplateView):
def get(self, request, *args, **kwargs):
idauth = self.request.GET.get('id_auth')
idrepa = self.request.GET.get('id_repa')
parcelas=Parcela.objects.filter(id_auth_user=idauth)
reparto =Reparto.objects.get(id_reparto=idrepa)
return render(request,'orden/u_orden_sol.html',{'parcelas':parcelas,'reparto':reparto})
def post(self, request, *args, **kwargs):
id_repa = self.request.POST.get('id_repa')
id_par = self.request.POST.get('id_parcela')
cantidad = self.request.POST.get('cantidad')
id_au = self.request.POST.get('id_auth')
importes = (float(cantidad)*2.5)
us=AuthUser.objects.get(id=id_au)
parcelas=Parcela.objects.filter(id_auth_user=us)
reparto =Reparto.objects.get(id_reparto=id_repa)
validador = {}
validador['hecho']=False
validador['parcelas'] = parcelas
validador['reparto'] = reparto
if float(cantidad) <= 0:
validador['mensaje'] = 'Ingrese horas correctas!'
return render(request,'orden/u_orden_sol.html',validador)
else:
id_r=Reparto.objects.get(id_reparto=int(id_repa))
id_p=Parcela.objects.get(id_parcela=int(id_par))
ver_ord=OrdenRiego.objects.filter(id_reparto=id_r,id_parcela=id_p)
if ver_ord:
validador['hecho'] = True
validador['mensaje'] = 'Usted ya ha sacado una orden para esa parcela en este reparto!'
else:
t=datetime.datetime.now()
ori=OrdenRiego(id_reparto=id_r, id_parcela=id_p,duracion=float(cantidad),
cantidad_has=float(cantidad),unidad='h',estado='Solicitada',
fecha_establecida=t,importe=importes)
ori.save()
validador['hecho'] = True
validador['mensaje'] = 'Orden registrada con éxito'
return render(request,'orden/u_orden_sol.html',validador)
class NoticiaList(ListView):
model=Noticia
template_name='noticia/u_noticia_lis.html'
def get(self, request, *args, **kwargs):
dicc = {}
dicc['object_list']=Noticia.objects.filter(estado="Publicada").order_by('-pk')
return render(request, self.template_name,dicc)
class OrdenDelete(DeleteView):
model=OrdenRiego
form_class=OrdenRForm
template_name='orden/u_orden_eli.html'
success_url=reverse_lazy('u_orden_lis')
class OrdenList(ListView):
model=OrdenRiego
template_name='orden/u_orden_lis.html'
paginate_by=10
class UsuarioDelete(DeleteView):
model=DatosPersonales
form_class=PersonaForm
template_name='usuario/borrar_usuario.html'
success_url=reverse_lazy('listar')
class UsuarioUpdate(UpdateView):
model=DatosPersonales
form_class=PersonaForm
template_name='usuario/crear_usuario.html'
success_url=reverse_lazy('listar')
class UsuarioCreate(CreateView):
model=DatosPersonales
form_class=PersonaForm
template_name='usuario/crear_usuario.html'
success_url=reverse_lazy('listar')
class UsuarioList(ListView):
model=DatosPersonales
template_name='usuario/lista_usuarios.html'
paginate_by=10
class AsambLst(View):
def get(self,request,*args,**kwargs):
dicc={}
if Asamblea.objects.filter(estado=2).exists():
dicc['object_list']=Asamblea.objects.filter(estado=2).order_by('-pk')
return render(request,"asamblea/lst_asamblea.html",dicc)
class AsambDet(View):
def get(self,request,*args,**kwargs):
apk=self.request.GET.get("apk")
dicc={}
if Asamblea.objects.filter(pk=apk).exists():
dicc['object_list']=Asamblea.objects.get(pk=apk)
if Asamblea.objects.filter(estado=2).exists():
dicc['agenda']=AgendaAsamblea.objects.filter(id_asamblea=apk).order_by('-pk')
return render(request,"asamblea/asamblea_det.html",dicc)
## ======================= LstDestajos ============================
class LstDestajos(View):
def get(self,request,*args,**kwargs):
userpk=self.request.GET.get("userpk")
dicc={}
if Destajo.objects.filter(id_parcela__id_auth_user=userpk).exists():
dicc['lst_dstjs']=Destajo.objects.filter(id_parcela__id_auth_user=userpk).order_by('id_canal','num_orden')
return render(request,"destajo/lst_dstjs.html",dicc)
class LstMultas(View):
def get(self, request, *args, **kwargs):
userpk = self.request.GET.get('userpk')
dicc={}
if MultaOrden.objects.filter(id_orden__id_parcela__id_auth_user=userpk).exists():
dicc['lst_ord']=MultaOrden.objects.filter(id_orden__id_parcela__id_auth_user=userpk)
if MultaAsistencia.objects.filter(id_hoja_asistencia__id_auth_user=userpk).exists():
dicc['lst_asi']=MultaAsistencia.objects.filter(id_hoja_asistencia__id_auth_user=userpk)
if MultaLimpia.objects.filter(id_det_limpia__id_destajo__id_parcela__id_auth_user=userpk).exists():
dicc['lst_des']=MultaLimpia.objects.filter(id_det_limpia__id_destajo__id_parcela__id_auth_user=userpk)
return render(request,"multa/mul_lst.html",dicc)
class ApiOrd(View):
def get(self, request, *args, **kwargs):
ordpk=self.request.GET.get("ordpk")
estado=self.request.GET.get("estado")
#print(" >> ordpk: "+str(ordpk)+" >> std: "+estado)
OrdenRiego.objects.filter(pk=ordpk).update(estado=estado)
return HttpResponse("Ok")
class ApiQr(View):
def get(self, request, *args, **kwargs):
userpk=self.request.GET.get("userpk")
rpta = "Ok"
if userpk == '1':
if OrdenRiego.objects.filter(estado='Iniciada').exists():
enr=OrdenRiego.objects.filter(estado='Iniciada')
rpta='{'
cont=0
for x in enr:
if cont==0:
rpta+='"0":" En el '+x.id_parcela.id_canal.nombre+' está regando '+x.id_parcela.id_auth_user.first_name+' '+x.id_parcela.id_auth_user.last_name+' en la toma '+ str(x.id_parcela.num_toma)+'"'
else:
rpta+=',"'+str(cont)+'":" En el '+x.id_parcela.id_canal.nombre+' está regando '+x.id_parcela.id_auth_user.first_name+' '+x.id_parcela.id_auth_user.last_name+' en la toma '+ str(x.id_parcela.num_toma)+'"'
cont+=1
rpta+='}'
else:
rpta="Err"
return HttpResponse(rpta)
"""
class ApiGraf1(View):
def get(self, request, *args, **kwargs):
userpk=self.request.GET.get("userpk")
# ===========caudal===================
json = "{"
cau=Caudal.objects.all().order_by("fecha")
json+='"0":"'+str(cau.count())+'","1":"'+str(datetime.datetime.now())+'"'
cont=2
for x in cau:
json+=',"'+str(cont)+'":"'+str(x.fecha.day)+'/'+str(x.fecha.month)+'/'+str(x.fecha.year)+'"'
cont+=1
cont = 50
for x in cau:
json+=',"'+str(cont)+'":"'+str(x.nivel)+'"'
cont+=1
json += '}'
return HttpResponse(json)
"""
"""
# ===========caudal===================
cau=Caudal.objects.all().order_by("fecha")
p=0
for x in cau:
p+=1
cant_cau=cau.count()
dicc['fecha']=datetime.datetime.now()
dicc['caudales']=cau
dicc['cant_cau']=cant_cau
rpta = json.dumps(cau)
#print(" json: "+json.dumps(cau))
"""
|
from PyObjCTools.TestSupport import *
from PyObjCTest.protected import *
class TestProtected (TestCase):
def testProtectedNotInDir(self):
d = dir(PyObjCTest_Protected)
self.assertIn('publicMethod', d)
self.assertNotIn('_protectedMethod', d)
def testProtectedCallable(self):
o = PyObjCTest_Protected.new()
self.assertEqual(None, o._protectedMethod())
self.assertEqual(None, o.publicMethod())
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import netCDF4
import mds.constants
import mds.date_time  # used by xcopy() when a time dimension is sliced by ISO date strings
import mds.ordered_set
import mds.math
import mds.netcdf.convention
class Dataset(object):
"""
Constructs an instance based on a *name* and an optional default
*favor_convention_class*
filter_out_nd_coordinates
See :py:func:`mds.netcdf.convention.select_convention`.
favor_convention_class
See :py:func:`mds.netcdf.convention.select_convention`.
This class is implemented using `netCDF4.Dataset <http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4.Dataset-class.html>`_ from the
`netcdf4-python package <https://code.google.com/p/netcdf4-python/>`_.
If you need functionality that isn't available in this class' interface,
then you can use the layered *dataset* instance::
dimensions = dataset.dataset.dimensions
"""
def __init__(self,
name,
filter_out_nd_coordinates,
favor_convention_class=None):
self.name = name
self.dataset = netCDF4.Dataset(filename=self.name, mode="r")
self.convention = mds.netcdf.convention.select_convention(
self.dataset, filter_out_nd_coordinates,
favor_class=favor_convention_class if favor_convention_class
is not None else mds.netcdf.convention.CF)
if self.convention is None:
raise RuntimeError("Convention rules are not implemented")
def __del__(self):
if hasattr(self, "dataset"):
self.dataset.close()
# Revamp when necessary.
# def __enter__(self):
# print "enter!"
# def __exit__(self, exc_type, exc_val, exc_tb):
# print "exit!"
# if hasattr(self, "dataset"):
# self.dataset.close()
# return False
def attribute_names(self):
"""
OrderedSet of names of global attributes in the dataset.
"""
return mds.ordered_set.OrderedSet(self.dataset.ncattrs())
def attribute(self,
attribute_name):
"""
Return attribute with name *attribute_name*.
"""
assert attribute_name in self.attribute_names(), attribute_name
return getattr(self.dataset, attribute_name)
def dimension_names(self):
"""
OrderedSet of names of dimensions in the dataset.
"""
return mds.ordered_set.OrderedSet(self.dataset.dimensions.keys())
def dimension(self,
dimension_name):
"""
Return dimension with name *dimension_name*.
A dimension with name *dimension_name* must be present in the dataset.
The dimension returned is a `netCDF4.Dimension <http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4.Dimension-class.html>`_
instance.
"""
        assert dimension_name in self.dataset.dimensions, \
            "{} not in {}".format(dimension_name,
                                  self.dataset.dimensions.keys())
return self.dataset.dimensions[dimension_name]
def variable_names(self):
"""
OrderedSet of names of variables in the dataset.
The set includes the names of dimension variables.
"""
return mds.ordered_set.OrderedSet(self.dataset.variables.keys())
def variable(self,
variable_name):
"""
Return variable with name *variable_name*.
A variable with name *variable_name* must be present in the dataset.
The variable returned is a `netCDF4.Variable <http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4.Variable-class.html>`_
instance.
"""
assert variable_name in self.dataset.variables, \
"{} not in {}".format(variable_name, self.dataset.variables.keys())
return self.dataset.variables[variable_name]
def data_variable_names(self):
"""
Return an :py:class:`mds.ordered_set.OrderedSet` with the names of the
data variables.
"""
return self.convention.data_variable_names()
def spatial_data_variable_names(self):
"""
Return an OrderedSet with the names of the spatial data variables.
"""
return self.convention.spatial_data_variable_names()
def temporal_data_variable_names(self):
"""
Return an OrderedSet with the names of the temporal data variables.
"""
return self.convention.temporal_data_variable_names()
def variable_dimension_names(self,
variable_name):
"""
Return the names of `variable_name`'s dimensions.
A variable may depend multiple times on the same dimension, so we
return a tuple with the names instead of an OrderedSet.
"""
assert variable_name in self.dataset.variables, variable_name
variable = self.dataset.variables[variable_name]
return variable.dimensions
def space_dimension_names(self,
variable_name):
"""
Return an OrderedSet with the names of *variable_name*'s spatial
dimensions.
"""
return self.convention.space_dimension_names(variable_name)
def non_space_dimension_names(self,
variable_name):
"""
Return an OrderedSet with the names of *variable_name*'s non-spatial
dimensions.
"""
dimension_names = self.variable_dimension_names(variable_name)
space_dimension_names = self.convention.space_dimension_names(
variable_name)
return tuple(name for name in dimension_names if not name in
space_dimension_names)
def time_dimension_names(self,
variable_name):
"""
Return an OrderedSet with the names of *variable_name*'s temporal
dimensions.
"""
return self.convention.time_dimension_names(variable_name)
def is_spatial_variable(self,
variable_name):
"""
Return whether variable *variable_name* is spatial.
A variable is considered spatial if it has two dimensions representing
the x and y dimensions.
"""
return self.convention.is_spatial_variable(variable_name)
def is_temporal_variable(self,
variable_name):
"""
Return whether variable *variable_name* is temporal.
"""
return self.convention.is_temporal_variable(variable_name)
def compatible_data_variable_names(self,
variable_name):
"""
Return an OrderedSet with the names of data variables that are
compatible with data variable *variable_name*.
See also :py:meth:`.convention.Convention.compatible_data_variable_names`.
"""
return self.convention.compatible_data_variable_names(variable_name)
def dependent_variable_names(self,
variable_name):
return self.convention.dependent_variable_names(variable_name)
def extent(self,
variable_name):
"""
Return *variable_names*'s spatial extent.
The result is a list with four values: ``[x_min, y_min, x_max, y_max]``.
"""
return self.convention.extent(variable_name)
def spatial_dimension_slices(self,
variable_name,
extent):
return self.convention.spatial_dimension_slices(variable_name, extent)
def dimension_slice(self,
dimension_name,
start_value,
end_value,
value_selection_method):
assert dimension_name in self.dataset.dimensions
dimension = self.dataset.dimensions[dimension_name]
assert len(dimension) > 0
if value_selection_method == mds.constants.SELECT_BY_INDEX or \
dimension_name not in self.variable_names():
# A start value index points to the first value.
# An end value index points to the last value.
# A start slice index points to the first value.
# An end slice index points to the one-past-the-last value.
start_value, end_value = int(start_value), int(end_value)
assert start_value <= end_value
start_index = mds.math.clamp(0, start_value, len(dimension))
end_index = mds.math.clamp(0, end_value, len(dimension)) + 1
slice_ = (start_index, end_index)
elif value_selection_method == mds.constants.SELECT_BY_VALUE:
assert dimension_name in self.dataset.variables
dimension_variable = self.dataset.variables[dimension_name]
coordinates = dimension_variable[:]
# TODO We don't support slicing nD coordinate variables yet.
assert len(coordinates.shape) == 1
slice_ = mds.math.values_to_slice(coordinates, start_value,
end_value)
else:
assert False
assert slice_[0] <= slice_[1]
return slice_
def xcopy(self,
variable_names,
output_filename,
extent=None,
dimension_selections=[],
value_selection_method=mds.constants.SELECT_BY_VALUE,
history_message=None):
"""
Copy the variables *variable_names* from the layered dataset to a
netCDF file named *output_filename*, honoring the spatial *extent* and
*dimension_selections* passed in. If no spatial variable is selected,
the value of *extent* is discarded.
The *extent* passed in must be an sequence containing
``[x_min, y_min, x_max, y_max]``. If not provided, the full extent is
copied.
The *dimension_selections* passed in must be an iterable of sequences
containing ``[dimension_name, start_value, end_value]``. Dimensions
not present in *dimension_selections* will be copied in full.
The interpretation of the ``start_value`` and ``end_value`` stored in
each dimension selection depends on the value of
*value_selection_method*. This argument must be one of the
selection methods defined in :mod:`mds`.
The *history_message* is written to the netCDF file. The value is
appended to the value of the global history attribute. If no value is
passed, the history attribute, if present, is not changed.
"""
assert len(variable_names) > 0
assert all([variable_name in self.data_variable_names() for
variable_name in variable_names])
variable_names = list(variable_names)
first_spatial_variable_name = next((variable_name for variable_name in
variable_names if self.is_spatial_variable(variable_name)),
None)
if extent is None and not first_spatial_variable_name is None:
# No extent passed in. Use the extent of the first spatial variable
# selected.
extent = self.extent(first_spatial_variable_name)
elif extent is not None and first_spatial_variable_name is None:
# None of the selected variables is spatial.
extent = None
if dimension_selections is None:
dimension_selections = []
# Create target dataset with same format as source.
new_dataset = netCDF4.Dataset(output_filename, mode="w",
clobber=True, format=self.dataset.file_format)
# Copy global attributes.
for attribute_name in self.dataset.ncattrs():
new_dataset.setncattr(attribute_name, self.dataset.getncattr(
attribute_name))
if history_message is not None:
history_messages = []
if "history" in new_dataset.ncattrs():
history_messages = new_dataset.history.split("\n")
history_messages.append(history_message)
new_dataset.history = "\n".join(history_messages)
new_dataset.Source_Software = "Esri ArcGIS"
# List of lists with dependent variables.
dependent_variable_names = [self.dependent_variable_names(
variable_name) for variable_name in variable_names]
# Flattened list.
dependent_variable_names = [variable_name for name_list in
dependent_variable_names for variable_name in name_list]
assert all([variable_name not in variable_names for variable_name in
dependent_variable_names])
variable_names += dependent_variable_names
# Set of names of dimensions used by the variables passed in.
dimension_names = mds.ordered_set.OrderedSet()
for variable_name in variable_names:
dimension_names |= self.variable_dimension_names(variable_name)
# Dictionary with slice by dimension name.
# Initialize the slices by the full range of values.
dimension_slices = {dimension_name: (0,
len(self.dataset.dimensions[dimension_name])) for dimension_name in
dimension_names}
if not first_spatial_variable_name is None:
# Add slice of spatial dimensions.
assert not extent is None
dimension_slices.update(self.spatial_dimension_slices(
first_spatial_variable_name, extent))
assert all([dimension_name in dimension_names for dimension_name in
dimension_slices.keys()])
# Update (non-spatial) dimensions with user defined slicing settings.
for dimension_selection in dimension_selections:
dimension_name, start_value, end_value = dimension_selection
if dimension_name in self.variable_names():
dimension_variable = self.variable(dimension_name)
                if value_selection_method == mds.constants.SELECT_BY_VALUE and \
self.convention.is_time_dimension_variable(
dimension_variable):
# User passed in iso formatted date/time strings. Convert
# these to datetime instances and subsequently to dimension
# coordinates.
start_value = mds.date_time.from_iso_format(start_value)
end_value = mds.date_time.from_iso_format(end_value)
time_variable = self.variable(dimension_name)
start_value, end_value = mds.netcdf.dates_to_coordinates([
start_value, end_value], time_variable)
dimension_slices[dimension_name] = self.dimension_slice(
dimension_name, start_value, end_value, value_selection_method)
# About to write dimensions and variables. First, order the variable and
# dimension names as they appear in the source dataset.
dimension_names = list(mds.ordered_set.order(dimension_names,
self.dimension_names()))
variable_names = list(mds.ordered_set.order(set(variable_names),
self.variable_names()))
# Copy dimensions.
for dimension_name in dimension_names:
new_dataset.createDimension(dimension_name,
dimension_slices[dimension_name][1] -
dimension_slices[dimension_name][0] if not
self.dimension(dimension_name).isunlimited() else None)
def init_variable(
variable,
variable_name):
new_variable = new_dataset.createVariable(variable_name,
datatype=variable.dtype, dimensions=variable.dimensions)
for attribute_name in variable.ncattrs():
new_variable.setncattr(attribute_name, variable.getncattr(
attribute_name))
return new_variable
def copy_variable(
variable,
variable_name):
new_variable = init_variable(variable, variable_name)
# When copying, there is no need to scale the values. It is
# better not to because it results in small differences due to
# casting.
variable.set_auto_maskandscale(False)
new_variable.set_auto_maskandscale(False)
slices_ = [slice(*dimension_slices[dimension_name]) for
dimension_name in variable.dimensions]
new_variable[:] = variable[slices_] if slices_ else variable[:]
for dimension_name in dimension_names:
if dimension_name in self.dataset.variables:
variable = self.dataset.variables[dimension_name]
copy_variable(variable, dimension_name)
for variable_name in variable_names:
assert variable_name not in self.dataset.dimensions
variable = self.dataset.variables[variable_name]
copy_variable(variable, variable_name)
new_dataset.close()
|
from cloudburst.client.client import CloudburstConnection
import time
def dag_start(cloudburst, key, size):
return 1
def dag_sleep(cloudburst, up_res):
import time
import uuid
time.sleep(1)
uid = str(uuid.uuid4())
return str({uid:1})
def dag_end(cloudburst, *values):
return len(values)
cloudburst_client = CloudburstConnection('127.0.0.1', '127.0.0.1', local=True)
SLEEP_NUM = 1
start_name = 'dag_start'
sleep_name = 'dag_sleep'
end_name = 'dag_end'
start_func = cloudburst_client.register(dag_start, start_name)
end_func = cloudburst_client.register(dag_end, end_name)
sleep_names = [ sleep_name + str(i) for i in range(SLEEP_NUM)]
for n in sleep_names:
cloudburst_client.register(dag_sleep, n)
dag_name = 'dag_parallel'
functions = [start_name] + sleep_names + [end_name]
conns = [(start_name, n) for n in sleep_names] + [(n, end_name) for n in sleep_names]
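# DAG topology (fork/join): dag_start fans out to every dag_sleep function,
# and all dag_sleep functions fan back in to dag_end.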
success, error = cloudburst_client.register_dag(dag_name, functions, conns)
print(f'Create dag {dag_name} {success} {error}')
arg_map = {start_name: ['dag1', 1]}
elapsed_list = []
for _ in range(5):
start = time.time()
res = cloudburst_client.call_dag(dag_name, arg_map).get()
end = time.time()
print(res)
elapsed_list.append(end - start)
print('dag results: elapsed {}'.format(elapsed_list))
suc, err = cloudburst_client.delete_dag(dag_name)
|
"""
Author: Daniel de Souza Baulé (16200639)
Course: INE5452 - Special Topics in Algorithms II
Assignment: Second practice exam - extra (non-URI) questions
Process Scheduler
"""
from src.EscalonadorDeProcessos.MaxHeap import MaxHeap
from src.EscalonadorDeProcessos.Processo import Processo
from pprint import pp
# Read the time span to be considered (for adding processes)
tempo_considerado = int(input()) + 1
# Read the groups of processes added at each timeslice
grupos_de_processos_num = list()
for _ in range(tempo_considerado):
grupos_de_processos_num.append([int(x) for x in input().split()])
# Read the number of processes
num_processos = int(input())
# Read the information for each process
dados_dos_processos = dict()
for _ in range(num_processos):
i, t, p, v = [int(x) for x in input().split()]
dados_dos_processos[i] = (t, p, v)
# Create the heap and the list of completed processes
heap_de_processos = MaxHeap(0)
processos_computados = list()
time_slice = 0
# Process the jobs, adding the new groups to the heap according to the timeslice
for time_slice in range(tempo_considerado):
if not(heap_de_processos.vazia()):
if heap_de_processos.raiz().processou():
raiz = heap_de_processos.remove_maximo_max_heap()
raiz.saiu_da_heap(time_slice)
processos_computados.append(raiz)
heap_de_processos.max_heapfica()
processos_chegando = grupos_de_processos_num[time_slice]
for num_processo in processos_chegando:
t, p, v = dados_dos_processos[num_processo]
heap_de_processos.insere_max_heap(Processo(num_processo, t, p, v, time_slice))
# Process the remaining entries in the heap
while not heap_de_processos.vazia():
time_slice += 1
if heap_de_processos.raiz().processou():
raiz = heap_de_processos.remove_maximo_max_heap()
raiz.saiu_da_heap(time_slice)
processos_computados.append(raiz)
# Print the order in which the processes were completed
ordem_dos_processos = ' '.join([str(x) for x in processos_computados])
print(f'Ordem dos processos: {ordem_dos_processos}')
# Compute the remaining values:
processo_0 = processos_computados[0]
identificador_menor_tempo_de_heap = processo_0.v
menor_tempo_de_heap = processo_0.heap_time
identificador_maior_tempo_de_heap = processo_0.v
maior_tempo_de_heap = processo_0.heap_time
for processo in processos_computados[1:]:
if processo.heap_time < menor_tempo_de_heap or \
(processo.heap_time == menor_tempo_de_heap and processo.v < identificador_menor_tempo_de_heap):
identificador_menor_tempo_de_heap = processo.v
menor_tempo_de_heap = processo.heap_time
if processo.heap_time > maior_tempo_de_heap or \
(processo.heap_time == maior_tempo_de_heap and processo.v < identificador_maior_tempo_de_heap):
identificador_maior_tempo_de_heap = processo.v
maior_tempo_de_heap = processo.heap_time
print(f'Identificador do processo com menor tempo de heap: {identificador_menor_tempo_de_heap}')
print(f'Valor do menor tempo de heap: {menor_tempo_de_heap}')
print(f'Identificador do processo com maior tempo de heap: {identificador_maior_tempo_de_heap}')
print(f'Valor do maior tempo de heap: {maior_tempo_de_heap}')
|
from .conversion import convert as geojson
|
"""macros/delete.py - Deleting character macros."""
from .. import common
from ..vchar import errors
__HELP_URL = "https://www.inconnu-bot.com/#/macros?id=deletion"
async def delete(ctx, macro_name: str, character=None):
"""Delete the given macro."""
try:
tip = f"`/macro delete` `macro:{macro_name}` `character:CHARACTER`"
character = await common.fetch_character(ctx, character, tip, __HELP_URL)
character.delete_macro(macro_name)
await ctx.respond(f"Deleted **{character.name}'s** `{macro_name}` macro.", hidden=True)
except errors.MacroNotFoundError as err:
await common.present_error(ctx, err, character=character.name, help_url=__HELP_URL)
except common.FetchError:
pass
|
# Basics
""" Summary:
A dictionary is a collection of Key and Value pairs
{Key: Value}
You can create a key connected to its own definition
e.g.
{"Bug": "An error in a program that prevents the program from running as expected"}
You can also add more entries, separating each key-value pair with a comma.
{
"Bug": "An error in a program that prevents the program from running as expected",
"Loop": The action of doing something over and over again."
}
"""
programming_dictionary = {
"Bug": "An error in a program that prevents the program from running as expected.",
"Function": "A piece of code that you can easily call over and over again.",
}
# adding a new item to dictionary
programming_dictionary["Item"] = "This is a new Value definition."
print(programming_dictionary)
# editing an existing item in the dictionary
programming_dictionary["Bug"] = "This value has been edited."
print(programming_dictionary)
# example of retrieving entries - looping through a dictionary
for key in programming_dictionary:
print(key)
print(programming_dictionary)
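# A couple of extra retrieval examples (illustrative additions):
# .get() returns None (or a supplied default) instead of raising KeyError for a missing key.
print(programming_dictionary.get("Missing", "No such entry."))
# .items() yields (key, value) pairs, handy when you need both at once.
for key, value in programming_dictionary.items():
    print(f"{key}: {value}")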
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
res = []
if root is None:
return res
if sum == root.val and root.left is None and root.right is None:
return [[root.val]]
# left side
left_res = self.pathSum(root.left, sum - root.val)
# right side
right_res = self.pathSum(root.right, sum - root.val)
# add current prefix
for t in left_res + right_res:
res.append([root.val] + t)
return res
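# Minimal illustrative usage (assumes the simple TreeNode definition below,
# since the one at the top of the file is commented out):
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

if __name__ == "__main__":
    root = TreeNode(5)
    root.left = TreeNode(4)
    root.right = TreeNode(8)
    root.left.left = TreeNode(11)
    root.left.left.left = TreeNode(7)
    root.left.left.right = TreeNode(2)
    root.right.right = TreeNode(4)
    root.right.right.left = TreeNode(5)
    root.right.right.right = TreeNode(1)
    print(Solution().pathSum(root, 22))  # expected: [[5, 4, 11, 2], [5, 8, 4, 5]]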
|
# -*- coding: utf-8 -*-
"""Parsers for tables."""
import copy
import textwrap
from typing import List, Optional, Union
import tabulate
from ..constants.tables import KEY_MAP_ADAPTER, KEY_MAP_CNX, KEY_MAP_SCHEMA, TABLE_FMT
from ..tools import json_dump, listify
def tablize(
value: List[dict],
err: Optional[str] = None,
fmt: str = TABLE_FMT,
footer: bool = True,
**kwargs,
) -> str:
"""Create a table string from a list of dictionaries.
Args:
value: list to create table from
err: error string to display at top of table
fmt: table format to use
footer: include err at bottom too
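Example (illustrative):
    tablize([{"name": "a", "id": 1}], err="not found")
    # -> a tabulate-formatted string with "not found" above and below the table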
"""
table = tabulate.tabulate(value, tablefmt=fmt, headers="keys")
if footer:
if fmt == "simple":
header = "\n" + "\n".join(reversed(table.splitlines()[0:2]))
table += header
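# (For the "simple" format, the first two lines of the tabulate output are the
# column headers and the separator; reversing and appending them repeats the
# header at the bottom of the table.)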
if err:
pre = err + "\n"
post = "\n" + err
else:
pre = "\n"
post = "\n"
return "\n".join([pre, table, post])
def tablize_schemas(
schemas: List[dict],
config: Optional[dict] = None,
err: Optional[str] = None,
fmt: str = TABLE_FMT,
footer: bool = True,
orig: bool = True,
orig_width: int = 20,
) -> str:
"""Create a table string for a set of config schemas.
Args:
schemas: config schemas to create a table from
config: current config with keys that map to schema names
err: error string to show at top
fmt: table format to use
footer: show err at bottom too
orig: show original values in output too
orig_width: column width to use for orig values
"""
values = []
config = config or None
if isinstance(schemas, dict):
schemas = list(schemas.values())
# TBD TRANSLATE ENUM DICTS!!
for schema in sorted(schemas, key=lambda x: [x["required"], x["name"]]):
value = tab_map(value=schema, key_map=KEY_MAP_SCHEMA, orig=orig, orig_width=orig_width)
if config:
config_value = config.get(schema["name"], None)
if isinstance(config_value, dict): # pragma: no cover
config_value = json_dump(config_value)
value["Current Value"] = config_value
values.append(value)
return tablize(value=values, err=err, fmt=fmt, footer=footer)
def tablize_adapters(
adapters: List[dict],
err: Optional[str] = None,
fmt: str = TABLE_FMT,
footer: bool = True,
) -> str:
"""Create a table string for a set of adapter schemas.
Args:
adapters: adapter schemas to create a table from
err: error string to show at top
fmt: table format to use
footer: show err at bottom too
"""
values = []
for adapter in adapters:
value = tab_map(value=adapter, key_map=KEY_MAP_ADAPTER, orig=False)
# value["Connection IDs"] = "\n".join([x["id"] for x in adapter["cnx"]])
values.append(value)
return tablize(value=values, err=err, fmt=fmt, footer=footer)
def tablize_cnxs(
cnxs: List[dict], err: Optional[str] = None, fmt: str = TABLE_FMT, footer: bool = True
) -> str:
"""Create a table string for a set of adapter connection schemas.
Args:
cnxs: connection schemas to create a table from
err: error string to show at top
fmt: table format to use
footer: show err at bottom too
"""
values = []
for cnx in cnxs:
value = tab_map(value=cnx, key_map=KEY_MAP_CNX, orig=False)
values.append(value)
return tablize(value=values, err=err, fmt=fmt, footer=footer)
def tablize_sqs(data: List[dict], err: str, fmt: str = TABLE_FMT, footer: bool = True) -> str:
"""Create a table string for a set of sqs.
Args:
data: sqs to create a table from
err: error string to show at top
fmt: table format to use
footer: show err at bottom too
"""
values = [tablize_sq(x) for x in data]
return tablize(value=values, err=err, fmt=fmt, footer=footer)
def tablize_sq(data: dict) -> dict:
"""Create a table entry for a sq.
Args:
data: sq to create a table entry for
"""
value = {}
value["Name"] = data["name"] # textwrap.fill(data["name"], width=30)
value["UUID"] = data["uuid"]
value["Description"] = textwrap.fill(data.get("description") or "", width=30)
value["Tags"] = "\n".join(listify(data.get("tags", [])))
return value
def tablize_users(users: List[dict], err: str, fmt: str = TABLE_FMT, footer: bool = True) -> str:
"""Create a table string for a set of users.
Args:
users: users to create a table from
err: error string to show at top
fmt: table format to use
footer: show err at bottom too
"""
values = [tablize_user(user=x) for x in users]
return tablize(value=values, err=err, fmt=fmt, footer=footer)
def tablize_user(user: dict) -> dict:
"""Create a table entry for a user.
Args:
user: user to create a table entry for
"""
tab_map = {
"Name": "user_name",
"UUID": "uuid",
"Full Name": "full_name",
"Role Name": "role_name",
"Email": "email",
"Last Login": "last_login",
"Source": "source",
}
value = {k: user.get(v) for k, v in tab_map.items()}
return value
def tablize_roles(
roles: List[dict], cat_actions: dict, err: str, fmt: str = TABLE_FMT, footer: bool = True
) -> str:
"""Create a table string for a set of roles.
Args:
roles: roles to create a table from
cat_actions: category -> actions mapping
err: error string to show at top
fmt: table format to use
footer: show err at bottom too
"""
values = [tablize_role(role=x, cat_actions=cat_actions) for x in roles]
return tablize(value=values, err=err, fmt=fmt, footer=footer)
def tablize_role(role: dict, cat_actions: dict) -> dict:
"""Create a table entry for a role.
Args:
role: role to create a table entry for
cat_actions: category -> actions mapping
"""
tab_map = {"Name": "name", "UUID": "uuid"}
value = {k: role.get(v) for k, v in tab_map.items()}
perms = role["permissions_flat"]
value_perms = []
for cat, action in perms.items():
if all(list(action.values())):
has_perms = "all"
else:
has_perms = ", ".join([k for k, v in action.items() if v])
value_perms.append(f"{cat}: {has_perms}")
value_perms = "\n".join(value_perms)
value["Categories: actions"] = value_perms
return value
def tab_map(
value: dict,
key_map: List[List[Union[str, int]]],
orig: bool = False,
orig_width: int = 20,
) -> dict:
"""Create a new schema that has columns in a table friendly output format.
Args:
value: schema to parse
key_map: key map containing key name -> column title -> column width
orig: include values from original schema not in key map
orig_width: default column width to use for values from original schema
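Example key_map entry (illustrative): ["title", "Title", 30] maps the schema
key "title" to a column named "Title" whose values are wrapped at 30 characters.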
"""
orig_value = copy.deepcopy(value)
new_value = {}
for key, name, width in key_map:
if key in orig_value and name:
key_value = orig_value.pop(key)
if isinstance(key_value, list):
key_value = "\n".join([str(x) for x in key_value])
if isinstance(key_value, str) and key_value and width:
key_value = textwrap.fill(key_value, width=width)
new_value[name] = key_value
if orig:
for orig_key, orig_key_value in orig_value.items():
if isinstance(orig_key_value, dict): # pragma: no cover
continue
new_key_value = orig_key_value
if isinstance(orig_key_value, list):
new_key_value = "\n".join([str(x) for x in orig_key_value])
if isinstance(orig_key_value, str) and orig_key_value and orig_width:
new_key_value = textwrap.fill(orig_key_value, width=orig_width)
new_value[orig_key] = new_key_value
return new_value
|
'''
Given an integer array nums, find the contiguous subarray (containing at least one number)
which has the largest sum and return its sum.
A subarray is a contiguous part of an array.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Example 2:
Input: nums = [1]
Output: 1
Example 3:
Input: nums = [5,4,-1,7,8]
Output: 23
'''
from typing import List
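# Approach (Kadane's algorithm): keep a running sum of the current subarray,
# reset it to zero whenever it drops below zero, and track the best sum seen so far.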
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
max_sub = nums[0]
cur_sum = 0
for n in nums:
if cur_sum < 0:
cur_sum = 0
cur_sum += n
max_sub = max(max_sub, cur_sum)
return max_sub
s = Solution()
print(s.maxSubArray(nums=[-2, 1, -3, 4, -1, 2, 1, -5, 4]))
|
# check python version, this script requires python3
import sys
if sys.version_info[0] < 3:
print('ERROR: This script requires Python 3')
sys.exit(1)
import os
import subprocess
from argparse import ArgumentParser
# ################################ #
# Main Program #
# ################################ #
# argument parsing
parser = ArgumentParser(description="Simple script that turns a series of turntable images in a given directory into a little .mov clip, using ffmpeg", epilog="")
parser.add_argument("inputDir", help="input directory of images (must be PNG files)")
pArgs = parser.parse_args()
inputDir = vars(pArgs)["inputDir"]
inputFileList = os.listdir(inputDir)
# TODO: could do some sanity checks here
# for now, we assume the directory contains PNG files with the correct filenames & number padding
numDigits = 1
for inputFile in inputFileList:
inputFileName = inputFile[0:inputFile.rfind('.')]
numDigits = len(inputFileName)
break
# run ffmpeg
cmdline = ['ffmpeg', '-framerate', '60', '-i', './' + inputDir + '/%0' + str(numDigits) + 'd.png', '-vcodec', 'png', inputDir + '.mov']
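# Illustrative example of the generated command, assuming inputDir='turntable'
# with 4-digit frame names:
#   ffmpeg -framerate 60 -i ./turntable/%04d.png -vcodec png turntable.mov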
print(cmdline)
try:
subprocess.check_output(cmdline)
except Exception as e:
print("******************************************************************")
print(" - Error: {0}".format(e))
print("******************************************************************")
|
# The MIT License (MIT)
#
# Copyright (c) 2017-2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
#: This value is set automatically before the Node.py entry point is invoked
#: from scripts that are installed via the Node.py package manager. It will be
#: a dictionary with the following keys:
#:
#: * location: Either `system`, `global` or `local`
#: * original_path: The original value of `sys.path` before it was augmented
#: by the Node.py entry point.
#: * args: The original value of `sys.argv` when the script was invoked.
script = None
#: A list of command-line arguments to spawn a new Node.py child-process.
#: This is usually the Python interpreter and the path to the Node.py Python
#: module.
exec_args = [sys.executable, os.path.join(os.path.dirname(__file__), 'main.py')]
#: The name of the Python implementation that we're running, e.g. cpython,
#: pypy, jython, ironpython, etc.
implementation = None
if hasattr(sys, 'implementation'):
implementation = sys.implementation.name.lower()
else:
implementation = sys.subversion[0].lower()
#: The value of the `NODEPY_ENV` environment variable, which must be either
#: `"production"` or `"development"`. If an invalid value is specified, a
#: warning is printed and it defaults to `"development"`.
env = os.getenv('NODEPY_ENV', 'development')
if env not in ('production', 'development'):
print('warning: invalid value of environment variable NODEPY_ENV="{}".'
.format(env))
print(' falling back to NODEPY_ENV="development".')
os.environ['NODEPY_ENV'] = env = 'development'
|
'''
based on the code in the thread https://forum.omz-software.com/topic/1686/3d-in-pythonista by omz
'''
from objc_util import *
import sceneKit as scn
import ui
import math
@on_main_thread
def demo():
main_view = ui.View()
w, h = ui.get_screen_size()
main_view.frame = (0,0,w,h)
main_view.name = 'particles demo'
scene_view = scn.View(main_view.frame, superView=main_view)
scene_view.autoresizingMask = scn.ViewAutoresizing.FlexibleHeight | scn.ViewAutoresizing.FlexibleWidth
scene_view.antialiasingMode = scn.AntialiasingMode.Multisampling16X
scene_view.allowsCameraControl = True
scene_view.backgroundColor = 'black'
scene_view.scene = scn.Scene()
root_node = scene_view.scene.rootNode
text_mesh = scn.Text.textWithString('Pythonista', 6.0)
text_mesh.flatness = 0.2
text_mesh.chamferRadius = 0.4
text_mesh.font = ('HelveticaNeue-Bold', 18)
bbox_min, bbox_max = text_mesh.boundingBox
text_width = bbox_max.x - bbox_min.x
text_node = scn.Node.nodeWithGeometry(text_mesh)
text_node.castsShadow = False
text_container = scn.Node.node()
text_container.addChildNode(text_node)
text_container.position = (0, 40, 0)
text_node.position = (-text_width/2, 0, 0)
fire = scn.ParticleSystem()
fire.birthRate = 300000
fire.loops = True
fire.emissionDuration = 8
fire.emissionDurationVariation = 4
fire.idleDuration = 2
fire.idleDurationVariation = 0.5
fire.emittingDirection = (0, 1, 0)
fire.spreadingAngle = 15
fire.particleDiesOnCollision = False
fire.particleLifeSpan = 0.4
fire.particleLifeSpanVariation = 0.5
fire.particleVelocity = 20
fire.particleVelocityVariation = 30
fire.particleImage = ui.Image.named('shp:WhitePuff05')
fire.particleSize = 0.4
fire.particleSizeVariation = 0.2
fire.particleIntensity = 1.5
fire.particleIntensityVariation = 2
fire.stretchFactor = 0.02
colorAnim = scn.CoreKeyframeAnimation()
colorAnim.values = [(.99, 1.0, .71, 0.8), (1.0, .52, .0, 0.8), (1., .0, .1, 1.), (.78, .0, .0, 0.3)]
colorAnim.keyTimes = (0., 0.1, 0.8, 1.)
fire.timingFunctions = [scn.CoreMediaTimingFunction.functionWithName(aFunc) for aFunc in [scn.MediaTimingFunctionEaseOut, scn.MediaTimingFunctionEaseInEaseOut, scn.MediaTimingFunctionEaseIn]]
prop_con = scn.ParticlePropertyController.controllerWithAnimation(colorAnim)
fire.propertyControllers = {scn.SCNParticlePropertyColor:prop_con}
fire.emitterShape = text_mesh
fire.birthLocation = scn.ParticleBirthLocation.SCNParticleBirthLocationSurface
text_node.addParticleSystem(fire)
root_node.addChildNode(text_container)
light_node = scn.Node.node()
light_node.position = (0, 105, 5)
light_node.rotation = (1, 0, 0, -math.pi/2)
light = scn.Light.light()
light.type = 'spot'
light.spotOuterAngle = 90
light.castsShadow = True
light.shadowSampleCount = 16
light.color = 'white'
light_node.light = light
root_node.addChildNode(light_node)
main_view.present(style='fullscreen', hide_title_bar=False)
demo()
|
# Generated by Django 2.0 on 2019-09-07 12:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Teams',
fields=[
('teamname', models.CharField(max_length=250, primary_key=True, serialize=False)),
('email', models.EmailField(max_length=250, unique=True)),
('job', models.CharField(default='', max_length=100, null=True)),
('company', models.CharField(default='', max_length=250, null=True)),
('points', models.IntegerField(default=0)),
],
),
]
|
from cms.models import Page, Title
from django.contrib import admin
from django.contrib.admin.options import csrf_protect_m
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
class ExtensionAdmin(admin.ModelAdmin):
change_form_template = "admin/cms/extensions/change_form.html"
add_form_template = "admin/cms/extensions/change_form.html"
class PageExtensionAdmin(ExtensionAdmin):
def save_model(self, request, obj, form, change):
if not change and 'extended_object' in request.GET:
obj.extended_object = Page.objects.get(pk=request.GET['extended_object'])
page = Page.objects.get(pk=request.GET['extended_object'])
else:
page = obj.extended_object
if not page.has_change_permission(request):
raise PermissionDenied()
super(PageExtensionAdmin, self).save_model(request, obj, form, change)
def delete_model(self, request, obj):
if not obj.extended_object.has_change_permission(request):
raise PermissionDenied()
obj.delete()
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
def queryset(self, request):
return super(PageExtensionAdmin, self).queryset(request).filter(extended_object__publisher_is_draft=True)
@csrf_protect_m
def add_view(self, request, form_url='', extra_context=None):
"""
Check if the page already has an extension object. If so, redirect to edit view instead.
"""
extended_object_id = request.GET.get('extended_object', False)
if extended_object_id:
try:
page = Page.objects.get(pk=extended_object_id)
extension = self.model.objects.get(extended_object=page)
opts = self.model._meta
change_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.module_name),
args=(extension.pk,),
current_app=self.admin_site.name)
return HttpResponseRedirect(change_url)
except self.model.DoesNotExist:
pass
return super(ExtensionAdmin, self).add_view(request, form_url, extra_context)
class TitleExtensionAdmin(ExtensionAdmin):
def save_model(self, request, obj, form, change):
if not change and 'extended_object' in request.GET:
obj.extended_object = Title.objects.get(pk=request.GET['extended_object'])
title = Title.objects.get(pk=request.GET['extended_object'])
else:
title = obj.extended_object
if not title.page.has_change_permission(request):
raise PermissionDenied()
super(TitleExtensionAdmin, self).save_model(request, obj, form, change)
def delete_model(self, request, obj):
if not obj.extended_object.page.has_change_permission(request):
raise PermissionDenied()
obj.delete()
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
def queryset(self, request):
return super(TitleExtensionAdmin, self).queryset(request).filter(extended_object__page__publisher_is_draft=True)
@csrf_protect_m
def add_view(self, request, form_url='', extra_context=None):
"""
Check if the page already has an extension object. If so, redirect to edit view instead.
"""
extended_object_id = request.GET.get('extended_object', False)
if extended_object_id:
try:
title = Title.objects.get(pk=extended_object_id)
extension = self.model.objects.get(extended_object=title)
opts = self.model._meta
change_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.module_name),
args=(extension.pk,),
current_app=self.admin_site.name)
return HttpResponseRedirect(change_url)
except self.model.DoesNotExist:
pass
return super(ExtensionAdmin, self).add_view(request, form_url, extra_context)
|
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
"""
This is a DRC/LVS/PEX interface file for klayout.
"""
import os
import re
import shutil
import debug
from globals import OPTS
from run_script import *
# Keep track of statistics
num_drc_runs = 0
num_lvs_runs = 0
num_pex_runs = 0
def write_drc_script(cell_name, gds_name, extract, final_verification, output_path, sp_name=None):
"""
Write a klayout script to perform DRC and optionally extraction.
"""
global OPTS
# DRC:
# klayout -b -r drc_FreePDK45.lydrc -rd input=sram_8_256_freepdk45.gds -rd topcell=sram_8_256_freepdk45 -rd output=drc_FreePDK45.lyrdb
# Copy .lydrc file into the output directory
full_drc_file = OPTS.openram_tech + "tech/{}.lydrc".format(OPTS.tech_name)
drc_file = os.path.basename(full_drc_file)
if os.path.exists(full_drc_file):
shutil.copy(full_drc_file, output_path)
else:
debug.warning("Could not locate file: {}".format(full_drc_file))
# Create an auxiliary script to run klayout with the copied DRC deck
run_file = output_path + "run_drc.sh"
f = open(run_file, "w")
f.write("#!/bin/sh\n")
cmd = "{0} -b -r {1} -rd input={2} -rd topcell={3} -rd output={3}.drc.report".format(OPTS.drc_exe[1],
drc_file,
gds_name,
cell_name)
f.write(cmd)
f.write("\n")
f.close()
os.system("chmod u+x {}".format(run_file))
def run_drc(cell_name, gds_name, sp_name=None, extract=True, final_verification=False):
"""Run DRC check on a cell which is implemented in gds_name."""
global num_drc_runs
num_drc_runs += 1
write_drc_script(cell_name, gds_name, extract, final_verification, OPTS.openram_temp, sp_name=sp_name)
(outfile, errfile, resultsfile) = run_script(cell_name, "drc")
# Check the result for these lines in the summary:
# Total DRC errors found: 0
# The count is shown in this format:
# Cell replica_cell_6t has 3 error tiles.
# Cell tri_gate_array has 8 error tiles.
# etc.
try:
f = open(resultsfile, "r")
except FileNotFoundError:
debug.error("Unable to load DRC results file from {}. Is klayout set up?".format(resultsfile), 1)
results = f.readlines()
f.close()
errors=len([x for x in results if "<visited>" in x])
# always display this summary
result_str = "DRC Errors {0}\t{1}".format(cell_name, errors)
if errors > 0:
debug.warning(result_str)
else:
debug.info(1, result_str)
return errors
def write_lvs_script(cell_name, gds_name, sp_name, final_verification=False, output_path=None):
""" Write a klayout script to perform LVS. """
# LVS:
# klayout -b -rd input=sram_32_2048_freepdk45.gds -rd report=my_report.lyrdb -rd schematic=sram_32_2048_freepdk45.sp -rd target_netlist=sram_32_2048_freepdk45_extracted.cir -r lvs_freepdk45.lvs
global OPTS
if not output_path:
output_path = OPTS.openram_temp
# Copy .lylvs file into the output directory
full_lvs_file = OPTS.openram_tech + "tech/{}.lylvs".format(OPTS.tech_name)
lvs_file = os.path.basename(full_lvs_file)
if os.path.exists(full_lvs_file):
shutil.copy(full_lvs_file, output_path)
else:
debug.warning("Could not locate file: {}".format(full_lvs_file))
run_file = output_path + "/run_lvs.sh"
f = open(run_file, "w")
f.write("#!/bin/sh\n")
cmd = "{0} -b -r {1} -rd input={2} -rd report={4}.lvs.report -rd schematic={3} -rd target_netlist={4}.spice".format(OPTS.lvs_exe[1],
lvs_file,
gds_name,
sp_name,
cell_name)
f.write(cmd)
f.write("\n")
f.close()
os.system("chmod u+x {}".format(run_file))
def run_lvs(cell_name, gds_name, sp_name, final_verification=False, output_path=None):
"""Run LVS check on a given top-level name which is
implemented in gds_name and sp_name. Final verification will
ensure that there are no remaining virtual connections. """
global num_lvs_runs
num_lvs_runs += 1
if not output_path:
output_path = OPTS.openram_temp
write_lvs_script(cell_name, gds_name, sp_name, final_verification)
(outfile, errfile, resultsfile) = run_script(cell_name, "lvs")
# check the result for these lines in the summary:
try:
f = open(outfile, "r")
except FileNotFoundError:
debug.error("Unable to load LVS results from {}".format(outfile), 1)
results = f.readlines()
f.close()
# Look for CONGRATULATIONS or ERROR
congrats = len([x for x in results if "CONGRATULATIONS" in x])
total_errors = len([x for x in results if "ERROR" in x])
if total_errors>0:
debug.error("{0}\tLVS mismatch (results in {1})".format(cell_name, resultsfile))
elif congrats>0:
debug.info(1, "{0}\tLVS matches".format(cell_name))
else:
debug.info(1, "{0}\tNo LVS result".format(cell_name))
total_errors += 1
return total_errors
def run_pex(name, gds_name, sp_name, output=None, final_verification=False, output_path=None):
"""Run pex on a given top-level name which is
implemented in gds_name and sp_name. """
debug.error("PEX not implemented", -1)
global num_pex_runs
num_pex_runs += 1
if not output_path:
output_path = OPTS.openram_temp
os.chdir(output_path)
if output == None:
output = name + ".pex.netlist"
# check if lvs report has been done
# if not run drc and lvs
if not os.path.isfile(name + ".lvs.report"):
run_drc(name, gds_name)
run_lvs(name, gds_name, sp_name)
# # pex_fix ran PEX using a script, while the original dev method
# # used batch mode.
# # The old batch-mode code does not run and has been split into functions.
# pex_runset = write_script_pex_rule(gds_name, name, sp_name, output)
# errfile = "{0}{1}.pex.err".format(output_path, name)
# outfile = "{0}{1}.pex.out".format(output_path, name)
# script_cmd = "{0} 2> {1} 1> {2}".format(pex_runset,
# errfile,
# outfile)
# cmd = script_cmd
# debug.info(2, cmd)
# os.system(cmd)
# # rename technology models
# pex_nelist = open(output, 'r')
# s = pex_nelist.read()
# pex_nelist.close()
# s = s.replace('pfet', 'p')
# s = s.replace('nfet', 'n')
# f = open(output, 'w')
# f.write(s)
# f.close()
# # also check the output file
# f = open(outfile, "r")
# results = f.readlines()
# f.close()
# out_errors = find_error(results)
# debug.check(os.path.isfile(output), "Couldn't find PEX extracted output.")
# correct_port(name, output, sp_name)
# Unreachable until PEX is implemented; return a nonzero error count as a placeholder.
return 1
def write_batch_pex_rule(gds_name, name, sp_name, output):
"""
"""
# write the runset file
file = OPTS.openram_temp + "pex_runset"
f = open(file, "w")
f.close()
return file
def write_script_pex_rule(gds_name, cell_name, sp_name, output):
global OPTS
run_file = OPTS.openram_temp + "run_pex.sh"
f = open(run_file, "w")
f.write("#!/bin/sh\n")
f.write('export OPENRAM_TECH="{}"\n'.format(os.environ['OPENRAM_TECH']))
f.write('echo "$(date): Starting PEX using Klayout {}"\n'.format(OPTS.drc_exe[1]))
f.write("retcode=$?\n")
f.write("mv {0}.spice {1}\n".format(cell_name, output))
f.write('echo "$(date): Finished PEX using Klayout {}"\n'.format(OPTS.drc_exe[1]))
f.write("exit $retcode\n")
f.close()
os.system("chmod u+x {}".format(run_file))
return run_file
def print_drc_stats():
debug.info(1, "DRC runs: {0}".format(num_drc_runs))
def print_lvs_stats():
debug.info(1, "LVS runs: {0}".format(num_lvs_runs))
def print_pex_stats():
debug.info(1, "PEX runs: {0}".format(num_pex_runs))
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from shared import ds_util
from shared.datastore.athlete import Athlete
from shared.datastore.activity import Activity
from shared.datastore.club import Club
from shared.services.strava.client import ClientWrapper
class ClubWorker(object):
def __init__(self, club_id, service):
self.club_id = club_id
self.service = service
self.client = ClientWrapper(service)
def sync(self):
self.sync_club()
club = self.sync_activities()
return club
def sync_club(self):
self.client.ensure_access()
club = self.client.get_club(self.club_id)
club_entity = Club.to_entity(club, parent=self.service.key)
club_entity['members'] = [Athlete.to_entity(member) for member in club.members]
ds_util.client.put(club_entity)
return club_entity
def sync_activities(self):
self.client.ensure_access()
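# Replace the club's cached activities atomically: delete every existing
# Activity entity under the club, then re-insert the freshly fetched ones.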
with ds_util.client.transaction():
club = Club.get(self.club_id, parent=self.service.key)
activity_query = ds_util.client.query(kind='Activity', ancestor=club.key)
activity_query.keys_only()
ds_util.client.delete_multi(
activity.key for activity in activity_query.fetch()
)
for activity in self.client.get_club_activities(club.id):
activity_entity = Activity.to_entity(activity, parent=club.key)
ds_util.client.put(activity_entity)
return club
|
# NOTE: This code is expected to run in a virtual Python environment.
from abc import ABC, abstractmethod
import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin
import pydicom
import numpy as np
import csv
import sys
import traceback
from pydicom.sr.codedict import codes
from typing import Any, List, Union, Dict, Callable
from highdicom import AlgorithmIdentificationSequence, UID
from highdicom.seg import Segmentation, SegmentDescription, SegmentAlgorithmTypeValues, SegmentationTypeValues
#
# DicomViz
#
class DicomViz(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent) -> None:
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Interoperable DICOM Annotation Workflows"
self.parent.categories = ["Annotation"]
self.parent.dependencies = []
self.parent.contributors = ["Aarash Heydari"]
self.parent.helpText = """
Performs the selected I/O actions against a loaded DICOM file.
"""
self.parent.acknowledgementText = "Thank you!"
self.moduleName = self.__class__.__name__
logging.info("Debug: Successfully loaded module")
#
# Register sample data sets in Sample Data module
#
#
# DicomVizWidget
#
class DicomVizWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
VTKObservationMixin.__init__(self) # needed for parameter node observation
self.logic = None
self._parameterNode = None
self._updatingGUIFromParameterNode = False
def setup(self):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.setup(self)
# Load widget from .ui file (created by Qt Designer).
# Additional widgets can be instantiated manually and added to self.layout.
uiWidget = slicer.util.loadUI(self.resourcePath('UI/DicomViz.ui'))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
# Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
# "mrmlSceneChanged(vtkMRMLScene*)" signal in is connected to each MRML widget's.
# "setMRMLScene(vtkMRMLScene*)" slot.
uiWidget.setMRMLScene(slicer.mrmlScene)
# Create logic class. Logic implements all computations that should be possible to run
# in batch mode, without a graphical user interface.
self.logic = DicomVizLogic()
# Connections
# These connections ensure that we update parameter node when scene is closed
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)
# These connections ensure that whenever user changes some settings on the GUI, that is saved in the MRML scene
# (in the selected parameter node).
self.ui.inputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
self.ui.actionSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
self.ui.filePathSelector.connect("valueChanged(string)", self.updateParameterNodeFromGUI)
# DELETED # self.ui.invertOutputCheckBox.connect("toggled(bool)", self.updateParameterNodeFromGUI)
# self.ui.outputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
# DELETED# self.ui.invertedOutputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
# Buttons
self.ui.applyButton.connect('clicked(bool)', self.onApplyButton)
# Make sure parameter node is initialized (needed for module reload)
self.initializeParameterNode()
def cleanup(self):
"""
Called when the application closes and the module widget is destroyed.
"""
self.removeObservers()
def enter(self):
"""
Called each time the user opens this module.
"""
# Make sure parameter node exists and observed
self.initializeParameterNode()
def exit(self):
"""
Called each time the user opens a different module.
"""
# Do not react to parameter node changes (GUI will be updated when the user enters the module)
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
def onSceneStartClose(self, caller, event):
"""
Called just before the scene is closed.
"""
# Parameter node will be reset, do not use it anymore
self.setParameterNode(None)
def onSceneEndClose(self, caller, event):
"""
Called just after the scene is closed.
"""
# If this module is shown while the scene is closed then recreate a new parameter node immediately
if self.parent.isEntered:
self.initializeParameterNode()
def initializeParameterNode(self):
"""
Ensure parameter node exists and observed.
"""
# Parameter node stores all user choices in parameter values, node selections, etc.
# so that when the scene is saved and reloaded, these settings are restored.
self.setParameterNode(self.logic.getParameterNode())
# Select default input nodes if nothing is selected yet to save a few clicks for the user
if not self._parameterNode.GetNodeReference("InputVolume"):
firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
if firstVolumeNode:
self._parameterNode.SetNodeReferenceID("InputVolume", firstVolumeNode.GetID())
def setParameterNode(self, inputParameterNode):
"""
Set and observe parameter node.
Observation is needed because when the parameter node is changed then the GUI must be updated immediately.
"""
if inputParameterNode:
self.logic.setDefaultParameters(inputParameterNode)
# Unobserve previously selected parameter node and add an observer to the newly selected.
# Changes of parameter node are observed so that whenever parameters are changed by a script or any other module
# those are reflected immediately in the GUI.
if self._parameterNode is not None:
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
self._parameterNode = inputParameterNode
if self._parameterNode is not None:
self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
# Initial GUI update
self.updateGUIFromParameterNode()
def updateGUIFromParameterNode(self, caller=None, event=None):
"""
This method is called whenever parameter node is changed.
The module GUI is updated to show the current state of the parameter node.
"""
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
# Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop)
self._updatingGUIFromParameterNode = True
# Update node selectors and sliders
self.ui.inputSelector.setCurrentNode(self._parameterNode.GetNodeReference("InputVolume"))
# self.ui.filePathSelector.setCurrentNode(self._parameterNode.GetNodeReference("OutputVolume"))
# self.ui.invertedOutputSelector.setCurrentNode(self._parameterNode.GetNodeReference("OutputVolumeInverse"))
# self.ui.imageThresholdSliderWidget.value = float(self._parameterNode.GetParameter("Threshold"))
# self.ui.invertOutputCheckBox.checked = (self._parameterNode.GetParameter("Invert") == "true")
# Update buttons states and tooltips
# AARASH: Consider adding this back if I need the input volume to also be a parameter
if self._parameterNode.GetNodeReference("InputVolume"):#and self._parameterNode.GetNodeReference("OutputVolume"):
self.ui.applyButton.toolTip = "Apply the selection actions"
self.ui.applyButton.enabled = True
else:
self.ui.applyButton.toolTip = "Select input volume nodes"
self.ui.applyButton.enabled = False
# All the GUI updates are done
self._updatingGUIFromParameterNode = False
def updateParameterNodeFromGUI(self, caller=None, event=None):
"""
This method is called when the user makes any change in the GUI.
The changes are saved into the parameter node (so that they are restored when the scene is saved and loaded).
"""
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch
self._parameterNode.SetNodeReferenceID("InputVolume", self.ui.inputSelector.currentNodeID)
# NOTE: self.ui.actionSelector.currentItem() may be None or a QListWidgetItem whose .text() corresponds to the text defined in DicomViz.ui
if self.ui.actionSelector.currentItem():
self._parameterNode.SetParameter("Action", self.ui.actionSelector.currentItem().text())
else:
self._parameterNode.SetParameter("Action", "")
self._parameterNode.SetParameter("FilePath", self.ui.filePathSelector.plainText)
# self._parameterNode.SetParameter("Threshold", str(self.ui.imageThresholdSliderWidget.value))
# self._parameterNode.SetParameter("Invert", "true" if self.ui.invertOutputCheckBox.checked else "false")
# self._parameterNode.SetNodeReferenceID("OutputVolumeInverse", self.ui.invertedOutputSelector.currentNodeID)
self._parameterNode.EndModify(wasModified)
def onApplyButton(self):
"""
Run processing when user clicks "Apply" button.
"""
try:
logging.info("****Attempted Apply******\n\n")
if self.ui.actionSelector.currentItem() is None:
raise Exception("Select an action.")
# # Compute output
# self.logic.process(self.ui.inputSelector.currentNode(), self.ui.outputSelector.currentNode(),
# self.ui.imageThresholdSliderWidget.value, self.ui.invertOutputCheckBox.checked)
self.logic.process(self.ui.inputSelector.currentNode(), self.ui.actionSelector.currentItem().text(), self.ui.filePathSelector.plainText)
except Exception as e:
slicer.util.errorDisplay("Failed to compute results: "+str(e))
import traceback
traceback.print_exc()
#
# DicomVizLogic
#
#######AARASH CHANGE START HERE#
class DicomFile:
inner: Union[pydicom.dataset.FileDataset, pydicom.dicomdir.DicomDir]
file_path: str
def __init__(
self,
dicom_file: Union[pydicom.dataset.FileDataset, pydicom.dicomdir.DicomDir],
file_path: str
):
self.inner = dicom_file
self.file_path = file_path
# An abstraction around ROIs with the necessary APIs to write-back DICOM annotations.
class AbstractRoi(ABC):
#########################
### Interface methods ###
#########################
'''
Returns an array of segmentation pixel data of boolean data type
representing a mask image. The array may be a 2D or 3D numpy array.
If it is a 2D numpy array, it represents the segmentation of a
single frame image, such as a planar x-ray or single instance from
a CT or MR series. In this case, `get_spanned_dicom_files` should
return a list of size 1.
If it is a 3D array, it represents the segmentation of either a
series of source images (such as a series of CT or MR images) a
single 3D multi-frame image (such as a multi-frame CT/MR image), or
a single 2D tiled image (such as a slide microscopy image).
If ``pixel_array`` represents the segmentation of a 3D image, the
first dimension represents individual 2D planes. Unless the
``plane_positions`` parameter is provided, the frame in
``pixel_array[i, ...]`` should correspond to either
``source_images[i]`` (if ``source_images`` is a list of single
frame instances) or source_images[0].pixel_array[i, ...] if
``source_images`` is a single multiframe instance.
'''
@abstractmethod
def get_pixel_mask(self) -> np.ndarray:
pass
@abstractmethod
def get_centroid(self) -> List[float]:
pass
@abstractmethod
def get_area_millimeters(self) -> float:
pass
@abstractmethod
def get_spanned_dicom_files(self) -> List[DicomFile]:
pass
# Renders a new ROI within the DICOM viewer.
# Implementation is particularly dependent on the internal data structures
# of the DICOM viewer software being used.
@abstractmethod
def render_new(**kwargs: Dict[str, Any]) -> None:
pass
#########################
## Static util methods ##
#########################
# Note: The relation between world coordinates and voxel
# coordinates is as follows:
# World coordinate (0, 0) ~= voxel coordinate (256, 256)
# in the middle of the photo
# Positive X coordinates are the patient's right, which is
# the viewer's left side of an image, which trends toward
# voxel-X-coordinate 0.
# Conversely, as world-X -> negative values :: voxel-X -> 512
# Similarly for Y coordinates,
# world-Y -> positive (anterior) :: voxel-Y -> 0
# world-Y -> negative (posterior) :: voxel-Y -> 512
#
# Because of this inverse relationship, both voxel_to_world
# and world_to_voxel negate the coordinate input.
@staticmethod
def voxel_to_world(vX: int, vY: int, spacing: List[float], origin: List[float]):
wX = -(vX * spacing[0]) + origin[0]
wY = -(vY * spacing[1]) + origin[1]
return wX, wY
@staticmethod
def world_to_voxel(wX: float, wY: float, spacing: List[float], origin: List[float]):
vX = -(wX - origin[0]) / spacing[0]
vY = -(wY - origin[1]) / spacing[1]
return round(vX), round(vY)
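# Illustrative round trip (assumed values): with spacing=[0.7, 0.7] and
# origin=[350.0, 350.0], voxel (0, 0) maps to world (350.0, 350.0), and
# world (0.0, 0.0) maps back to voxel (500, 500).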
@staticmethod
# Source: https://stackoverflow.com/questions/44865023/circular-masking-an-image-in-python-using-numpy-arrays
def create_circular_mask(h, w, centerX, centerY, radius):
# Grid of indices
Y, X = np.ogrid[:h, :w]
# Grid of distances
dist_from_center = np.sqrt((X - centerX)**2 + (Y-centerY)**2)
# Grid of 1s and 0s for distances <= radius
mask = np.array(
[[1 if dist <= radius else 0 for dist in dist_row]
for dist_row in dist_from_center]
)
return mask
class DicomViewerState:
dicom_files: List[DicomFile]
rois: List[AbstractRoi]
# dicom_files should be sorted by Z axis increasing
def __init__(
self,
dicom_files: List[DicomFile],
rois: List[AbstractRoi]
):
self.dicom_files = dicom_files
self.rois = rois
def get_series_instance_uid(self):
assert(len(self.dicom_files)) > 0
return self.dicom_files[0].inner.SeriesInstanceUID
# An abstraction around a DICOM viewer for producing annotated DICOM files.
class AbstractDicomViewerBackend(ABC):
@abstractmethod
def get_state(self) -> DicomViewerState:
pass
# An abstraction around any action that this tool ought to perform.
# This includes I/O actions such as embedding an annotation into a DICOM overlay,
# importing annotations from CSV, etc.
class AbstractAction(ABC):
@abstractmethod
def apply(self, state: DicomViewerState) -> None:
pass
class SlicerBackend(AbstractDicomViewerBackend):
# Maintain a handle to the volume node being operated upon. This encompasses all of the DICOM instances of a series.
def __init__(self, vtkMRMLScalarVolumeNode):
self.volume_node = vtkMRMLScalarVolumeNode
# Loads the currently rendered DICOM objects and ROIs.
def get_state(self) -> DicomViewerState:
logging.info("Listing annotated dicom...")
dicom_files: List[DicomFile] = []
inst_uids = self.volume_node.GetAttribute("DICOM.instanceUIDs").split()
# Load each file by its instance uid, collecting into `dicom_files`
for inst_uid in inst_uids:
file_path = slicer.dicomDatabase.fileForInstance(inst_uid)
dicom_file = pydicom.read_file(file_path)
dicom_files.append(DicomFile(dicom_file, file_path))
dicom_files = sorted(dicom_files, key=lambda x: x.inner.InstanceNumber)
state = DicomViewerState(dicom_files, [])
# Collect the ROIs that reference the current volume into the DICOM viewer state
roi_nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLAnnotationROINode")
roi_nodes = [n for n in roi_nodes if n.GetAttribute('AssociatedNodeID') == self.volume_node.GetID()]
for roi_node in roi_nodes:
roi = SlicerRoi(roi_node, self.volume_node, state)
state.rois.append(roi)
logging.info("Finished loading %d DICOM files and %d ROIs" % (len(state.dicom_files), len(state.rois)))
return state
class SlicerRoi(AbstractRoi):
roi_node: Any
volume_node: Any
state: DicomViewerState
def __init__(self, vtkMRMLAnnotationROINode, vtkMRMLScalarVolumeNode, state: DicomViewerState):
self.roi_node = vtkMRMLAnnotationROINode
self.volume_node = vtkMRMLScalarVolumeNode
self.state = state
# Returns the center of ROI in 3D world coordinates.
def get_centroid(self) -> List[float]:
# The _.GetXYZ function mutates an array which is passed in.
xyz: List[float] = [0., 0., 0.]
self.roi_node.GetXYZ(xyz)
return xyz
# Returns the area of the cross-section of the ROI around the centroid.
# In this Slicer implementation, we assume the ROI is a rectangle.
def get_area_millimeters(self):
radius = [0, 0, 0]
self.roi_node.GetRadiusXYZ(radius)
area = radius[0]*radius[1]*2
return area
def get_pixel_mask(self):
# For rectangles, we build a rectangle using the corners.
corners = [0, 0, 0, 0, 0, 0]
self.roi_node.GetBounds(corners)
min_x, min_y = AbstractRoi.world_to_voxel(corners[1], corners[3], self.volume_node.GetSpacing(), self.volume_node.GetOrigin())
max_x, max_y = AbstractRoi.world_to_voxel(corners[0], corners[2], self.volume_node.GetSpacing(), self.volume_node.GetOrigin())
mask = np.zeros((len(self.state.dicom_files), 512, 512)).astype(np.uint8)
spanned_files = self.get_spanned_dicom_files()
low_index = min([round(float(dcm.inner.ImagePositionPatient[2])) for dcm in spanned_files])
high_index = max([round(float(dcm.inner.ImagePositionPatient[2])) for dcm in spanned_files])
mask[low_index:high_index, min_x:max_x, min_y:max_y] = 1
return mask
def get_spanned_dicom_files(self) -> List[DicomFile]:
spanned_dicom_files = [dicom_file for dicom_file in self.state.dicom_files if SlicerRoi.contains(self.roi_node, dicom_file)]
return spanned_dicom_files
def render_new(**kwargs: Dict[str, Any]) -> None:
pass
def contains(roi_node: Any, dicom_file: DicomFile):
z_coordinate = float(dicom_file.inner.ImagePositionPatient[2])
corners = [0, 0, 0, 0, 0, 0]
roi_node.GetBounds(corners)
z_min, z_max = corners[4], corners[5]
return z_min < z_coordinate and z_max > z_coordinate
class WriteToDicomOverlay(AbstractAction):
def apply(self, state: DicomViewerState):
# Each DICOM file may store up to 16 overlays.
# This map from slice position (ImagePositionPatient Z) to index tracks how
# many overlays have already been applied to each DICOM file.
roi_idx_map: Dict[str, int] = {}
for roi in state.rois:
for dcm in roi.get_spanned_dicom_files():
mask: np.ndarray = roi.get_pixel_mask()
roi_idx = roi_idx_map.get(dcm.inner.ImagePositionPatient[2], 0)
roi_idx_map[dcm.inner.ImagePositionPatient[2]] = roi_idx + 1
if roi_idx >= 16:
# cannot support more than 16 overlays for the same image
raise Exception("Cannot support more than 16 overlays")
# packbits converts array of integer 1s and 0s to array of numbers.
# It interprets a run of 8 bits as a number.
# i.e. [1, 0, 0, 0, 0, 0, 0, 0] -> [128]
# The reshaping and flattening pack the mask into the bit order expected by the DICOM Overlay Data element.
reshaped_mask = np.packbits(mask.reshape(-1,8)[:,::-1].flatten('C'))
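# Overlay attributes live in the repeating groups 0x6000, 0x6002, ... (one group
# per overlay, hence the roi_idx*0x20000 offset): Rows (60xx,0010),
# Columns (60xx,0011), Description (60xx,0022), Type (60xx,0040),
# Origin (60xx,0050), Bits Allocated (60xx,0100), Bit Position (60xx,0102)
# and Overlay Data (60xx,3000).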
dcm.inner.add_new(0x60000010 + roi_idx*0x20000 , 'US', 512)
dcm.inner.add_new(0x60000011 + roi_idx*0x20000 , 'US', 512)
dcm.inner.add_new(0x60000022 + roi_idx*0x20000 , 'LO', "DICOM Overlay annotation added by python script")
dcm.inner.add_new(0x60000040 + roi_idx*0x20000 , 'CS', 'R')
dcm.inner.add_new(0x60000050 + roi_idx*0x20000 , 'SS', [1,1])
dcm.inner.add_new(0x60000100 + roi_idx*0x20000 , 'US', 1)
dcm.inner.add_new(0x60000102 + roi_idx*0x20000 , 'US', 0)
dcm.inner.add_new(0x60003000 + roi_idx*0x20000 , 'OW', reshaped_mask)
print('Saved to:', dcm.file_path)
dcm.inner.save_as(dcm.file_path)
def __repr__(self):
return "WriteToDicomOverlay"
class AppendToCsv(AbstractAction):
def __init__(self, csv_file_path):
self.csv_file_path = csv_file_path
def apply(self, state: DicomViewerState):
for roi in state.rois:
spanned_dcm = roi.get_spanned_dicom_files()
assert(len(spanned_dcm) > 0)
center_dcm: DicomFile = spanned_dcm[len(spanned_dcm) // 2]
# Prepare columns values
series_uid = center_dcm.inner.SeriesInstanceUID
centroid = roi.get_centroid()
diameter_mm = 2*np.sqrt(roi.get_area_millimeters() / np.pi)
worldX, worldY = AbstractRoi.voxel_to_world(
centroid[0],
centroid[1],
center_dcm.inner.PixelSpacing,
center_dcm.inner.ImagePositionPatient
)
# columns = ["seriesuid", "coordX", "coordY", "coordZ", "diameter_mm"]
with open(self.csv_file_path, 'a') as csv_file:
writer = csv.writer(csv_file)
writer.writerow([
series_uid,
worldX,
worldY,
str(center_dcm.inner.ImagePositionPatient[2]).strip("\'"),
diameter_mm
])
def __repr__(self):
return "AppendToCsv"
# AARASH TODO FINISH AND TEST
class ExportToDicomSeg(AbstractAction):
def __init__(self, out_dir: str):
self.out_dir = out_dir
def apply(self, state: DicomViewerState):
for roi in state.rois:
# Describe the algorithm that created the segmentation
algorithm_identification = AlgorithmIdentificationSequence(
name='aarash_dicom_plugin',
version='v1.0',
family=codes.cid7162.ArtificialIntelligence
)
# Describe the segment
description_segment_1 = SegmentDescription(
segment_number=1,
segment_label='first segment',
segmented_property_category=codes.cid7150.Tissue,
segmented_property_type=codes.cid7166.ConnectiveTissue,
algorithm_type=SegmentAlgorithmTypeValues.AUTOMATIC,
algorithm_identification=algorithm_identification,
tracking_uid=UID(),
tracking_id='test segmentation of slide microscopy image'
)
logging.info(np.shape(roi.get_pixel_mask()))
# Create the Segmentation instance
seg_dataset = Segmentation(
source_images=[dcm.inner for dcm in state.dicom_files],
pixel_array=roi.get_pixel_mask(),
segmentation_type=SegmentationTypeValues.BINARY,
segment_descriptions=[description_segment_1],
series_instance_uid=UID(),
series_number=2,
sop_instance_uid=UID(),
instance_number=1,
manufacturer='Manufacturer',
manufacturer_model_name='Model',
software_versions='v1',
device_serial_number='Device XYZ'
)
seg_dataset.save_as(self.out_dir + "seg.dcm")
print("Saved!")
# AARASH TODO FINISH AND TEST
RoiGeneratorFunc = Callable[[str, float, float, int, float], None]
class ImportFromCsv(AbstractAction):
def __init__(self, csv_file_path: str, roi_generator_function: RoiGeneratorFunc):
self.csv_file_path = csv_file_path
self.roi_generator_function = roi_generator_function
def apply(self, state: DicomViewerState):
spacing, origin = state.dicom_files[0].inner.PixelSpacing, state.dicom_files[0].inner.ImagePositionPatient
series_uid = state.get_series_instance_uid()
with open(self.csv_file_path) as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
columns = next(csvreader) # Read the first row, which is names of columns rather than data.
# Find a row where the SeriesUID matches the current series
for i, row in enumerate(csvreader):
try:
s = row[0].split(",")
csv_series_uid = s[0]
coordX, coordY = float(s[1]), float(s[2])
# Remove formatting from Z world coordinate and round it to the nearest int
                    coordZ: int = int(np.round(float(s[3].strip("\'").rstrip('0'))))
diameter_mm = float(s[4])
                except (IndexError, ValueError):
                    print("Encountered error at row idx = %d" % i)
print(row[0])
continue
if series_uid == csv_series_uid:
print("equality at csvrow = %d" % i)
vX, vY = AbstractRoi.world_to_voxel(float(coordX), float(coordY), spacing, origin)
print(f"ROI at coordinates {coordX}, {coordY}, {vX}, {vY}, {coordZ}")
self.roi_generator_function(csv_series_uid, coordX, coordY, coordZ, diameter_mm)
def __repr__(self):
return "ImportFromCsv"
#########AARASH CHANGE END HERE
class DicomVizLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self):
"""
Called when the logic class is instantiated. Can be used for initializing member variables.
"""
ScriptedLoadableModuleLogic.__init__(self)
def setDefaultParameters(self, parameterNode):
"""
Initialize parameter node with default settings.
"""
if not parameterNode.GetParameter("Threshold"):
parameterNode.SetParameter("Threshold", "100.0")
if not parameterNode.GetParameter("Invert"):
parameterNode.SetParameter("Invert", "false")
def process(self, inputVolume, action=None, filePath=""):
"""
TODO: Change comments
Run the processing algorithm.
Can be used without GUI widget.
:param inputVolume: volume to be thresholded
:param outputVolume: thresholding result
:param imageThreshold: values above/below this threshold will be set to 0
:param invert: if True then values above the threshold will be set to 0, otherwise values below are set to 0
"""
logging.info('Processing started')
if action == "Append to CSV":
action = AppendToCsv(filePath)
elif action == "Embed in DICOM Overlay":
action = WriteToDicomOverlay()
elif action == "Import from CSV":
action = ImportFromCsv(filePath)
elif action == "Export to DICOM Segmentation Object":
action = ExportToDicomSeg(filePath)
else:
raise Exception("Invalid action")
if not inputVolume:
raise ValueError("Input volume is invalid")
import time
startTime = time.time()
state: DicomViewerState = SlicerBackend(inputVolume).get_state()
action.apply(state)
stopTime = time.time()
logging.info('Processing completed in {0:.2f} seconds'.format(stopTime-startTime))
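# Illustrative use from the Slicer Python console (volume name and file paths are placeholders):
#   logic = DicomVizLogic()
#   logic.process(slicer.util.getNode('MyDicomVolume'), action="Append to CSV", filePath="/tmp/rois.csv")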
#
# DicomVizTest
#
class DicomVizTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear()
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_DicomViz1()
def test_DicomViz1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
# Get/create input data
import SampleData
registerSampleData()
inputVolume = SampleData.downloadSample('DicomViz1')
self.delayDisplay('Loaded test data set')
inputScalarRange = inputVolume.GetImageData().GetScalarRange()
self.assertEqual(inputScalarRange[0], 0)
self.assertEqual(inputScalarRange[1], 695)
outputVolume = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode")
threshold = 100
# Test the module logic
logic = DicomVizLogic()
# Test algorithm with non-inverted threshold
logic.process(inputVolume, outputVolume, threshold, True)
outputScalarRange = outputVolume.GetImageData().GetScalarRange()
self.assertEqual(outputScalarRange[0], inputScalarRange[0])
self.assertEqual(outputScalarRange[1], threshold)
# Test algorithm with inverted threshold
logic.process(inputVolume, outputVolume, threshold, False)
outputScalarRange = outputVolume.GetImageData().GetScalarRange()
self.assertEqual(outputScalarRange[0], inputScalarRange[0])
self.assertEqual(outputScalarRange[1], inputScalarRange[1])
self.delayDisplay('Test passed')
|
from typing import (
Dict,
Union,
Tuple,
Callable,
List,
Optional,
Any,
Generator
)
import tensorflow as tf
import numpy as np
import pandas as pd
from ml_hadoop_experiment.tensorflow.numpy_to_sparse_tensors import \
create_sparse_np_stacked
add_to_list_type = Callable[
[pd.DataFrame, List[Tuple[str, np.array]]],
None
]
features_specs_type = Dict[
str,
Union[tf.io.FixedLenFeature, tf.io.VarLenFeature]
]
def _make_feature_list_scalar(key: str, default_value: Optional[Union[int, str, float]],
dtype: Any) -> add_to_list_type:
if default_value is not None:
        if (isinstance(default_value, int) and dtype != np.int32 and dtype != np.int64) or \
                (isinstance(default_value, str) and dtype != str) or \
                (isinstance(default_value, float) and dtype != np.float32 and dtype != np.float64):
raise ValueError(f"default_value {default_value} of type {type(default_value)} "
f"incompatible with feature of type {dtype}")
def add_tensors(pandas_df: pd.DataFrame, tensors: List[Tuple[str, tf.Tensor]]) -> None:
# WARNING we have to call astype(dtype) because the from_record method may have generated
# an incorrect type for this column. If we call astype with the same type it will be a
# no-op anyway.
        features: Optional[np.ndarray] = None
if default_value is not None:
features = pandas_df[key].fillna(default_value).astype(dtype).values
else:
if pandas_df[key].isnull().values.any():
raise ValueError(f"For key {key} some inputs are null in the dataframe, "
f"and no default value was provided")
else:
features = pandas_df[key].astype(dtype).values
tensors.append((key, features))
return add_tensors
def _make_feature_list_varlen(key: str, dtype: Any) -> add_to_list_type:
def add_tensors(pandas_df: pd.DataFrame, list_: List[Tuple[str, np.array]]) -> None:
def iter_() -> Generator:
for v in pandas_df[key].values:
if v is None: # pandas will have parsed missing feature list as None: convert to []
yield np.array([], dtype)
else:
yield np.array(v, dtype)
feature_list = list(iter_())
indices, values, dense_shape = create_sparse_np_stacked(feature_list, dtype)
list_.append((key + "/shape", dense_shape))
list_.append((key + "/indices", indices))
list_.append((key + "/values", values))
return add_tensors
def generate_create_tensor_fn(feature_spec: features_specs_type) -> Callable[[pd.DataFrame],
Dict[str, np.array]]:
"""
From a feature_spec, generate all the necessary converters that will be able to transform
a pandas dataframe to a container of tensors.
Return a method that, when called on a dataframe, will generate all the "raw" tensors
for all the dimensions in the feature_spec. This return value can then be directly sent
to the tensorflow inference function.
"""
generators: List[add_to_list_type] = []
tf_to_np = {tf.int32: np.int32,
tf.int64: np.int64,
tf.float32: np.float32,
tf.float64: np.float64,
                tf.string: str}
for key, value in feature_spec.items():
if isinstance(value, tf.io.VarLenFeature):
if value.dtype in tf_to_np:
gen = _make_feature_list_varlen(key, tf_to_np[value.dtype])
else:
raise NotImplementedError(f'{key} has unknown type: {value.dtype}')
elif isinstance(value, tf.io.FixedLenFeature):
if len(value.shape) == 0 or (len(value.shape) == 1 and value.shape[0] == 1):
if value.dtype in tf_to_np:
gen = _make_feature_list_scalar(key, value.default_value, tf_to_np[value.dtype])
else:
raise NotImplementedError(f'{key} has unknown type: {value.dtype}')
else:
raise NotImplementedError(f"spec for FixedLenFeature of non-scalar shape not"
f"supported (got {value.shape} for key {key})")
else:
raise NotImplementedError(f'{key} has unknown type: {type(value)}')
generators.append(gen)
def make_tensors_from_pandas_dataframe(pandas_df: pd.DataFrame) -> Dict[str, np.array]:
tensors: List[Tuple[str, np.array]] = []
for generator in generators:
generator(pandas_df, tensors)
# sanity check that all tensors have been expanded to the same size:
items_count = pandas_df.shape[0]
for k, v in tensors:
if "/" not in k: # numpy array representing a dense tensor
assert items_count == v.shape[0]
elif k.endswith("/shape"): # numpy array representing shape of a sparse array
assert items_count == v[0]
return dict(tensors)
return make_tensors_from_pandas_dataframe
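if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module): build a
    # converter from a small feature spec and apply it to a pandas DataFrame.
    example_spec: features_specs_type = {
        "age": tf.io.FixedLenFeature([], tf.int64, default_value=0),
        "clicks": tf.io.VarLenFeature(tf.int64),
    }
    to_tensors = generate_create_tensor_fn(example_spec)
    example_df = pd.DataFrame({"age": [31, 27], "clicks": [[1, 2, 3], None]})
    result = to_tensors(example_df)
    # The dense feature keeps its name; the var-len feature is split into
    # "clicks/indices", "clicks/values" and "clicks/shape".
    print(sorted(result.keys()))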
|
import unittest
from datanator_query_python.query import query_kegg_organism_code
from datanator_query_python.config import config
class TestKOC(unittest.TestCase):
@classmethod
def setUpClass(cls):
db = 'datanator'
conf = config.TestConfig()
username = conf.USERNAME
password = conf.PASSWORD
MongoDB = conf.SERVER
cls.src = query_kegg_organism_code.QueryKOC(username=username, password=password,
server=MongoDB, database=db, collection_str='kegg_organism_code')
@classmethod
def tearDownClass(cls):
cls.src.client.close()
def test_get_org_code_by_ncbi(self):
_id = 9606
self.assertEqual(self.src.get_org_code_by_ncbi(_id), 'hsa')
_id = 1234556
self.assertEqual(self.src.get_org_code_by_ncbi(_id), 'No code found.')
def test_get_ncbi_by_org_code(self):
org = 'ATH'
self.assertEqual(self.src.get_ncbi_by_org_code(org), 3702)
org = 'nonsense'
self.assertEqual(self.src.get_ncbi_by_org_code(org), -1)
|
from flask import Flask, render_template, request, make_response
import os
import random
import boto3
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
IAMAccessKey = config['DEFAULT']["IAMAccessKey"]
IAMSecretKey = config['DEFAULT']["IAMSecretKey"]
s3_region = config['DEFAULT']["s3_region"]
s3_bucket = config['DEFAULT']["s3_bucket"]
s3 = boto3.client(
's3',
aws_access_key_id=IAMAccessKey,
aws_secret_access_key=IAMSecretKey,
region_name=s3_region
)
image_direc = "dogs/static/Images"
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def index():
resp = make_response(render_template("index.html"))
resp.set_cookie("score", "0")
return resp
@app.route("/dog", methods=["GET"])
@app.route("/dog/random", methods=['GET'])
def get_random_dog_handler():
DOG_PATH = os.environ.get("DOG_PATH", "dogs/static/images")
breed_path = random.choice(os.listdir(DOG_PATH))
dog_image = random.choice(os.listdir(DOG_PATH + "/" + breed_path))
breed = breed_path.split("-")[1].replace("_", " ")
full_path = "images" + "/" + breed_path + "/" + dog_image
return render_template(
"dog.html", breed=breed, full_path=full_path)
@app.route("/dog/<breed>/<dog_id>", methods=["GET"])
def get_dog_handler(breed, dog_id):
DOG_PATH = os.environ.get("DOG_PATH", "dogs/static/images")
breeds = os.listdir(DOG_PATH)
breed_path = [
breed_path for breed_path in breeds if breed in breed_path][0]
breed = breed_path.split("-")[1].replace("_", " ")
dog_images = os.listdir(DOG_PATH + "/" + breed_path)
dog_path = [dog_path for dog_path in dog_images if dog_id in dog_path][0]
full_path = "images" + "/" + breed_path + "/" + dog_path
return render_template("dog.html", breed=breed, full_path=full_path)
@app.route("/guess/", methods=["POST"])
def guess_handler():
breed = request.form['breed']
full_path = request.form['full_path']
guess = request.form['dropdown']
    score = int(request.cookies.get("score", 0))
if breed == guess:
score += 1
resp = make_response(render_template("guess.html", guess=guess,
breed=breed, full_path=full_path,
score=score))
resp.set_cookie("score", str(score))
return resp
# TODO
@app.route("/upload", methods=["GET", "POST"])
def upload_handler():
pass
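# Hypothetical sketch for the TODO above (not the original implementation; the "image" form
# field and the "upload.html" template are assumptions): accept an uploaded file on POST and
# push it to the configured S3 bucket with boto3.
#
# def upload_handler():
#     if request.method == "POST":
#         uploaded = request.files["image"]
#         s3.upload_fileobj(uploaded, s3_bucket, uploaded.filename)
#         return "uploaded {}".format(uploaded.filename)
#     return render_template("upload.html")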
if __name__ == "__main__":
app.run(
debug=True,
host='0.0.0.0'
)
|
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import unittest
from oci_cli.util import pymd5
class TestPyMd5(unittest.TestCase):
def test_hexdigest(self):
hash = pymd5.md5(b"Hello World").hexdigest()
self.assertEquals(hash, 'b10a8db164e0754105b7a99be72e3fe5')
def test_copy(self):
crypto = pymd5.new(b"Hello World")
copy = crypto.copy()
hash = copy.hexdigest()
self.assertEquals(hash, 'b10a8db164e0754105b7a99be72e3fe5')
def test_new(self):
crypto = pymd5.new(b"Hello World")
hash = crypto.hexdigest()
self.assertEquals(hash, 'b10a8db164e0754105b7a99be72e3fe5')
def test_md5(self):
md5 = pymd5.md5()
# TODO: The tests that follow are not very useful
def test_digest(self):
dig = pymd5.md5(b"Hello World").digest()
# self.assertEquals(dig, '\xb1\n\x8d\xb1d\xe0uA\x05\xb7\xa9\x9b\xe7.?\xe5')
def test_F(self):
pymd5.F(0, 0, 0)
def test_G(self):
pymd5.G(0, 0, 0)
def test_H(self):
pymd5.H(0, 0, 0)
def test_I(self):
pymd5.I(0, 0, 0)
|
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module contains the MySQLOptionsParser used to read the MySQL
configuration files.
This module belongs to Connector python, and it should be removed once
C/py v2.0.0 is released and in the meanwhile will be used from here.
"""
import codecs
import io
import os
import re
from ConfigParser import SafeConfigParser, MissingSectionHeaderError
DEFAULT_OPTION_FILES = {
'nt': 'C:\\my.ini',
'posix': '/etc/mysql/my.cnf'
}
DEFAULT_EXTENSIONS = {
'nt': ('ini', 'cnf'),
'posix': 'cnf'
}
class MySQLOptionsParser(SafeConfigParser):
"""This class implements methods to parse MySQL option files"""
def __init__(self, files=None, keep_dashes=True):
"""Initialize
files[in] The files to parse searching for configuration items.
keep_dashes[in] If False, dashes in options are replaced with
underscores.
        Raises ValueError if no files are given or if any of the given
        files cannot be read.
"""
# Regular expression to allow options with no value(For Python v2.6)
self.OPTCRE = re.compile( # pylint: disable=C0103
r'(?P<option>[^:=\s][^:=]*)'
r'\s*(?:'
r'(?P<vi>[:=])\s*'
r'(?P<value>.*))?$'
)
self._options_dict = {}
SafeConfigParser.__init__(self)
self.default_extension = DEFAULT_EXTENSIONS[os.name]
self.keep_dashes = keep_dashes
if not files:
raise ValueError('files argument should be given')
if isinstance(files, str):
self.files = [files]
else:
self.files = files
self._parse_options(list(self.files))
self._sections = self.get_groups_as_dict()
def optionxform(self, optionstr):
"""Converts option strings
optionstr[in] input to be converted.
Converts option strings to lower case and replaces dashes(-) with
underscores(_) if keep_dashes variable is set.
"""
if not self.keep_dashes:
optionstr = optionstr.replace('-', '_')
return optionstr.lower()
def _parse_options(self, files):
"""Parse options from files given as arguments.
This method checks for !include or !includedir directives and if there
is any, those files included by these directives are also parsed
for options.
files[in] The files to parse searching for configuration items.
Raises ValueError if any of the included or file given in arguments
is not readable.
"""
index = 0
err_msg = "Option file '{0}' being included again in file '{1}'"
for file_ in files:
try:
with open(file_, 'r') as op_file:
for line in op_file.readlines():
if line.startswith('!includedir'):
_, dir_path = line.split(None, 1)
for entry in os.listdir(dir_path):
entry = os.path.join(dir_path, entry)
if entry in files:
raise ValueError(err_msg.format(
entry, file_))
if (os.path.isfile(entry) and
entry.endswith(
self.default_extension)):
files.insert(index + 1, entry)
elif line.startswith('!include'):
_, filename = line.split(None, 1)
if filename in files:
raise ValueError(err_msg.format(
filename, file_))
files.insert(index + 1, filename)
index += 1
except (IOError, OSError) as exc:
raise ValueError("Failed reading file '{0}': {1}".format(
file_, str(exc)))
read_files = self.read(files)
not_read_files = set(files) - set(read_files)
if not_read_files:
raise ValueError("File(s) {0} could not be read.".format(
', '.join(not_read_files)))
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Overridden from ConfigParser and modified so as to allow options
which are not inside any section header
filenames[in] The file names to read.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for priority, filename in enumerate(filenames):
try:
out_file = io.StringIO()
for line in codecs.open(filename, encoding='utf-8'):
line = line.strip()
match_obj = self.OPTCRE.match(line)
if not self.SECTCRE.match(line) and match_obj:
optname, delimiter, optval = match_obj.group('option',
'vi',
'value')
if optname and not optval and not delimiter:
out_file.write(line + "=\n")
else:
out_file.write(line + '\n')
else:
out_file.write(line + '\n')
out_file.seek(0)
self._read(out_file, filename)
except IOError:
continue
try:
self._read(out_file, filename)
for group in self._sections.keys():
try:
self._options_dict[group]
except KeyError:
self._options_dict[group] = {}
for option, value in self._sections[group].items():
self._options_dict[group][option] = (value, priority)
self._sections = self._dict()
except MissingSectionHeaderError:
self._read(out_file, filename)
out_file.close()
read_ok.append(filename)
return read_ok
def get_groups(self, *args):
"""Returns options as a dictionary.
Returns options from all the groups specified as arguments, returns
the options from all groups if no argument provided. Options are
overridden when they are found in the next group.
*args[in] Each group to be returned can be requested by providing
its name as an argument.
Returns a dictionary
"""
if len(args) == 0:
args = self._options_dict.keys()
options = {}
for group in args:
try:
for option, value in self._options_dict[group].items():
if option not in options or options[option][1] <= value[1]:
options[option] = value
except KeyError:
pass
for key in options.keys():
if key == '__name__' or key.startswith('!'):
del options[key]
else:
options[key] = options[key][0]
return options
def get_groups_as_dict_with_priority(self, *args): # pylint: disable=C0103
"""Returns options as dictionary of dictionaries.
        Returns options from all the groups specified as arguments. For each
        group the options are contained in a dictionary. The order in which
        the groups are specified is unimportant. Also, options are not
        overridden between the groups.
The value is a tuple with two elements, first being the actual value
and second is the priority of the value which is higher for a value
read from a higher priority file.
*args[in] Each group to be returned can be requested by providing
its name as an argument.
        Returns a dictionary of dictionaries
"""
if len(args) == 0:
args = self._options_dict.keys()
options = dict()
for group in args:
try:
options[group] = dict(self._options_dict[group])
except KeyError:
pass
for group in options.keys():
for key in options[group].keys():
if key == '__name__' or key.startswith('!'):
del options[group][key]
return options
def get_groups_as_dict(self, *args):
"""Returns options as dictionary of dictionaries.
        Returns options from all the groups specified as arguments. For each
        group the options are contained in a dictionary. The order in which
        the groups are specified is unimportant. Also, options are not
        overridden between the groups.
*args[in] Each group to be returned can be requested by providing
its name as an argument.
        Returns a dictionary of dictionaries
"""
if len(args) == 0:
args = self._options_dict.keys()
options = dict()
for group in args:
try:
options[group] = dict(self._options_dict[group])
except KeyError:
pass
for group in options.keys():
for key in options[group].keys():
if key == '__name__' or key.startswith('!'):
del options[group][key]
else:
options[group][key] = options[group][key][0]
return options
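# Illustrative usage (the option file path is a placeholder):
#     parser = MySQLOptionsParser(files='/etc/mysql/my.cnf')
#     client_options = parser.get_groups('client', 'mysql')
#     everything = parser.get_groups_as_dict()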
|
fname = input('Enter file: ')
try:
    fhandle = open(fname, 'r')
except OSError:
    print('No such file.')
    quit()
days = dict()
for line in fhandle:
if not line.startswith('From'): continue
words = line.split()
if len(words) < 3: continue
day = words[2]
days[day] = days.get(day, 0) + 1
print(days)
|
c = get_config()
# Allow all IP addresses to use the service and run it on port 80.
c.NotebookApp.ip = '0.0.0.0'
c.NotebookApp.port = 80
# Don't load the browser on startup.
c.NotebookApp.open_browser = False
|
#!/usr/bin/python
import rospy
import numpy as np
from sam_msgs.msg import ThrusterRPMs
from geometry_msgs.msg import TwistStamped
from uavcan_ros_bridge.msg import ESCStatus
from sbg_driver.msg import SbgEkfEuler
from nav_msgs.msg import Odometry
import message_filters
from sensor_msgs.msg import Imu
import tf
class SamMMDummy(object):
def __init__(self):
self.dr_thrust_topic = rospy.get_param('~thrust_dr', '/sam/dr/motion_dr')
self.base_frame = rospy.get_param('~base_frame', 'sam/base_link')
self.odom_frame = rospy.get_param('~odom_frame', 'sam/odom')
self.pub_odom = rospy.Publisher(self.dr_thrust_topic, Odometry, queue_size=10)
while not rospy.is_shutdown():
odom_msg = Odometry()
odom_msg.header.frame_id = self.odom_frame
odom_msg.header.stamp = rospy.Time.now()
odom_msg.child_frame_id = self.base_frame
odom_msg.pose.pose.position.x = 0.0
odom_msg.pose.pose.position.y = 0.0
odom_msg.pose.pose.position.z = 0.0
odom_msg.pose.covariance = [0.] * 36
odom_msg.pose.covariance[0] = 1.
odom_msg.pose.covariance[7] = 1.
odom_msg.pose.covariance[16] = 40.
odom_msg.pose.pose.orientation.w = 1.
self.pub_odom.publish(odom_msg)
rospy.sleep(0.02)
if __name__ == "__main__":
rospy.init_node('sam_mm_dummy')
try:
SamMMDummy()
except rospy.ROSInterruptException:
pass
|
import download, linkload, getproxy
from bs4 import BeautifulSoup
import time
import csv
import re
import os
def get_books_as_sort():
url = 'https://www.23wxw.cc'
seed_url = 'https://www.23wxw.cc/xiaoshuodaquan/'
page = download.localPage(seed_url)
if page is None:
print 'Local file is None, just download.'
page = download.download(seed_url, save=True)
else:
print 'Use local file.'
soup = BeautifulSoup(page, 'html.parser')
books_sort = {}
all_sorts = soup.find('table', class_= 'layui-table').find('tbody').find('tr').find_all('td')
for a_sort in all_sorts:
sort_a = a_sort.find('a')
sort_name = sort_a.text
sort_link = url + sort_a['href']
sort_page = download.download(sort_link)
soup_a_sort = BeautifulSoup(sort_page, 'html.parser')
books_a_sort = soup_a_sort.find('div', class_='novellist').find('ul').find_all('li')
books_a_sort_ = []
for a_book in books_a_sort:
book_a = a_book.find('a')
a_book_ = {}
a_book_[u'name'] = book_a.text.encode('utf-8')
a_book_[u'link'] = url + book_a['href'].encode('utf-8')
books_a_sort_.append(a_book_)
books_sort[sort_name] = books_a_sort_
time.sleep(3)
return books_sort
def save_books(books_sort): # save books
for sort in books_sort:
with open('dingdian/%s.csv' % sort, 'w') as csvfile:
fieldnames = [u'name', u'link']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for a_book in books_sort[sort]:
writer.writerow(a_book)
    print 'All books have been saved.'
def load_local_books(): # load books from local csv files
books = {}
for csvf in find_csvf():
name = csvf.split('.csv')[0]
a_sort = []
with open(csvf, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
a_sort.append(row)
books[name] = a_sort
return books
def find_csvf(): # find files which end with .csv
files = os.listdir('dingdian')
csvf = []
for file in files:
if re.search('\.csv$', file, re.U):
csvf.append(file)
return csvf
def get_all_chapter(book_url):
b_page = download.download(book_url)
b_soup = BeautifulSoup(b_page, 'html.parser')
dt = b_soup.find(id='list').find('dl').find_all('dt')[1]
dds = dt.find_next_siblings('dd')
chapters = []
for dd in dds:
a_chapter = {}
chapter_a = dd.find('a')
a_chapter[u'name'] = chapter_a.text.encode('utf-8')
a_chapter[u'link'] = u'https://www.23wxw.cc' + chapter_a['href'].encode('utf-8')
chapters.append(a_chapter)
return chapters
def save_chapters(chapters):
with open('dingdian/test/test.csv', 'w') as csvfile:
fieldnames = [u'name', u'link']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for a_chapter in chapters:
writer.writerow(a_chapter)
    print 'All chapters have been saved.'
def load_a_chapter(chapter_name, chapter_url):
c_page = download.download(chapter_url)
c_soup = BeautifulSoup(c_page, 'html.parser')
c_content = c_soup.find(id='content')
nouse = c_content.find('p')
nouse.decompose() # delete this tag
nouse = c_content.find('div')
nouse.decompose()
c_content = c_soup.find(id='content').get_text('\n')
with open('dingdian/test/%s.txt'%chapter_name.decode('utf-8'), 'w') as f:
f.write(c_content.encode('utf-8'))
if __name__ == '__main__':
# save_books(get_books_as_sort())
# print find_csvf()
# get_all_chapter('https://www.23wxw.cc/html/18761/')
chapters = get_all_chapter('https://www.23wxw.cc/html/109557/')
for chapter in chapters:
load_a_chapter(chapter['name'], chapter['link'])
|
# Generated by Django 3.2 on 2022-04-05 11:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("post", "0003_category"),
]
operations = [
migrations.AlterModelOptions(
name="category",
options={"verbose_name_plural": "categories"},
),
]
|
from django.shortcuts import render
from django.http import HttpResponse
import random, sys
# Create your views here.
def index(request):
return maze(request)
def maze(request, width=12, height=12, seed = None):
seed, maze = generate_maze(width, height, seed=seed)
return render(request, 'labyapp/index.html', context={
'labyrint': maze,
'seed': seed,
})
def generate_maze(width, height, seed=None):
if not seed:
seed = random.randrange(sys.maxsize)
random.seed(seed)
    # Walls are encoded multiplicatively: every cell starts at 210 = 2*3*5*7 and a wall
    # is present as long as the cell value is still divisible by that wall's prime.
    RIGHT, LEFT, TOP, BOTTOM = 2, 3, 5, 7
    maze = [[210 for x in range(width)] for y in range(height)]
visited = set()
x,y = 0,0
visited.add((x,y))
inmaze = lambda x,y: -1<x<width and -1<y<height
fkey = lambda x,y: inmaze(x,y) and (x,y) not in visited
def remove_wall(x,y,dx,dy):
if (dx,dy) == (1,0):
maze[y][x]//=RIGHT
maze[y+dy][x+dx]//=LEFT
elif (dx,dy) == (-1,0):
maze[y][x]//=LEFT
maze[y+dy][x+dx]//=RIGHT
elif (dx,dy) == (0,1):
maze[y][x]//= BOTTOM
maze[y+dy][x+dx]//=TOP
else:
maze[y][x]//=TOP
maze[y+dy][x+dx]//=BOTTOM
def process_cell(x,y):
visited.add((x,y))
for (dx,dy) in random.sample(((1,0),(-1,0),(0,1),(0,-1)), 4):
if fkey(x+dx, y+dy):
remove_wall(x,y,dx,dy)
process_cell(x+dx,y+dy)
process_cell(0,0)
return seed, [['r'*(e%RIGHT==0) + 'l'*(e%LEFT==0) + 't'*(e%TOP==0) + 'b'*(e%BOTTOM==0)
for e in row] for row in maze]
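# Worked example of the encoding above (illustrative): a cell value of 35 = 5*7 is still
# divisible by TOP (5) and BOTTOM (7) but no longer by RIGHT (2) or LEFT (3), so the
# comprehension renders it as 'tb', meaning only the top and bottom walls remain.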
|
from cloudmesh_job.cm_jobdb import JobDB
class CommandJob(object):
@classmethod
def start(cls):
db = JobDB()
db.start()
Console.ok("job server start")
@classmethod
def stop(cls):
db = JobDB()
db.stop()
Console.ok("job server stop")
|
'''
Provides the common utility operations for plotting images
'''
import os
import shutil
import uuid
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import imageio
def cleanup(path):
if not os.path.isdir(path):
raise TypeError('Path provided to cleanup can only be a directory!!')
# used to cleanup some resources
if os.path.exists(path):
shutil.rmtree(path)
def create_gif_from_images(src, dst, cleanup_path=[]):
# creates a gif and stores in dst given a src
dst_dir = dst if os.path.isdir(dst) else os.path.split(dst)[0]
if not os.path.exists(src):
raise OSError('No such path or directory. Did you run the optimize function for the GAN?')
if not os.path.exists(dst_dir):
# create the dst directory
print("Destination dir not found.Creating.....")
os.makedirs(dst_dir)
print('Creating gif from the images')
# create the gif from the images in the source directory
with imageio.get_writer(dst, mode='I') as writer:
# list the images in the src ordered by time
imageList = [os.path.join(src, image) for image in sort_by_time(src, reverse=False) if os.path.isfile(os.path.join(src, image))]
for image in imageList:
img = imageio.imread(image)
writer.append_data(img)
# cleanup the resources if not required
if cleanup_path:
for path in cleanup_path:
cleanup(path)
def generateSamples(m, noise_dim, scale):
# generate some random noise samples
return np.random.normal(scale=scale, size=(m, noise_dim))
def restore_checkpoint_status(saver, sess, path):
# check if the checkpoint exists for this experiment
dir_path = os.path.split(path)[0] if os.path.splitext(path)[1] else path
if not tf.train.latest_checkpoint(dir_path):
print('No checkpoint found. Starting training.....')
return False
# else resume the training
    print('Checkpoint found for this experiment. Restoring variables...')
tf.reset_default_graph()
saver.restore(sess, path)
return True
def save_images(image_batch, img_dim_x=None, img_dim_y=None,
shape=None, tmp_path=None, show=False, save=False, id=None, **kwargs):
img_shape_len = len(image_batch.shape)
if img_shape_len != 2 and img_shape_len !=4:
raise SyntaxError('Image shape can be either 2 dim or 4 dim with a channel last ordering for 4-dim images')
num_channels = 1 if img_shape_len == 2 else image_batch.shape[-1]
image_size = int(np.sqrt(image_batch.shape[1])) if img_shape_len == 2 else image_batch.shape[1]
dim_x = img_dim_x or image_size
dim_y = img_dim_y or image_size
num_images = image_batch.shape[0]
# calculate the grid size to display the images
grid_size = int(np.ceil(np.sqrt(num_images)))
fig = plt.figure(figsize=(grid_size, grid_size), **kwargs)
img_index = 1
    for _ in range(grid_size):
        for _ in range(grid_size):
            # the grid may have more cells than images, so stop once the batch is exhausted
            if img_index > num_images:
                break
            # display the images in the plot
            fig.add_subplot(grid_size, grid_size, img_index)
            tmp_img = np.reshape(image_batch[img_index - 1], (dim_x, dim_y)) if num_channels == 1 else image_batch[img_index - 1]
            plt.imshow(tmp_img, cmap='binary')
            plt.gca().set_xticks([])
            plt.gca().set_yticks([])
            img_index += 1
# save the image file locally
if save and not show:
tmp = tmp_path or os.path.join(os.getcwd(), 'tmp')
if not os.path.exists(tmp):
os.makedirs(tmp)
plt.savefig(os.path.join(tmp, '{}.png'.format(id)))
plt.close()
def save_model_state(saver, sess, path):
    # save the model state, creating the checkpoint directory first if needed
    ckpt_dir = os.path.split(path)[0]
    if ckpt_dir and not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    saver.save(sess, path)
def sort_by_time(folder, reverse=False):
def getmtime(name):
path = os.path.join(folder, name)
return os.path.getmtime(path)
return sorted(os.listdir(folder), key=getmtime, reverse=reverse)
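# Illustrative usage (paths are placeholders): stitch the per-step sample images written by
# save_images into a gif and delete the temporary directory afterwards.
#     create_gif_from_images('tmp', 'out/training.gif', cleanup_path=['tmp'])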
|
import operator
points = {'A1':(2,10),
'A2':(2, 5),
'A3':(8, 4),
'A4':(5, 8),
'A5':(7, 5),
'A6':(6, 4),
'A7':(1, 2),
'A8':(4, 9)}
centroid_1 = points['A1']
centroid_2 = points['A4']
centroid_3 = points['A7']
def distance(p1, p2):
    # Manhattan (L1) distance, e.g. A1=(2,10) to A4=(5,8) gives |5-2| + |8-10| = 5
    return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])
def divide(a, b):
if a % b != 0:
return round(a/float(b), 4)
else:
return a/b
def calculate_centroid_point(list_of_points):
n = len(list_of_points)
list_of_x = [p[0] for p in list_of_points]
list_of_y = [p[1] for p in list_of_points]
x = divide(sum(list_of_x), n)
y = divide(sum(list_of_y), n)
return (x,y)
## arbitrarily cap the number of iterations at 9
## note: there is also a stopping condition (loop breaks once the centroids stop changing)
for iteration in range(1, 10):
print
print 'Iteration: %s' % iteration
print 'Centroid_1: %s\nCentroid_2: %s\nCentroid_3: %s' % (
centroid_1, centroid_2, centroid_3)
summary_table = []
for k,v in points.iteritems():
d1 = distance(v, centroid_1)
d2 = distance(v, centroid_2)
d3 = distance(v, centroid_3)
min_index, min_val = min(enumerate([d1, d2, d3]),
key=operator.itemgetter(1))
summary_table.append([k,v, d1, d2, d3, min_index + 1])
for l in summary_table:
print '\t'.join(map(str, l))
## get points for new centroid
c1_pts = [p[1] for p in summary_table if p[-1] == 1]
c2_pts = [p[1] for p in summary_table if p[-1] == 2]
c3_pts = [p[1] for p in summary_table if p[-1] == 3]
old_c1, old_c2, old_c3 = centroid_1, centroid_2, centroid_3
## update centroid
centroid_1 = calculate_centroid_point(c1_pts)
centroid_2 = calculate_centroid_point(c2_pts)
centroid_3 = calculate_centroid_point(c3_pts)
if old_c1 == centroid_1 and old_c2 == centroid_2 and old_c3 == centroid_3:
break
|
from unittest.mock import patch
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
from core.models import (Asset,
AssetModelNumber,
AllocationHistory,
AssetMake,
AssetType,
AssetSubCategory,
AssetCategory)
from api.tests import APIBaseTestCase
User = get_user_model()
client = APIClient()
class AssetHealthTestCase(APIBaseTestCase):
def setUp(self):
super(AssetHealthTestCase, self).setUp()
self.admin = User.objects.create_superuser(
email='admin@site.com', cohort=20,
slack_handle='@admin', password='devpassword'
)
self.token_admin = "tokenadmin"
self.user = User.objects.create_user(
email='user@site.com', cohort=20,
slack_handle='@admin', password='devpassword'
)
self.token_user = 'testtoken'
self.other_user = User.objects.create_user(
email='user1@site.com', cohort=20,
slack_handle='@admin', password='devpassword'
)
self.token_other_user = 'otherusertesttoken'
self.asset_category = AssetCategory.objects.create(
category_name="Accessories")
self.asset_sub_category = AssetSubCategory.objects.create(
sub_category_name="Sub Category name",
asset_category=self.asset_category)
self.asset_type = AssetType.objects.create(
asset_type="Asset Type",
asset_sub_category=self.asset_sub_category)
self.make_label = AssetMake.objects.create(
make_label="Asset Make", asset_type=self.asset_type)
self.assetmodel = AssetModelNumber(
model_number="IMN50987", make_label=self.make_label)
self.assetmodel.save()
self.asset = Asset(
asset_code="IC001",
serial_number="SN001",
assigned_to=self.user,
model_number=self.assetmodel,
purchase_date="2018-07-10",
)
self.asset.save()
allocation_history = AllocationHistory(
asset=self.asset,
current_owner=self.user
)
allocation_history.save()
self.asset_urls = reverse('asset-health-list')
def test_non_authenticated_user_view_assets_health(self):
response = client.get(self.asset_urls)
self.assertEqual(response.data, {
'detail': 'Authentication credentials were not provided.'
})
@patch('api.authentication.auth.verify_id_token')
def test_non_admin_cannot_view_asset_health(self, mock_verify_id_token):
mock_verify_id_token.return_value = {'email': self.user.email}
response = client.get(
self.asset_urls,
HTTP_AUTHORIZATION="Token {}".format(self.token_user))
self.assertEqual(response.data,
{'detail': ['You do not have authorization']})
self.assertEqual(response.status_code, 403)
@patch('api.authentication.auth.verify_id_token')
def test_authenticated_admin_view_assets_health(self,
mock_verify_id_token):
mock_verify_id_token.return_value = {'email': self.admin.email}
response = client.get(
self.asset_urls,
HTTP_AUTHORIZATION="Token {}".format(self.token_admin))
self.assertEqual(len(response.data), Asset.objects.count())
self.assertEqual(response.data[0]['model_number'],
self.asset.model_number.model_number)
self.assertEqual(response.data[0]['count_by_status']['Allocated'], 1)
self.assertEqual(response.data[0]['count_by_status']['Available'], 0)
self.assertEqual(response.data[0]['count_by_status']['Damaged'], 0)
self.assertEqual(response.data[0]['count_by_status']['Lost'], 0)
self.assertEqual(response.status_code, 200)
@patch('api.authentication.auth.verify_id_token')
def test_assets_health_api_endpoint_cant_allow_put(self,
mock_verify_id_token):
mock_verify_id_token.return_value = {'email': self.user.email}
response = client.put(
self.asset_urls,
HTTP_AUTHORIZATION="Token {}".format(self.token_user))
self.assertEqual(response.data, {
'detail': 'Method "PUT" not allowed.'
})
@patch('api.authentication.auth.verify_id_token')
def test_assets_health_api_endpoint_cant_allow_patch(self,
mock_verify_id_token):
mock_verify_id_token.return_value = {'email': self.user.email}
response = client.patch(
self.asset_urls,
HTTP_AUTHORIZATION="Token {}".format(self.token_user))
self.assertEqual(response.data, {
'detail': 'Method "PATCH" not allowed.'
})
@patch('api.authentication.auth.verify_id_token')
def test_assets_health__endpoint_cant_allow_delete(self,
mock_verify_id_token):
mock_verify_id_token.return_value = {'email': self.user.email}
response = client.delete(
'{}/{}/'.format(self.asset_urls, self.asset.serial_number),
HTTP_AUTHORIZATION="Token {}".format(self.token_user))
self.assertEqual(response.data, {
'detail': 'Method "DELETE" not allowed.'
})
@patch('api.authentication.auth.verify_id_token')
def test_asset_type_in_asset_health_api(self, mock_verify_id_token):
mock_verify_id_token.return_value = {'email': self.admin.email}
response = client.get(
self.asset_urls,
HTTP_AUTHORIZATION="Token {}".format(self.token_admin))
self.assertIn('asset_type', response.data[0])
self.assertEqual(response.data[0]['asset_type'],
self.asset_type.asset_type)
self.assertEqual(response.status_code, 200)
|
import fcntl
import logging
import queue
import os
import ipcqueue.posixmq
from django.conf import settings
from . import Monitoring
logger = logging.getLogger(__name__)
class PrometheusMultiprocessMonitoring(Monitoring):
def __init__(self):
super().__init__()
monitoring_setting = getattr(settings, "MONITORING", {})
self.queue = ipcqueue.posixmq.Queue(
name=monitoring_setting["QUEUE_NAME"],
maxsize=monitoring_setting.get("QUEUE_MAX_SIZE", 10),
maxmsgsize=monitoring_setting.get("QUEUE_MAX_MSG_SIZE", 1024),
)
self.result_file = monitoring_setting["RESULT_FILE"]
def get_stats(self):
data = b""
fd = os.open(
self.result_file,
os.O_CREAT | os.O_RDONLY,
mode=0o644,
)
try:
fcntl.flock(fd, fcntl.LOCK_SH)
try:
while 1:
part = os.read(fd, 4096)
if not part:
break
data += part
finally:
fcntl.flock(fd, fcntl.LOCK_UN)
finally:
os.close(fd)
return data
def set_stats(self, stats):
fd = os.open(
self.result_file,
os.O_CREAT | os.O_WRONLY,
mode=0o644,
)
try:
fcntl.flock(fd, fcntl.LOCK_EX)
try:
os.ftruncate(fd, 0)
os.write(fd, stats)
finally:
fcntl.flock(fd, fcntl.LOCK_UN)
finally:
os.close(fd)
def counter_inc(self, metric, value, *labelvalues, **labelkwargs):
try:
self.queue.put(
(
metric.__class__,
metric.name,
metric.documentation,
metric.labelnames,
"inc",
value,
labelvalues,
labelkwargs,
),
block=True,
timeout=0.1,
)
except queue.Full:
logger.warning("Queue is full")
except ipcqueue.posixmq.QueueError as exc:
logger.error("Queue error: %d %s", exc.errno, exc.msg)
except Exception as exc:
logger.exception("Increment %s error: %s", metric.name, exc)
def summary_observe(self, metric, value, *labelvalues, **labelkwargs):
try:
self.queue.put(
(
metric.__class__,
metric.name,
metric.documentation,
metric.labelnames,
"observe",
value,
labelvalues,
labelkwargs,
),
block=True,
timeout=0.1,
)
except queue.Full:
logger.warning("Queue is full")
except ipcqueue.posixmq.QueueError as exc:
logger.error("Queue error: %d %s", exc.errno, exc.msg)
except Exception as exc:
logger.exception("Observe %s error: %s", metric.name, exc)
|
from direct.interval.IntervalGlobal import Sequence, Func, Wait, LerpColorScaleInterval, Parallel
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.task.Task import Task
from direct.showbase import PythonUtil
from toontown.distributed import DelayDelete
from toontown.distributed.DelayDeletable import DelayDeletable
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from pandac.PandaModules import *
from direct.gui.DirectGui import *
from direct.distributed.ClockDelta import *
from direct.fsm.FSM import FSM
from toontown.golf import GolfGlobals
from toontown.golf import GolfScoreBoard
from toontown.golf import GolfRewardDialog
from toontown.toon import ToonHeadFrame
class DistributedGolfCourse(DistributedObject.DistributedObject, FSM, DelayDeletable):
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGolfCourse')
defaultTransitions = {'Off': ['Join'],
'Join': ['WaitStartHole', 'Cleanup'],
'WaitStartHole': ['PlayHole', 'Cleanup', 'WaitReward'],
'PlayHole': ['WaitFinishCourse',
'WaitStartHole',
'WaitReward',
'Cleanup'],
'WaitReward': ['WaitFinishCourse', 'Cleanup'],
'WaitFinishCourse': ['Cleanup'],
'Cleanup': ['Off']}
id = 0
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, base.cr)
FSM.__init__(self, 'Golf_%s_FSM' % self.id)
self.waitingStartLabel = DirectLabel(text=TTLocalizer.MinigameWaitingForOtherPlayers, text_fg=VBase4(1, 1, 1, 1), relief=None, pos=(-0.6, 0, -0.75), scale=0.075)
self.waitingStartLabel.hide()
self.avIdList = []
self.remoteAvIdList = []
self.exitedAvIdList = []
self.toonPanels = []
self.exitedPanels = []
self.exitedToonsWithPanels = []
self.localAvId = base.localAvatar.doId
self.hasLocalToon = 0
self.modelCount = 500
self.cleanupActions = []
self.courseId = None
self.scores = {}
self.curHoleIndex = 0
self.golfRewardDialog = None
self.rewardIval = None
self.scoreBoard = None
self.exit = False
self.drivingToons = []
return
def generate(self):
self.notify.debug('GOLF COURSE: generate, %s' % self.getTitle())
DistributedObject.DistributedObject.generate(self)
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
if not self.hasLocalToon:
return
self.notify.debug('BASE: handleAnnounceGenerate: send setAvatarJoined')
self.__delayDelete = DelayDelete.DelayDelete(self, 'GolfCourse.self')
self.request('Join')
self.normalExit = 1
count = self.modelCount
loader.beginBulkLoad('minigame', TTLocalizer.HeadingToMinigameTitle % self.getTitle(), count, 1, TTLocalizer.TIP_GOLF)
self.load()
globalClock.syncFrameTime()
self.onstage()
self.accept('clientCleanup', self._handleClientCleanup)
def _handleClientCleanup(self):
self._destroyDelayDelete()
def _destroyDelayDelete(self):
if self.__delayDelete:
self.__delayDelete.destroy()
self.__delayDelete = None
return
def delete(self):
print 'GOLF COURSE DELETE'
self.ignore('clientCleanup')
if self.scoreBoard:
self.scoreBoard.delete()
DistributedObject.DistributedObject.delete(self)
if self.golfRewardDialog:
self.golfRewardDialog.delete()
self.cleanUpReward()
if self.toonPanels:
for x in range(len(self.toonPanels)):
self.toonPanels[x].destroy()
self.toonPanels = None
self.scores = None
self.music.stop()
self.music = None
for avId in self.avIdList:
av = base.cr.doId2do.get(avId)
if av:
av.show()
return
def load(self):
self.music = base.loader.loadMusic('phase_6/audio/bgm/GZ_PlayGolf.mid')
def setCourseReady(self, numHoles, holeIds, coursePar):
self.notify.debug('GOLF COURSE: received setCourseReady')
if self.state == 'Cleanup':
return
self.numHoles = numHoles
self.holeIds = holeIds
self.coursePar = coursePar
for avId in self.avIdList:
blankScoreList = [0] * self.numHoles
self.scores[avId] = blankScoreList
self.request('WaitStartHole')
for avId in self.avIdList:
av = base.cr.doId2do.get(avId)
if av:
av.show()
av.reparentTo(render)
av.setPos(0, 0, -100)
else:
                self.notify.warning('avId=%d does not exist' % avId)
self.scoreBoard = GolfScoreBoard.GolfScoreBoard(self)
toonPanelsStart = 0.3
whichToon = 0
color = 0
tpDiff = -0.45
headPanel = loader.loadModel('phase_6/models/golf/headPanel')
if self.numPlayers > 0:
for avId in self.avIdList:
if not self.localAvId == avId:
av = base.cr.doId2do.get(avId)
if av:
tPanels = ToonHeadFrame.ToonHeadFrame(av, GolfGlobals.PlayerColors[color], headPanel)
tPanels.setPos(-1.17, 0, toonPanelsStart + whichToon * tpDiff)
tPanels.setScale(0.3, 1, 0.7)
tPanels.head.setPos(0, 10, 0.18)
tPanels.head.setScale(0.47, 0.2, 0.2)
tPanels.tag1.setPos(0.3, 10, 0.18)
tPanels.tag1.setScale(0.1283, 0.055, 0.055)
tPanels.tag2.setPos(0, 10, 0.43)
tPanels.tag2.setScale(0.117, 0.05, 0.05)
self.toonPanels.append(tPanels)
whichToon = whichToon + 1
color += 1
else:
color += 1
else:
self.toonPanels = None
for avId in self.exitedAvIdList:
if avId not in self.exitedToonsWithPanels:
self.exitMessageForToon(avId)
return
def setPlayHole(self):
self.notify.debug('GOLF COURSE: received setPlayHole')
if self.state not in ['PlayHole', 'Cleanup']:
self.request('PlayHole')
def getTitle(self):
return GolfGlobals.getCourseName(self.courseId)
def getInstructions(self):
return 'You should not be seeing this'
def setGolferIds(self, avIds):
self.avIdList = avIds
self.numPlayers = len(self.avIdList)
self.hasLocalToon = self.localAvId in self.avIdList
if not self.hasLocalToon:
self.notify.warning('localToon (%s) not in list of golfers: %s' % (self.localAvId, self.avIdList))
return
self.notify.info('GOLF COURSE: setParticipants: %s' % self.avIdList)
self.remoteAvIdList = []
for avId in self.avIdList:
if avId != self.localAvId:
self.remoteAvIdList.append(avId)
def setCourseAbort(self, avId):
if avId == self.localAvId or avId == 0:
if not self.hasLocalToon:
return
self.notify.warning('GOLF COURSE: setGameAbort: Aborting game')
self.normalExit = 0
if not self.state == 'Cleanup':
self.request('Cleanup')
else:
self.notify.warning('GOLF COURSE: Attempting to clean up twice')
def onstage(self):
self.notify.debug('GOLF COURSE: onstage')
base.playMusic(self.music, looping=1, volume=0.9)
def avExited(self, avId):
self.exitedAvIdList.append(avId)
hole = base.cr.doId2do.get(self.curHoleDoId)
if hole:
hole.avExited(avId)
if self.localAvId == avId:
self.notify.debug('forcing setCourseAbort')
if self.state == 'Join':
loader.endBulkLoad('minigame')
self.setCourseAbort(0)
self.exitMessageForToon(avId)
def exitMessageForToon(self, avId):
if self.toonPanels and self.localAvId != avId:
y = 0
for x in range(len(self.avIdList)):
if avId == self.avIdList[x] and y < len(self.toonPanels):
toonPanel = self.toonPanels[y]
toonPanel.headModel.hide()
exitedToon = DirectLabel(parent=self.toonPanels[y], relief=None, pos=(0, 0, 0.4), color=(1, 1, 1, 1), text_align=TextNode.ACenter, text=TTLocalizer.GolferExited % toonPanel.av.getName(), text_scale=0.07, text_wordwrap=6)
exitedToon.setScale(2, 1, 1)
self.exitedPanels.append(exitedToon)
self.exitedToonsWithPanels.append(avId)
toonPanel.removeAvKeep()
elif not self.avIdList[x] == self.localAvId:
y += 1
return
def enterJoin(self):
self.sendUpdate('setAvatarJoined', [])
def handleFallingAsleepGolf(self, task):
base.localAvatar.stopSleepWatch()
base.localAvatar.forceGotoSleep()
self.sendUpdate('setAvatarExited', [])
def exitJoin(self):
pass
def enterWaitStartHole(self):
self.sendUpdate('setAvatarReadyCourse', [])
def exitWaitStartHole(self):
pass
def enterPlayHole(self):
loader.endBulkLoad('minigame')
def exitPlayHole(self):
pass
def enterCleanup(self):
print 'GOLF COURSE CLEANUP'
base.localAvatar.stopSleepWatch()
for action in self.cleanupActions:
action()
self.cleanupActions = []
        if self.scoreBoard is not None:
self.scoreBoard.delete()
if self.toonPanels:
for x in range(len(self.toonPanels)):
self.toonPanels[x].destroy()
self.toonPanels = None
for avId in self.avIdList:
av = base.cr.doId2do.get(avId)
if av:
av.show()
av.resetLOD()
self.ignoreAll()
if self.hasLocalToon:
messenger.send('leavingGolf')
self._destroyDelayDelete()
return
def exitCleanup(self):
pass
def setCourseId(self, courseId):
self.courseId = courseId
def calcHolesToUse(self):
retval = []
while len(retval) < self.numHoles:
for holeId in self.courseInfo['holeIds']:
retval.append(holeId)
if len(retval) >= self.numHoles:
break
return retval
def calcCoursePar(self):
retval = 0
for holeId in self.holeIds:
holeInfo = GolfGlobals.HoleInfo[holeId]
retval += holeInfo['par']
return retval
def setScores(self, scoreList):
scoreList.reverse()
for avId in self.avIdList:
avScores = []
for holeIndex in range(self.numHoles):
avScores.append(scoreList.pop())
self.scores[avId] = avScores
self.notify.debug('self.scores=%s' % self.scores)
def setCurHoleIndex(self, holeIndex):
self.curHoleIndex = holeIndex
def setCurHoleDoId(self, holeDoId):
self.curHoleDoId = holeDoId
def getCurGolfer(self):
if self.curHoleDoId != 0:
av = base.cr.doId2do.get(self.curHoleDoId)
if av:
return av.currentGolfer
else:
return None
return None
def getStrokesForCurHole(self, avId):
retval = 0
if avId in self.scores:
retval = self.scores[avId][self.curHoleIndex]
return retval
def isGameDone(self):
retval = False
self.notify.debug('Self state is: %s' % self.state)
if self.getCurrentOrNextState() == 'WaitReward' or self.getCurrentOrNextState() == 'WaitFinishCourse':
retval = True
return retval
def setReward(self, trophiesList, rankingsList, holeBestList, courseBestList, cupList, tieBreakWinner, aim0, aim1, aim2, aim3):
self.trophiesList = trophiesList
self.rankingsList = rankingsList
self.holeBestList = holeBestList
self.courseBestList = courseBestList
self.cupList = cupList
self.tieBreakWinner = tieBreakWinner
self.aimTimesList = [aim0,
aim1,
aim2,
aim3]
if self.state not in ['Cleanup']:
self.demand('WaitReward')
def enterWaitReward(self):
self.scoreBoard.showBoardFinal()
if self.curHoleDoId != 0:
av = base.cr.doId2do.get(self.curHoleDoId)
av.cleanupPowerBar()
def doneWithRewardMovie():
if self.exit == False:
self.notify.debug('doneWithRewardMovie')
self.sendUpdate('setDoneReward', [])
self._destroyDelayDelete()
self.exit = True
self.golfRewardDialog = GolfRewardDialog.GolfRewardDialog(self.avIdList, self.trophiesList, self.rankingsList, self.holeBestList, self.courseBestList, self.cupList, self.localAvId, self.tieBreakWinner, self.aimTimesList)
self.rewardIval = Sequence(Parallel(Wait(5), self.golfRewardDialog.getMovie()), Func(doneWithRewardMovie))
self.rewardIval.start()
def exitEarly(self):
if self.exit == False:
self.notify.debug('doneWithRewardMovie')
self.sendUpdate('setDoneReward', [])
self._destroyDelayDelete()
self.exit = True
def exitReward(self):
self.cleanUpReward()
def cleanUpReward(self):
if self.rewardIval:
self.rewardIval.pause()
self.rewardIval = None
return
def updateScoreBoard(self):
if self.scoreBoard:
self.scoreBoard.update()
def changeDrivePermission(self, avId, canDrive):
if canDrive:
if avId not in self.drivingToons:
self.drivingToons.append(avId)
elif avId in self.drivingToons:
self.drivingToons.remove(avId)
def canDrive(self, avId):
retval = avId in self.drivingToons
return retval
|
#Conditional Statements
#Boolean logic
# >
# <
# >=
# <=
# ==
# !=
#AND
'''
True and True = True
True and False = False
False and True = False
False and False = False
'''
#OR
'''
True or True = True
True or False = True
False or True = True
False or False = False
'''
#NOT
'''
not True = False
not False = True
'''
#if statement
'''
number = 10
if number != 9:
print('inside if statement')
print('still inside if')
print('outside of if statement')
'''
#if else statements
'''
number = 5
if number > 5:
print('inside if')
else:
print('inside else')
print('outside if and else')
'''
#if elif else statements
'''
grade = 60
if grade > 90 and grade <= 100:
print("A")
elif grade > 80 and grade <= 90:
print("B")
elif grade > 70 and grade <= 80:
print("C")
else:
print("you fail")
print("outside of if or else")
'''
#Nested if statements
number = 8
if number >= 8:
if number > 10:
print('number is bigger than 10')
else:
print('number is smaller than 10 but bigger or equal to 8')
else:
print('outside of if')
#assignment 3
#ask user to enter a number between 1-7
# 1. Sunday
# 2. Monday
# 3. Tuesday
# 4. Wednesday
# 5. Thursday
# 6. Friday
# 7. Saturday
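#one possible solution sketch for assignment 3 (other approaches work too)
'''
number = int(input('Enter a number between 1-7: '))
if number == 1:
    print('Sunday')
elif number == 2:
    print('Monday')
elif number == 3:
    print('Tuesday')
elif number == 4:
    print('Wednesday')
elif number == 5:
    print('Thursday')
elif number == 6:
    print('Friday')
elif number == 7:
    print('Saturday')
else:
    print('number must be between 1 and 7')
'''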
#assignment 4
#ask user to enter a number between 1-10
# 1. I
# 2. II
# 3. III
# 4. IV
# 5. V
# 6. VI
# 7. VII
# 8. VIII
# 9. IX
#10. X
|
import re
import sys
import os
from datetime import *
ADJ_FF_TIME = -5
malicious_labels = []
preprocessing_lines = []
process_parent = {}
def order_events():
global preprocessing_lines
preprocessing_lines.sort()
for i in range(0, len(preprocessing_lines)):
node = preprocessing_lines[i]
if "a" in node[:node.find(',')]:
preprocessing_lines[i] = str(int(node[:node.find('a')])) + node[node.find(','):] + "\n"
elif "b" in node[:node.find(',')]:
preprocessing_lines[i] = str(int(node[:node.find('b')])) + node[node.find(','):] + "\n"
elif "c" in node[:node.find(',')]:
preprocessing_lines[i] = str(int(node[:node.find('c')])) + node[node.find(','):] + "\n"
def is_matched(string):
for label in malicious_labels:
if label in string:
return True
return False
# preprocess dns log
def pp_dns(wf, path):
global preprocessing_lines
log_file_path = path + '/dns'
event_number = 1
with open(log_file_path, 'r') as f:
pre_out_line = ',' * 19
for line in f:
if not 'response' in line:
continue
out_line = ''
splitted_line = line.split()
no = splitted_line[0]
time = splitted_line[1] + " " + splitted_line[2]
ip_src = splitted_line[3]
ip_dst = splitted_line[5]
proto = splitted_line[6]
length = splitted_line[7]
info = ""
for i in range(8, len(splitted_line)):
info += splitted_line[i] + " "
event_date = splitted_line[1]
year, month, day = event_date.split('-')
day_of_year = datetime(int(year), int(month), int(day)).timetuple().tm_yday
date_val = str(int(day_of_year) * 24 * 60 * 60)
timestamp = time.split()[1].split('.')[0]
h, m, s = timestamp.split(':')
out_line += str(int(h) * 3600 + int(m) * 60 + int(s) + int(date_val)).zfill(20) + "a" + str(event_number)
event_number += 1
# queried domain
q_domain = re.findall(r'response 0x\S+ A+ (\S+) ', info)
if q_domain:
out_line += ',' + q_domain[0]
else:
out_line += ','
# resolved ip
r_ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', info)
if r_ip:
out_line += ',' + r_ip[0]
else:
out_line += ','
            # the remaining fields are empty
for i in range(0, 17):
out_line += ','
# write lines out, remove adjacent duplicate entries
if len([(i, j) for i, j in zip(out_line.split(','), pre_out_line.split(',')) if i != j]) > 1:
matched = False
if is_matched(out_line):
matched = True
if not ",,,,,,,,,,,,,,,,,,," in out_line:
if matched:
#wf.write(out_line + '-LD+\n')
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LD+') # + event_date + " " + timestamp
else:
#wf.write(out_line + '-LD-\n')
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LD-') # + event_date + " " + timestamp
pre_out_line = out_line
# preprocess audit log for windows x64 & x86
def pp_audit_w(wf, path):
global preprocessing_lines
timestamp = ""
h, m, s = "", "", ""
pid = 0
ppid = 0
new_pid = 0
new_ppid = 0
pname = ""
first_iter = True
d_ip = ""
d_port = ""
s_ip = ""
s_port = ""
acct = ""
objname = ""
log_file_path = path + '/security_events.txt'
event_number = 1
accesses_lines = False
accesses = ""
network_direction = ""
with open(log_file_path, 'r') as f:
f.readline()
single_line = []
inside_accesses_block = False
accesses = "Accesses:"
for line in f:
if line.lstrip().startswith("Accesses:") or inside_accesses_block:
access_line = line
if len(access_line.strip()) > 1:
if "Accesses:" in access_line:
inside_accesses_block = True
access_line = access_line.split("Accesses:")[1]
first_char_index = len(access_line) - len(access_line.lstrip())
access_line = access_line[first_char_index:]
last_char_index = len(access_line.rstrip())
access_line = access_line[:last_char_index]
access_line = access_line.replace(" ", "_")
accesses += "_" + access_line
else:
inside_accesses_block = False
single_line.append(accesses)
accesses = "Accesses:"
else:
single_line.append(line)
# then for each log entry, compute domain
pre_out_line = ',' * 19
for entry in reversed(single_line):
out_line = ''
# timestamp
if entry.startswith("Audit Success") or entry.startswith("Audit Failure") or entry.startswith("Information"):
event_date = ""
date_val = ""
# timestamp 64-bit
if entry.startswith("Information"):
event_date = entry.split()[1]
month, day, year = event_date.split('/')
day_of_year = datetime(int(year), int(month), int(day)).timetuple().tm_yday
date_val = str(int(day_of_year) * 24 * 60 * 60)
timestamp = entry.split()[2]
h, m, s = timestamp.split(':')
if entry.split()[3] == "PM":
if 1 <= int(h) <= 11:
h = str(int(h) + 12)
if entry.split()[3] == "AM":
if int(h) == 12:
h = "00"
# timestamp 32-bit
if entry.startswith("Audit Success") or entry.startswith("Audit Failure"):
event_date = entry.split()[2]
month, day, year = event_date.split('/')
day_of_year = datetime(int(year), int(month), int(day)).timetuple().tm_yday
date_val = str(int(day_of_year) * 24 * 60 * 60)
timestamp = entry.split()[3]
h, m, s = timestamp.split(':')
if entry.split()[4] == "PM":
if 1 <= int(h) <= 11:
h = str(int(h) + 12)
if entry.split()[4] == "AM":
if int(h) == 12:
h = "00"
out_line = str(int(h) * 3600 + int(m) * 60 + int(s) + int(date_val)).zfill(20) + "b" + str(event_number)
event_number += 1
# queried domain
out_line += ','
# resolved ip
out_line += ','
if pid in process_parent:
ppid = process_parent[pid]
else:
ppid = 0
# pid
if pid != 0:
out_line += ',' + str(pid)
else:
out_line += ','
# ppid
if ppid != 0:
out_line += ',' + str(ppid)
else:
out_line += ','
# pname
if len(pname) > 0:
out_line += ',' + pname
pname = ""
else:
out_line += ','
# Source ip
if len(s_ip) > 0:
out_line += ',' + s_ip
else:
out_line += ','
if len(s_port) > 0:
out_line += ',' + s_port
else:
out_line += ','
# Destination ip
if len(d_ip) > 0:
out_line += ',' + d_ip
else:
out_line += ','
if len(d_port) > 0:
out_line += ',' + d_port
else:
out_line += ','
# 7 fields are empty for audit log
for i in range(0, 7):
out_line += ','
if len(acct) > 0:
out_line += ',' + acct
else:
out_line += ','
if len(objname) > 0:
out_line += ',' + objname
else:
out_line += ','
# network direction
if len(network_direction) > 0:
out_line += ',' + network_direction
else:
out_line += ','
# write lines out, remove adjacent duplicate entries
if len([(i, j) for i, j in zip(out_line.split(','), pre_out_line.split(',')) if i != j]) > 1:
matched = False
if is_matched(out_line):
matched = True
if out_line.startswith(","):
print("malformed!")
if not ",,,,,,,,,,,,,,,,,,," in out_line:
if matched:
#wf.write(out_line + '-LA+\n')
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LA+') # + event_date + " " + timestamp
else:
#wf.write(out_line + '-LA-\n')
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LA-') # + event_date + " " + timestamp
pre_out_line = out_line
pid = 0
ppid = 0
new_pid = 0
new_ppid = 0
pname = ""
d_ip = ""
d_port = ""
s_ip = ""
s_port = ""
acct = ""
objname = ""
accesses = ""
continue
if "New Process ID:" in entry:
if "0x" in entry:
new_pid = str(int(entry.split("0x")[1].split("\"")[0], 16))
if len(new_pid) == 0:
print(entry)
else:
new_pid = str(int(entry.split()[-1].split("\"")[0]))
if len(new_pid) == 0:
print(entry)
pid = new_pid
if new_pid not in process_parent:
if new_ppid != 0:
process_parent[new_pid] = new_ppid
new_pid = 0
new_ppid = 0
continue
if "Creator Process ID:" in entry:
if "0x" in entry:
new_ppid = str(int(entry.split("0x")[1].split("\"")[0], 16))
if len(new_ppid) == 0:
print(entry)
else:
new_ppid = str(int(entry.split()[-1].split("\"")[0]))
if len(new_ppid) == 0:
print(entry)
ppid = new_ppid
continue
# process id
if "Process ID:" in entry:
if "0x" in entry:
pid = str(int(entry.split("0x")[1].split("\"")[0], 16))
if len(pid) == 0:
print(entry)
else:
pid = str(int(entry.split()[-1].split("\"")[0]))
if len(pid) == 0:
print(entry)
continue
# Process Name
if "Application Name:" in entry or "Process Name:" in entry or "New Process Name:" in entry:
if len(pname) == 0:
pname = entry.split("Name:")[1]
first_char_index = len(pname) - len(pname.lstrip())
pname = pname[first_char_index:]
#'''
last_char_index = len(pname.rstrip())
pname = pname[:last_char_index]
if "\"" in pname:
pname = pname[:len(pname)-1]
continue
# destination ip
if "Destination Address:" in entry:
d_ip = entry.split()[-1].split("\"")[0]
continue
# destination port
if "Destination Port:" in entry:
d_port = entry.split()[-1].split("\"")[0]
continue
# source ip
if "Source Address:" in entry:
s_ip = entry.split()[-1].split("\"")[0]
continue
# source port
if "Source Port:" in entry:
s_port = entry.split()[-1].split("\"")[0]
continue
# principle of object access
if "Object Type:" in entry:
acct = entry.split()[-1].split("\"")[0]
if len(accesses) > 0:
acct += accesses
continue
# network direction
if "Direction:" in entry:
network_direction = entry.split()[-1].split("\"")[0]
continue
# object name
if "Object Name:" in entry:
objname = entry.split("Object Name:")[1].lstrip().rstrip().split("\"")[0]
continue
if entry.startswith("Accesses:"):
accesses = entry.split("Accesses:")[1]
continue
# preprocess audit log
def pp_audit(wf, path):
global preprocessing_lines
log_file_path = path + '/audit.interpret.log'
all_lines = []
# first make every log entry a single line
with open(log_file_path, 'r') as f:
# f.next() # skip first ----
f.readline()
single_lines = []
single_line = []
for line in f:
line = line.strip().replace('\'', '')
if line == '----':
single_lines.append(' '.join(single_line))
single_line = []
continue
single_line.append(line)
# then for each log entry, compute domain
pre_out_line = ',' * 19
for entry in single_lines:
out_line = ''
event_date = re.findall(r'([0-9]+/[0-9]+/[0-9]+)', entry)[0]
month, day, year = event_date.split('/')
day_of_year = datetime(int(year), int(month), int(day)).timetuple().tm_yday
date_val = str(int(day_of_year) * 24 * 60 * 60)
# timestamp
timestamp = re.findall(r'([0-9]+:[0-9]+:[0-9]+)\.', entry)[0]
h, m, s = timestamp.split(':')
out_line += str(int(h) * 3600 + int(m) * 60 + int(s) + int(date_val)).zfill(20) + "a"
# queried domain
out_line += ','
# resolved ip
out_line += ','
# process id
pid = re.findall(r' pid=([0-9]+) ', entry)
if pid:
out_line += ',' + pid[0]
else:
out_line += ','
# parent process id
ppid = re.findall(r' ppid=([0-9]+) ', entry)
if ppid:
out_line += ',' + ppid[0]
else:
out_line += ','
# # process name
# pname = re.findall(r' proctitle=(\S+) ', entry)
# if pname:
# out_line += ',' + pname[0].split('/')[-1]
# else:
# out_line += ','
# process name
pname = re.findall(r' exe=(\S+) ', entry)
if pname:
out_line += ',' + pname[0]
else:
out_line += ','
# destination ip
d_ip = re.findall(r' host:([0-9]+(?:\.[0-9]+){3}) ', entry)
# src ip&port
if d_ip:
out_line += ',NO_SIP'
out_line += ',NO_SPORT'
else:
out_line += ','
# host port
out_line += ','
if d_ip:
out_line += ',' + d_ip[0]
else:
out_line += ','
# destination port
d_port = re.findall(r' serv:([0-9]+) ', entry)
if d_port:
out_line += ',' + d_port[0]
else:
out_line += ','
# 7 fields are empty for audit log
for i in range(0, 7):
out_line += ','
# # object_access_type
# acct = re.findall(r' nametype=(\S+) ', entry)
# if acct:
# out_line += ',' + acct[0]
# else:
# out_line += ','
# object_access_type
type_val = re.findall(r' type=(\S+)', entry) #[0].lower()
nametype_val = re.findall(r' nametype=(\S+) ', entry)
syscall_val = re.findall(r' syscall=(\S+) ', entry)
file_accesses = ""
if syscall_val:
if "openat" in syscall_val[0].lower():
a2_val = re.findall(r' a2=(\S+) ', entry)
if a2_val:
if "RDONLY" in a2_val[0]:
file_accesses += "readdata_"
if "WRONLY" in a2_val[0]:
file_accesses += "write_"
if "RDWR" in a2_val[0]:
file_accesses += "readdata_write_"
elif "open" in syscall_val[0].lower():
a1_val = re.findall(r' a1=(\S+) ', entry)
if a1_val:
if "RDONLY" in a1_val[0]:
file_accesses += "readdata_"
if "WRONLY" in a1_val[0]:
file_accesses += "write_"
if "RDWR" in a1_val[0]:
file_accesses += "readdata_write_"
if "remove" in syscall_val[0].lower():
file_accesses += "delete_"
if "exec" in syscall_val[0].lower():
file_accesses += "execute_"
if len(file_accesses) > 0:
out_line += ',file_' + file_accesses
else:
out_line += ','
objname = ""
if syscall_val:
if "open" in syscall_val[0].lower():
objname = re.findall(r' name=(\S+) ', entry)
if "remove" in syscall_val[0].lower():
objname = re.findall(r' a0=(\S+) ', entry)
if "exec" in syscall_val[0].lower():
objname = re.findall(r' a0=(\S+) ', entry)
if objname:
out_line += ',' + objname[0] #.split('/')[-1]
else:
out_line += ','
# authentication info
out_line += ','
# write lines out, remove adjacent duplicate entries
if len([(i, j) for i, j in zip(out_line.split(','), pre_out_line.split(',')) if i != j]) > 1:
if is_matched(out_line):
#wf.write('\n' + out_line + '-LA+') # + event_date + " " + timestamp
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LA+') # + event_date + " " + timestamp
else:
#wf.write('\n' + out_line + '-LA-') # + event_date + " " + timestamp
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LA-') # + event_date + " " + timestamp
pre_out_line = out_line
# preprocess http log
def pp_http(wf, path):
global preprocessing_lines
log_file_path = path + '/firefox.txt'
event_number = 1
with open(log_file_path, 'r') as f:
single_lines = []
single_line = ''
enter = False
for line in f:
if "uri=http" in line:
single_lines.append(line)
continue
line = line.strip().replace('\'', '').replace('\"', '')
if 'http response [' in line or 'http request [' in line:
enter = True
single_line += line
continue
if enter:
if ' ]' not in line:
single_line += ' ' + line
else:
enter = False
single_lines.append(single_line)
single_line = ''
# then for each log entry, compute domain
pre_out_line = ',' * 19
for entry in single_lines:
out_line = ''
# timestamp
timestamp = re.findall(r'([0-9]+:[0-9]+:[0-9]+)\.', entry)[0]
h, m, s = timestamp.split(':')
event_date = entry.split()[0]
year, month, day = event_date.split('-')
day_of_year = datetime(int(year), int(month), int(day)).timetuple().tm_yday
if 0 <= int(h) <= 3:
h = str(24 + int(h) + (ADJ_FF_TIME))
day_of_year = day_of_year - 1
timestamp = h + ":" + m + ":" + s
event_date = "2018-" + str(date.fromordinal(day_of_year).timetuple().tm_mon) + "-" + str(date.fromordinal(day_of_year).timetuple().tm_mday)
else:
h = str(int(h) + (ADJ_FF_TIME))
date_val = str(int(day_of_year) * 24 * 60 * 60)
out_line += str(int(h) * 3600 + int(m) * 60 + int(s) + int(date_val)).zfill(20) + "c" + str(event_number) # str((int(h) + 3) * 3600 + int(m) * 60 + int(s))
event_number += 1
for i in range(0, 9):
out_line += ','
# http type
if "uri=http" in entry:
out_line += ',' + "request"
else:
type = re.findall(r' http (\S+) \[', entry)
if type:
out_line += ',' + type[0]
else:
out_line += ','
url = ""
# get query
if "uri=http" in entry and "://" in entry:
url = entry.split("://")[1]
#url_trim = url[url.find("/"):]
url_trim = url
if len(url_trim) > 0:
url_trim = url_trim.split()[0]
if url_trim:
if url_trim.endswith("]"):
url_trim = url_trim.split("]")[0]
out_line += ',' + url_trim.replace(',', '')
else:
out_line += ','
else:
get_q = re.findall(r' GET (\S+) HTTP', entry)
if get_q:
#get_q = get_q[0][get_q[0].find("/"):]
get_q = get_q[0][:]
if get_q.endswith("]"):
get_q = get_q.split("]")[0]
if get_q.startswith("/"):
continue # redundant event
out_line += ',' + get_q.replace(',', '')
else:
out_line += ','
# post query
post_q = re.findall(r' POST (\S+) HTTP', entry)
if post_q:
post_q = post_q[0][post_q[0].find("/"):]
if post_q.endswith("]"):
post_q = post_q.split("]")[0]
out_line += ',' + post_q.replace(',', '')
else:
out_line += ','
# response code
res_code = re.findall(r' HTTP/[0-9]\.[0-9] ([0-9]+) ', entry)
if res_code:
out_line += ',' + res_code[0]
else:
out_line += ','
# 14- host domain name, if request, if response?
if " Host: " in entry:
h_domain = re.findall(r' Host: (.*?) ', entry)
if h_domain:
h_domain = h_domain[0]
if ":" in h_domain:
h_domain = h_domain.split(":")[0]
out_line += ',' + h_domain
else:
out_line += ','
else:
res_loc = re.findall(r' Location: (.*?) ', entry)
if res_loc:
host = ""
loc_url = res_loc[0]
if loc_url:
if loc_url.endswith("]"):
loc_url = loc_url.split("]")[0]
if "://" in loc_url:
host = loc_url.split("://")[1]
host = host.split("/")[0]
if ":" in host:
host = host.split(":")[0]
out_line += ',' + host
else:
out_line += ','
# 15- referer
referer = re.findall(r' Referer: (.*?) ', entry)
if referer:
referer = referer[0]
if "://" in referer:
referer = referer.split("://")[1]
if referer.endswith("/"):
referer = referer[:len(referer)-1]
out_line += ',' + referer.replace(',', '')
else:
out_line += ','
# 16- location of redirect
res_loc = re.findall(r' Location: (.*?) ', entry)
if res_loc:
res_loc = res_loc[0]
if "://" in res_loc:
res_loc = res_loc.split("://")[1]
if res_loc.endswith("/"):
res_loc = res_loc[:len(res_loc)-1]
out_line += ',' + res_loc.replace(',', '')
else:
out_line += ','
for i in range(0, 3):
out_line += ','
# write lines out, remove adjacent duplicate entries
if len([(i, j) for i, j in zip(out_line.split(','), pre_out_line.split(',')) if i != j]) > 1:
matched = False
if "/RiPleEsZw/PjttGs/ZIUgsQ.swf" in entry:
print(entry)
if is_matched(out_line):
matched = True
if not ",,,,,,,,,,,,,,,,,,," in out_line:
if matched:
#wf.write(out_line + '-LB+\n')
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LB+') # + event_date + " " + timestamp
else:
#wf.write(out_line + '-LB-\n')
preprocessing_lines.append('\n' + out_line.lower().replace("\\", "/") + '-LB-') # + event_date + " " + timestamp
pre_out_line = out_line
if __name__ == '__main__':
# '''
for file in os.listdir("training_logs"):
print("parsing: training_logs/" + file)
preprocessing_lines = []
path = os.path.join("training_logs", file + "/logs")
mlabels_file = open("training_logs" + "/" + file + "/malicious_labels.txt")
malicious_labels = mlabels_file.readlines()
malicious_labels = [x.strip() for x in malicious_labels]
print("\nMalicious entities:")
print(str(malicious_labels) + "\n")
output_file = "output/training_preprocessed_logs_" + file
training_wf = open(output_file, 'w')
if "linux" in file:
pp_audit(training_wf, path)
if "windows" in file:
pp_dns(training_wf, path)
pp_http(training_wf, path)
pp_audit_w(training_wf, path)
order_events()
training_wf.writelines(preprocessing_lines)
# '''
for file in os.listdir("testing_logs"):
print("parsing: testing_logs/" + file)
preprocessing_lines = []
path = os.path.join("testing_logs", file + "/logs")
mlabels_file = open("testing_logs" + "/" + file + "/malicious_labels.txt")
malicious_labels = mlabels_file.readlines()
malicious_labels = [x.strip() for x in malicious_labels]
print("\nMalicious entities:")
print(str(malicious_labels) + "\n")
output_file = "output/testing_preprocessed_logs_" + file
testing_wf = open(output_file, 'w')
if "linux" in file:
pp_audit(testing_wf, path)
if "windows" in file:
pp_dns(testing_wf, path)
pp_http(testing_wf, path)
pp_audit_w(testing_wf, path)
order_events()
testing_wf.writelines(preprocessing_lines)
'''
preprocessing_lines.sort()
for i in range(0, len(preprocessing_lines)):
node = preprocessing_lines[i]
preprocessing_lines[i] = str(int(node[:node.find(',')-1])) + node[node.find(','):] + "\n"
testing_wf.writelines(preprocessing_lines)
'''
|
# This program calculates gross pay.
def main():
try:
# Get the number of hours worked.
hours = int(input('How many hours did you work? '))
# Get the hourly pay rate.
pay_rate = float(input('Enter your hourly pay rate: '))
# Calculate the gross pay.
gross_pay = hours * pay_rate
# Display the gross pay.
print('Gross pay: $', format(gross_pay, ',.2f'), sep='')
except ValueError:
        print('ERROR: Hours worked and hourly pay rate must')
        print('be valid numbers.')
# Call the main function.
main()
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is
# Activestate Tool Corp.
# Portions created by the Initial Developer are Copyright (C) 2000
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Hammond <MarkH@ActiveState.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import xpcom
from xpcom import components, logger
import module
import glob, os, types
from xpcom.client import Component
# Until we get interface constants.
When_Startup = 0
When_Component = 1
When_Timer = 2
def _has_good_attr(object, attr):
# Actually allows "None" to be specified to disable inherited attributes.
return getattr(object, attr, None) is not None
def FindCOMComponents(py_module):
# For now, just run over all classes looking for likely candidates.
comps = []
for name, object in py_module.__dict__.items():
try:
if (type(object) == types.ClassType or issubclass(object, object)) and \
_has_good_attr(object, "_com_interfaces_") and \
_has_good_attr(object, "_reg_clsid_") and \
_has_good_attr(object, "_reg_contractid_"):
comps.append(object)
except TypeError:
# The issubclass call raises TypeError when the obj is not a class.
            pass
return comps
def register_self(klass, compMgr, location, registryLocation, componentType):
pcl = PythonComponentLoader
from xpcom import _xpcom
svc = _xpcom.GetServiceManager().getServiceByContractID("@mozilla.org/categorymanager;1", components.interfaces.nsICategoryManager)
svc.addCategoryEntry("component-loader", pcl._reg_component_type_, pcl._reg_contractid_, 1, 1)
class PythonComponentLoader:
_com_interfaces_ = components.interfaces.nsIComponentLoader
_reg_clsid_ = "{63B68B1E-3E62-45f0-98E3-5E0B5797970C}" # Never copy these!
_reg_contractid_ = "moz.pyloader.1"
_reg_desc_ = "Python component loader"
# Optional function which performs additional special registration
# Appears that no special unregistration is needed for ComponentLoaders, hence no unregister function.
_reg_registrar_ = (register_self,None)
# Custom attributes for ComponentLoader registration.
_reg_component_type_ = "script/python"
def __init__(self):
self.com_modules = {} # Keyed by module's FQN as obtained from nsIFile.path
self.moduleFactory = module.Module
self.num_modules_this_register = 0
def _getCOMModuleForLocation(self, componentFile):
fqn = componentFile.path
mod = self.com_modules.get(fqn)
if mod is not None:
return mod
import ihooks, sys
base_name = os.path.splitext(os.path.basename(fqn))[0]
loader = ihooks.ModuleLoader()
module_name_in_sys = "component:%s" % (base_name,)
stuff = loader.find_module(base_name, [componentFile.parent.path])
assert stuff is not None, "Couldnt find the module '%s'" % (base_name,)
py_mod = loader.load_module( module_name_in_sys, stuff )
# Make and remember the COM module.
comps = FindCOMComponents(py_mod)
mod = self.moduleFactory(comps)
self.com_modules[fqn] = mod
return mod
def getFactory(self, clsid, location, type):
# return the factory
assert type == self._reg_component_type_, "Being asked to create an object not of my type:%s" % (type,)
# FIXME: how to do this without obsolete component manager?
cmo = components.manager.queryInterface(components.interfaces.nsIComponentManagerObsolete)
file_interface = cmo.specForRegistryLocation(location)
# delegate to the module.
m = self._getCOMModuleForLocation(file_interface)
return m.getClassObject(components.manager, clsid, components.interfaces.nsIFactory)
def init(self, comp_mgr, registry):
# void
self.comp_mgr = comp_mgr
logger.debug("Python component loader init() called")
# Called when a component of the appropriate type is registered,
# to give the component loader an opportunity to do things like
# annotate the registry and such.
def onRegister (self, clsid, type, className, proId, location, replace, persist):
logger.debug("Python component loader - onRegister() called")
def autoRegisterComponents (self, when, directory):
directory_path = directory.path
self.num_modules_this_register = 0
logger.debug("Auto-registering all Python components in '%s'", directory_path)
# ToDo - work out the right thing here
# eg - do we recurse?
# - do we support packages?
entries = directory.directoryEntries
while entries.HasMoreElements():
entry = entries.GetNext(components.interfaces.nsIFile)
if os.path.splitext(entry.path)[1]==".py":
try:
self.autoRegisterComponent(when, entry)
# Handle some common user errors
except xpcom.COMException, details:
from xpcom import nsError
# If the interface name does not exist, suppress the traceback
if details.errno==nsError.NS_ERROR_NO_INTERFACE:
logger.error("Registration of '%s' failed\n %s",
entry.leafName, details.message)
else:
logger.exception("Registration of '%s' failed!", entry.leafName)
except SyntaxError, details:
# Syntax error in source file - no useful traceback here either.
logger.error("Registration of '%s' failed\n %s",
entry.leafName, details)
except:
# All other exceptions get the full traceback.
logger.exception("Registration of '%s' failed.", entry.leafName)
def autoRegisterComponent (self, when, componentFile):
# bool return
# Check if we actually need to do anything
modtime = componentFile.lastModifiedTime
loader_mgr = components.manager.queryInterface(components.interfaces.nsIComponentLoaderManager)
if not loader_mgr.hasFileChanged(componentFile, None, modtime):
return 1
if self.num_modules_this_register == 0:
# New components may have just installed new Python
# modules into the main python directory (including new .pth files)
# So we ask Python to re-process our site directory.
# Note that the pyloader does the equivalent when loading.
try:
from xpcom import _xpcom
import site
NS_XPCOM_CURRENT_PROCESS_DIR="XCurProcD"
dirname = _xpcom.GetSpecialDirectory(NS_XPCOM_CURRENT_PROCESS_DIR)
dirname.append("python")
site.addsitedir(dirname.path)
except:
logger.exception("PyXPCOM loader failed to process site directory before component registration")
self.num_modules_this_register += 1
# auto-register via the module.
m = self._getCOMModuleForLocation(componentFile)
m.registerSelf(components.manager, componentFile, None, self._reg_component_type_)
loader_mgr = components.manager.queryInterface(components.interfaces.nsIComponentLoaderManager)
loader_mgr.saveFileInfo(componentFile, None, modtime)
return 1
def autoUnregisterComponent (self, when, componentFile):
# bool return
# auto-unregister via the module.
m = self._getCOMModuleForLocation(componentFile)
loader_mgr = components.manager.queryInterface(components.interfaces.nsIComponentLoaderManager)
try:
m.unregisterSelf(components.manager, componentFile)
finally:
loader_mgr.removeFileInfo(componentFile, None)
return 1
def registerDeferredComponents (self, when):
# bool return
logger.debug("Python component loader - registerDeferred() called")
return 0 # no more to register
def unloadAll (self, when):
# This is called at shutdown time - don't get too upset if an error
# results from logging due to the logfile being closed
try:
logger.debug("Python component loader being asked to unload all components!")
except:
# Evil blank except, but restricting to just catching IOError
# failure means custom logs could still screw us
pass
self.comp_mgr = None
self.com_modules = {}
def MakePythonComponentLoaderModule(serviceManager, nsIFile):
import module
return module.Module( [PythonComponentLoader] )
|
import logging
from serial import Serial
from influxdb import InfluxDBClient
from influxdb.client import InfluxDBClientError
import paho.mqtt.client as mqtt
import time
from settings import *
ser = Serial(SERIAL_PORT, 460800, timeout=5, xonxoff=True)
influx_client = InfluxDBClient(INFLUX_HOST, 8086, INFLUX_USER,
INFLUX_PWD, INFLUX_DBNAME ,timeout=30)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
try:
influx_client.create_database(INFLUX_DBNAME)
except InfluxDBClientError:
logging.debug("{} DB already exist".format(INFLUX_DBNAME))
pass
except:
logging.exception("Error")
try:
influx_client.create_retention_policy('{}_policy'.format(SITE_NAME),
'{}d'.format(2*165), 3, default=True)
except InfluxDBClientError:
logging.debug("%s policy already exist" % DBNAME)
pass
class HD21ABE17(object):
labels = {
"co2": ["CO2", "Carbon Dioxide (ppm)"],
"co": ["CO", "Carbon Monoxide (ppm)"],
"rh": ["RH", "Relative Humidity (%)"],
"temp": ["T", "Temperature (°C)"],
"pressure": ["Patm", "Atmospheric Pressure (hPa)"],
"td": ["Td", "Dew Point"],
"tw": ["Tw", "Wet Bulb Temperature"],
"ah": ["AH", "Absolute Humidity"],
"r": ["r", "Mixing Ratio"],
"h": ["H", "Enthalpy"]
}
def __init__(self, date, co2, co, rh, temp, pressure, not_implemented1, not_implemented2, not_implemented3,
td, ah, r, tw, h):
#self.date = datetime.datetime.strptime(date, 'Date=%Y/%m/%d %H:%M:%S')
self.co2 = int(co2)
self.co = int(co)
self.rh = float(rh)
self.temp = float(temp)
self.pressure = int(pressure)
#self.valuex1 = str(valuex1)
#self.valuex2 = str(valuex2)
#self.valuex3 = str(valuex3)
self.td = float(td)
self.ah = float(ah)
self.r = float(r)
self.tw = float(tw)
self.h = float(h)
def get_label(self,item):
return self.labels.get(item)
def read_values():
with ser as port:
port.write(b'P0\r')
port.readline()
port.write(b'HA\r')
res=list(map(lambda x: x.strip(), port.readline().decode().split(";")))
port.write(b'P1\r')
port.readline()
port.close()
return HD21ABE17(*res)
def on_publish(client, userdata, result): #create function for callback
logging.debug("data published \n")
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
logging.debug("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe('{}#'.format(MQTT_TOPIC))
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
logging.debug(msg.topic+" "+str(msg.payload))
mqtt_client = mqtt.Client()
mqtt_client.username_pw_set(MQTT_USER, password=MQTT_PWD)
mqtt_client.on_publish = on_publish
mqtt_client.on_connect = on_connect
mqtt_client.on_message = on_message
mqtt_client.connect(MQTT_HOST, 1883, 60)
# Start a background network loop so the broker connection is serviced
# (callbacks dispatched, reconnects handled) while the polling loop below
# blocks on the serial port and on time.sleep(). loop_forever() would block
# here, so the threaded loop_start() interface is used instead.
mqtt_client.loop_start()
while True:
values = read_values()
influx_points = []
for k, v in values.__dict__.items():
topic = MQTT_TOPIC + "/{}".format(k)
ret = mqtt_client.publish(topic,v)
label, desc = values.get_label(k)
logging.debug("{} [{}]= {}".format(label, desc, v))
json_body = {
"measurement": SITE_NAME,
"tags": {
"sensor": k,
"label": label,
"desc": desc
},
"fields": {
"value": float(v)
}
}
influx_points.append(json_body)
influx_client.write_points(influx_points)
time.sleep(POLL_TIME)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" QiCore Python Module """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
def register():
""" Register QiCore Python Module """
    # Get the Absolute Path of the Package
path_package = os.path.dirname(os.path.realpath(__file__))
# Get the Root Path of QiCore
path_qicore = os.path.join(path_package, "linux")
if "darwin" in sys.platform:
path_qicore = os.path.join(path_package, "mac")
elif "win" in sys.platform:
path_qicore = os.path.join(path_package, "win")
# Update QiAdditionalSdkPrefixes if needed
qisdk_prefixes = os.environ.get("QI_ADDITIONAL_SDK_PREFIXES", "")
if path_qicore not in qisdk_prefixes:
if qisdk_prefixes:
qisdk_prefixes += os.path.pathsep
os.environ["QI_ADDITIONAL_SDK_PREFIXES"] = qisdk_prefixes + path_qicore
|
#Global parameters module
N = 1024
K = 16
Q = 12289
POLY_BYTES = 1792
NEWHOPE_SEEDBYTES = 32
NEWHOPE_RECBYTES = 256
NEWHOPE_SENDABYTES = POLY_BYTES + NEWHOPE_SEEDBYTES
NEWHOPE_SENDBBYTES = POLY_BYTES + NEWHOPE_RECBYTES
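# Derived message sizes, for reference (simple arithmetic on the constants
# above; the roles follow the usual NewHope key-exchange message flow):
#   NEWHOPE_SENDABYTES = 1792 + 32  = 1824 bytes  (first message: polynomial + seed)
#   NEWHOPE_SENDBBYTES = 1792 + 256 = 2048 bytes  (reply: polynomial + reconciliation data)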
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'table_example.ui'
#
# Created: Tue Aug 08 17:25:48 2017
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(634, 857)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label = QtGui.QLabel(Dialog)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.itemInput = QtGui.QLineEdit(Dialog)
self.itemInput.setObjectName(_fromUtf8("itemInput"))
self.horizontalLayout_2.addWidget(self.itemInput)
self.confirm = QtGui.QPushButton(Dialog)
self.confirm.setObjectName(_fromUtf8("confirm"))
self.horizontalLayout_2.addWidget(self.confirm)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.tableWidget = QtGui.QTableWidget(Dialog)
self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.verticalLayout.addWidget(self.tableWidget)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.getData = QtGui.QPushButton(Dialog)
self.getData.setObjectName(_fromUtf8("getData"))
self.horizontalLayout.addWidget(self.getData)
self.Plot = QtGui.QPushButton(Dialog)
self.Plot.setObjectName(_fromUtf8("Plot"))
self.horizontalLayout.addWidget(self.Plot)
self.verticalLayout.addLayout(self.horizontalLayout)
self.gridLayout.addLayout(self.verticalLayout, 1, 0, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label.setText(_translate("Dialog", "Item", None))
self.confirm.setText(_translate("Dialog", "Confirm", None))
self.getData.setText(_translate("Dialog", "Get Data", None))
self.Plot.setText(_translate("Dialog", "Plot", None))
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
import time
class TimingCtx:
def __init__(self, init=None, init_ns=None, first_tic=None):
self.timings = init if init is not None else Counter()
self.ns = init_ns if init_ns is not None else Counter()
self.arg = None
self.last_clear = time.time()
self.first_tic = first_tic
def clear(self):
self.timings.clear()
self.last_clear = time.time()
def start(self, arg):
if self.arg is not None:
self.__exit__()
self.__call__(arg)
self.__enter__()
def stop(self):
self.start(None)
def __call__(self, arg):
self.arg = arg
return self
def __enter__(self):
self.tic = time.time()
if self.first_tic is None:
self.first_tic = self.tic
def __exit__(self, *args):
self.timings[self.arg] += time.time() - self.tic
self.ns[self.arg] += 1
self.arg = None
def __repr__(self):
return dict(
total=time.time() - self.last_clear,
**dict(sorted(self.timings.items(), key=lambda kv: kv[1], reverse=True)),
).__repr__()
def __truediv__(self, other):
return {k: v / other for k, v in self.timings.items()}
def __iadd__(self, other):
if other != 0:
self.timings += other.timings
self.ns += other.ns
return self
def items(self):
return self.timings.items()
@classmethod
def pprint_multi(cls, timings, log_fn):
data = []
for k in timings[0].timings.keys():
t_mean = sum(t.timings[k] for t in timings) / len(timings)
t_max = max(t.timings[k] for t in timings)
n = sum(t.ns[k] for t in timings) / len(timings)
data.append((t_mean, t_max, k, n))
log_fn(f"TimingCtx summary of {len(timings)} timings (mean/max shown over this group):")
ind_log_fn = lambda x: log_fn(" " + x)
ind_log_fn(
"{:^24}|{:^8}|{:^18}|{:^18}".format("Key", "N_mean", "t_mean (ms)", "t_max (ms)")
)
ind_log_fn("-" * (24 + 8 + 18 + 18))
for t_mean, t_max, k, n in sorted(data, reverse=True):
ind_log_fn(
"{:^24}|{:^8.1f}|{:^18.1f}|{:^18.1f}".format(k, n, t_mean * 1e3, t_max * 1e3)
)
ind_log_fn("-" * (24 + 8 + 18 + 18))
sum_t_mean = sum(t_mean for t_mean, _, _, _ in data)
ind_log_fn("{:^24}|{:^8}|{:^18.1f}|{:^18}".format("Total", "", sum_t_mean * 1e3, ""))
def pprint(self, log_fn):
data = []
for k in self.timings.keys():
n = self.ns[k]
t_total = self.timings[k]
t_mean = t_total / n
data.append((t_total, t_mean, k, n))
log_fn(f"TimingCtx summary:")
ind_log_fn = lambda x: log_fn(" " + x)
ind_log_fn("{:^24}|{:^8}|{:^18}|{:^18}".format("Key", "N", "t_total (ms)", "t_mean (ms)"))
ind_log_fn("-" * (24 + 8 + 18 + 18))
for t_total, t_mean, k, n in sorted(data, reverse=True):
ind_log_fn(
"{:^24}|{:^8.1f}|{:^18.1f}|{:^18.1f}".format(k, n, t_total * 1e3, t_mean * 1e3)
)
ind_log_fn("-" * (24 + 8 + 18 + 18))
elapsed = time.time() - self.first_tic
sum_t_total = sum(t_total for t_total, _, _, _ in data)
ind_log_fn(
"{:^24}|{:^8}|{:^18.1f}|{:^18}".format("Lost", "", (elapsed - sum_t_total) * 1e3, "")
)
ind_log_fn("{:^24}|{:^8}|{:^18.1f}|{:^18}".format("Total", "", sum_t_total * 1e3, ""))
class DummyCtx:
def __enter__(self):
pass
def __exit__(self, *args):
pass
def __call__(self, *args):
return self
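# Minimal usage sketch (illustrative only, not part of the original module;
# the workload functions are hypothetical):
#
#   timer = TimingCtx()
#   with timer("forward"):
#       run_forward_pass()        # hypothetical workload
#   with timer("backward"):
#       run_backward_pass()       # hypothetical workload
#   timer.pprint(print)           # per-key totals/means plus "Lost" and "Total" rows
#
# DummyCtx is a drop-in no-op replacement: swap TimingCtx() for DummyCtx()
# to disable timing without touching the `with timer(...)` call sites.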
|
import pytest
from steputils.express import pyparser, ast
primary = pyparser.primary.copy().addParseAction(ast.Primary.action)
bound_spec = pyparser.bound_spec.copy().addParseAction(ast.BoundSpec.action)
def test_bound_spec():
r = bound_spec.parseString('[3:3]')[0]
assert len(r) == 2
bound_1 = r[0]
bound_2 = r[1]
assert bound_1 == 3
assert bound_2 == 3
assert repr(r) == '(BoundSpec, 3, 3)'
def test_primary():
r = primary.parseString('SELF[1]')[0]
assert repr(r) == "(Primary, 'SELF', '[', 1, ']')"
r = primary.parseString('1')[0]
assert type(r) is int
r = primary.parseString('1.0')[0]
assert type(r) is float
r = primary.parseString("'s'")[0]
assert type(r) is ast.StringLiteral
if __name__ == '__main__':
pytest.main([__file__])
|
import sys
sys.path.append('.')
# stdlib
import os
import shutil
from glob import glob
from tqdm.auto import tqdm
import re
import time
import argparse
# numlib
import pandas as pd
from global_config import Config
from utils.file import Logger
from utils.metrics import map_2cls
from utils.common import increment_path
from utils.torch_common import seed_everything, memory_cleanup
from func import make_fold, allocate_files, get_image_sub
from detection.pseudo.func import allocate_pseudo_files
def yolo_infer(ck_path, image_size=512,
batch_size=16,
iou_thresh=0.5,
conf_thresh=0.001,
mode='remote',
save_dir='../result/yolo/submit',
fold_path=None,
duplicate_path=None,
device=0):
t0 = time.time()
memory_cleanup()
yolo_ver = 'yolov5' if 'yolov5' in ck_path else 'yolotrs' if 'yolotrs' in ck_path else None
assert yolo_ver is not None
t = time.strftime('%Y%m%d_%H%M%S')
fold = -1
for s in ck_path.split('/'):
for ss in s.split('.'):
for sss in ss.split('_'):
if len(sss) == 5 and 'fold' in sss:
fold = int(sss.replace('fold',''))
ck_name = ss
break
if fold != -1: break
assert fold > -1, 'checkpoint path is not in correct structure'
    ck_name += re.sub(r'[^\w_-]', '', '_%d_iou%.2f_conf%.4f'%(image_size, iou_thresh, conf_thresh))
os.makedirs(save_dir, exist_ok=True)
#----logging
log = Logger()
log.open('../logging/yolo_inference.txt', mode='a')
log.write(f'infer {ck_name} - fold {fold} - {mode}\n'.upper())
log.write(t+'\n')
log.write(ck_path+'\n')
log.write('mode=%s,fold=%d,batch_size=%d,image_size=%d,iou=%.4f,conf=%.4f\n'\
%(mode,fold,batch_size,image_size,iou_thresh,conf_thresh))
#----
if mode == 'local':
_, df_valid = make_fold('train-%d'%fold, Config.csv_path, fold_path, duplicate_path)
allocate_files(fold, csv_path=Config.csv_path,
yaml_path=Config.yaml_data_path,
save_dir=os.path.abspath('../dataset/chest'),
num_classes=Config.num_classes,
class_names=Config.class_names,
is_train=False,
fold_path=fold_path,
duplicate_path=duplicate_path)
exp_path = f'./detection/yolo/{yolo_ver}/runs/test/exp'
if os.path.exists(exp_path): shutil.rmtree(exp_path)
os.chdir(f'./detection/yolo/{yolo_ver}')
infer_command = f'python \
./test.py \
--batch-size {batch_size} \
--img {image_size} \
--conf {conf_thresh} \
--iou {iou_thresh} \
--weights {os.path.abspath("../../../" + ck_path)} \
--data {os.path.abspath("../../../" + Config.yaml_data_path)} \
--augment \
--save-txt \
--save-conf \
--exist-ok \
--verbose'
elif mode == 'remote':
df_valid = make_fold('test', Config.csv_path)
test_image_dir = allocate_files(None,
csv_path=Config.csv_path,
yaml_path=None,
save_dir=os.path.abspath('../../../dataset/chest'),
num_classes=Config.num_classes,
class_names=Config.class_names,
is_train=False)
exp_path = f'./detection/yolo/{yolo_ver}/runs/detect/exp'
if os.path.exists(exp_path): shutil.rmtree(exp_path)
os.chdir(f'./detection/yolo/{yolo_ver}')
infer_command = f'python ./detect.py \
--weights {os.path.abspath("../../../" + ck_path)} \
--img {image_size} \
--conf {conf_thresh} \
--iou {iou_thresh} \
--source {os.path.abspath("../../../" + test_image_dir)} \
--augment \
--save-txt \
--save-conf \
--exist-ok \
--nosave'
elif mode == 'pseudo':
df_valid = pd.read_csv('../dataset/image-level-psuedo-label-metadata-siim/bimcv_ricord.csv')
test_image_dir = '../dataset/chest'
allocate_pseudo_files(test_image_dir)
exp_path = f'./detection/yolo/{yolo_ver}/runs/detect/exp'
if os.path.exists(exp_path): shutil.rmtree(exp_path)
os.chdir(f'./detection/yolo/{yolo_ver}')
infer_command = f'python ./detect.py \
--weights {os.path.abspath("../../../" + ck_path)} \
--img {image_size} \
--conf {conf_thresh} \
--iou {iou_thresh} \
--source {os.path.abspath("../../../" + test_image_dir)} \
--augment \
--save-txt \
--save-conf \
--exist-ok \
--nosave'
if Config.device.type == 'cuda':
        infer_command += f' --device {device}'
os.system(infer_command)
os.chdir('../../..')
df_sub = get_image_sub(os.path.join(exp_path, 'labels'), df_valid)
logging_path = increment_path(f'../logging/yolo/{mode}/{ck_name}', exist_ok=False, sep='_')
shutil.move(exp_path, logging_path)
prediction_path = increment_path(os.path.join(save_dir, ck_name), exist_ok=False, sep='_') + '.csv'
df_sub.to_csv(prediction_path, index=False)
if mode == 'local':
log.write('opacity map = %.5f\nnone map = %.5f\n'%map_2cls(df_valid, df_sub))
log.write('Result saved to %s\n'%os.path.abspath(prediction_path))
t1 = time.time()
log.write('Inference took %ds\n\n'%(t1 - t0))
log.write('============================================================\n\n')
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('-ck-paths', type=str, nargs='+')
parser.add_argument('--image-size', type=int, default=512)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--iou-thr', type=float, default=0.5)
parser.add_argument('--conf-thr', type=float, default=0.001)
parser.add_argument('--mode', type=str, default='remote')
parser.add_argument('--device', default=0, help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
return parser.parse_args()
def main():
seed_everything(Config.seed)
opt = parse_opt()
ck_paths, image_size, batch_size, \
iou, conf, mode, device = \
opt.ck_paths, opt.image_size, opt.batch_size, \
opt.iou_thr, opt.conf_thr, opt.mode, opt.device
if opt.mode == 'pseudo':
save_dir = os.path.abspath('../result/pseudo/prediction')
else:
save_dir = os.path.abspath(f'../result/yolo/submit/{opt.mode}')
for ck_path in ck_paths:
yolo_infer(ck_path,
image_size=image_size,
batch_size=batch_size,
iou_thresh=iou,
conf_thresh=conf,
mode=mode,
save_dir=save_dir,
fold_path=Config.fold_path,
duplicate_path=Config.duplicate_path,
device=device)
if __name__ == '__main__':
main()
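# Example invocation (illustrative; the script filename and checkpoint path are
# hypothetical -- the fold index is parsed from a "fold<N>" token in the name):
#   python yolo_infer.py -ck-paths ../result/yolo/yolov5_fold0.pt \
#       --image-size 512 --batch-size 16 --iou-thr 0.5 --conf-thr 0.001 \
#       --mode local --device 0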
|