| max_stars_repo_path (string, len 3-269) | max_stars_repo_name (string, len 4-119) | max_stars_count (int64, 0-191k) | id (string, len 1-7) | content (string, len 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| src/models.py | tonouchi510/tensorflow-design | 0 | 12785051 |
<filename>src/models.py
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from typing import List
from absl import flags
FLAGS = flags.FLAGS
def build_model(input_shape: List, num_classes: int):
"""トレーニングに使用するモデルを作成する.
Args:
input_shape {List} -- 入力データのshape.
num_classes {int} -- クラス数.
"""
inputs = tf.keras.Input(shape=input_shape)
outputs = tf.keras.layers.Dense(num_classes, activation="softmax")(inputs)
model = CustomModelClass(inputs, outputs)
# If you do not need the custom model class, use the plain Keras model below instead:
#model = tf.keras.Model(inputs, outputs)
return model
class CustomModelClass(tf.keras.Model):
"""トレーニングループをカスタマイズするための自前クラス.
tf.keras.Model.fit時に内部で呼び出される`train_step`メソッドをオーバーライドすることで、
トレーニングループのカスタマイズが可能.
Args:
tf ([type]): [description]
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Parameters that the class needs to reference internally
self.param_a = "~~~"
self.param_b = "~~~"
def train_step(self, data):
"""Implement the custom training loop here.
`tf.keras.Model.fit` calls this method internally with a single `data`
argument (typically an `(x, y)` tuple). The loss function and optimizer
specified in `tf.keras.Model.compile` are accessible from it.
Args:
data {Tuple} -- a batch of inputs and targets (e.g. images and their labels).
Returns:
dict -- mapping of metric names to their current values.
"""
x, y = data
# any preprocessing goes here
with tf.GradientTape() as tape:
# any additional processing goes here
y_pred = self(x, training=True) # calling self runs the model's forward pass on the inputs
# any additional processing goes here
loss = self.compiled_loss(y, y_pred)
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
return {m.name: m.result() for m in self.metrics}
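# Minimal usage sketch (not part of the original file): compiling and fitting the
# custom model on synthetic data shows that fit() routes each batch through the
# overridden train_step above. The shapes, optimizer and loss below are assumptions
# for illustration only.
if __name__ == "__main__":
    import numpy as np
    demo_x = np.random.rand(64, 32).astype("float32")   # 64 samples, 32 features
    demo_y = np.random.randint(0, 10, size=(64,))        # 10 classes
    model = build_model(input_shape=[32], num_classes=10)
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    model.fit(demo_x, demo_y, epochs=1, batch_size=16)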
| 2.796875 | 3 |
| undaqTools/examples/example01__daq_to_hdf5_batch_conversion.py | rogerlew/undaqTools | 0 | 12785052 |
<filename>undaqTools/examples/example01__daq_to_hdf5_batch_conversion.py
from __future__ import print_function
# Copyright (c) 2013, <NAME>
# All rights reserved.
# undaq.py in the scripts folder is a more fully featured version of this
# example, but this code is a bit easier to follow.
"""
Batch convert daq files to HDF5 in parallel using the multiprocessing module
"""
import os
import glob
import time
import multiprocessing
import undaqTools
# define a function to convert a daq to hdf5
def convert_daq(daq_file):
"""
converts a daqfile to hdf5
Parameters
----------
daq_file : string
relative path to daq_file
Returns
-------
elapsed_time : float
returns the time it took to complete converting daq or -1 if the
conversion fails.
"""
t0 = time.time()
# both multiprocessing and ipython cluster mask Exceptions
# This way we can at least identify what file fails and the batch
# processing continues
#
# Discovered this is needed the hard way. During an experimental trial
# our MiniSim ran out of harddrive space and the resulting Daq failed to
# load.
try:
daq = undaqTools.Daq()
daq.read(daq_file)
daq.write_hd5(daq_file.replace('.daq', '.hdf5'))
del daq
return time.time()-t0
except Exception:
return -1
if __name__ == '__main__':
# data is on a local SSD drive. This is very important for performance.
data_dir = './'
# change the directory of the kernel
print("Changing wd to '%s"%data_dir)
os.chdir(data_dir)
# specifies whether to convert all the daqs or only the ones whose HDF5
# files haven't been created yet. Unless you muck with the Daq or DynObj
# classes it should be fine to leave this False
rebuild_all = False
# The clients may be RAM limited. In this example the machine has 8 cores
# but we are only using 6 to convert the daq files (with this particular
# dataset) (Machine has 32GB of memory and daqfiles are ~ 1.5 GB each,
# memory peaks at about 29 GB with no other applications.)
numcpus = 6
# parallel worker pool
pool = multiprocessing.Pool(numcpus)
# find all hdf5 files and all the daq files
# we don't want to convert the daq files unless we have to
#
# data is organized such that every participant has his or her own folder.
# Casting to tuples isn't strictly necessary, but it ensures they are immutable.
hd5_files = tuple(glob.glob('*/*.hdf5'))
daq_files = tuple(glob.glob('*/*.daq'))
# need to build list of daq_files to convert to pass to convert_daq
if rebuild_all:
daqs2convert = daq_files
else:
daqs2convert = \
[daq for daq in daq_files if daq[:-3]+'hdf5' not in hd5_files]
# ready to roll.
print('\nConverting daqs (this may take awhile)...')
t0 = time.time() # start global time clock
# this launches the batch processing of the daq files
times = pool.imap(convert_daq, daqs2convert)
# this provides feedback as the sets of files complete. Using imap
# guarantees that the times are in the same order as daqs2convert but
# delays receiving feedback
for i, elapsed_time in enumerate(times):
print(' {:<43}{:.1f} s'.format(daqs2convert[i], elapsed_time))
elapsed_time = time.time() - t0 + 1e-6 # so throughput calc doesn't bomb
# when daq2convert is empty
# close multiprocessing pool
pool.close()
pool.join()
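# The elapsed_time computed above appears intended for a throughput summary; a
# small sketch of that calculation (not in the original example) could be:
print('\nBatch processing completed.')
print('Converted {} daq files in {:.1f} s ({:.2f} files/min)'
      .format(len(daqs2convert), elapsed_time,
              60.0 * len(daqs2convert) / elapsed_time))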
| 2.78125 | 3 |
| viz.py | huanzhang12/tensorflow-alexnet-model | 7 | 12785053 |
<gh_stars>1-10
import tensorflow as tf
from tensorflow.python.platform import gfile
import sys
if len(sys.argv) < 3:
print("Usage: {} model_file log_dir".format(sys.argv[0]))
sys.exit(0)
model_filename = sys.argv[1]
LOGDIR = sys.argv[2]
with tf.Session() as sess:
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
g_in = tf.import_graph_def(graph_def)
for node in graph_def.node:
print(node.name)
print("writing summary to", LOGDIR)
train_writer = tf.summary.FileWriter(LOGDIR)
train_writer.add_graph(sess.graph)
train_writer.close()
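# Follow-up (not part of the original script): once the graph summary has been
# written, it can be inspected in the browser with TensorBoard, e.g.
#   tensorboard --logdir <LOGDIR>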
| 2.578125 | 3 |
| views/game_over_view.py | AnotherCat/dtc-level-2-game | 0 | 12785054 |
<gh_stars>0
from typing import TYPE_CHECKING
from arcade import View, draw_text, set_viewport, start_render
from arcade.color import WHITE
from static_values import HEIGHT, START_LEVEL, WIDTH
if TYPE_CHECKING:
from main import GameWindow
class GameOverView(View):
"""View to show when game is over"""
def __init__(self) -> None:
"""This is run once when we switch to this view"""
super().__init__()
self.window.set_mouse_visible(True)
# Make the mouse visible
self.window: "GameWindow"
# Store this so all progress is not lost
self.current_level: int
def setup(self, current_level: int = START_LEVEL) -> None:
# Reset the viewport, necessary if we have a scrolling game and we need
# to reset the viewport back to the start so we can see what we draw.
set_viewport(0, WIDTH - 1, 0, HEIGHT - 1)
self.current_level = current_level
def on_draw(self) -> None:
"""Draw this view"""
start_render()
draw_text(
"Game Over", WIDTH / 2, HEIGHT / 2, WHITE, font_size=50, anchor_x="center"
)
draw_text(
"Click to restart",
WIDTH / 2,
HEIGHT - 75,
WHITE,
font_size=20,
anchor_x="center",
)
def on_mouse_press(
self, _x: float, _y: float, _button: int, _modifiers: int
) -> None:
"""If the user presses the mouse button, re-start the game."""
game_view = self.window.game_view
game_view.setup(self.current_level)
self.window.show_view(game_view)
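# Hypothetical wiring sketch (not from this repository): assuming the GameWindow
# switches to this view when the player loses, it might do something like
#   game_over = GameOverView()
#   game_over.setup(current_level=self.current_level)
#   self.show_view(game_over)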
| 3.09375 | 3 |
| tests/unit/test_cluster.py | kishkaru/python-driver | 0 | 12785055 |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import patch, Mock
from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\
InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException
from cassandra.cluster import _Scheduler, Session, Cluster
from cassandra.policies import HostDistance
from cassandra.query import SimpleStatement
class ExceptionTypeTest(unittest.TestCase):
def test_exception_types(self):
"""
PYTHON-443
Sanity check to ensure we don't unintentionally change class hierarchy of exception types
"""
self.assertTrue(issubclass(Unavailable, DriverException))
self.assertTrue(issubclass(Unavailable, RequestExecutionException))
self.assertTrue(issubclass(ReadTimeout, DriverException))
self.assertTrue(issubclass(ReadTimeout, RequestExecutionException))
self.assertTrue(issubclass(ReadTimeout, Timeout))
self.assertTrue(issubclass(WriteTimeout, DriverException))
self.assertTrue(issubclass(WriteTimeout, RequestExecutionException))
self.assertTrue(issubclass(WriteTimeout, Timeout))
self.assertTrue(issubclass(CoordinationFailure, DriverException))
self.assertTrue(issubclass(CoordinationFailure, RequestExecutionException))
self.assertTrue(issubclass(ReadFailure, DriverException))
self.assertTrue(issubclass(ReadFailure, RequestExecutionException))
self.assertTrue(issubclass(ReadFailure, CoordinationFailure))
self.assertTrue(issubclass(WriteFailure, DriverException))
self.assertTrue(issubclass(WriteFailure, RequestExecutionException))
self.assertTrue(issubclass(WriteFailure, CoordinationFailure))
self.assertTrue(issubclass(FunctionFailure, DriverException))
self.assertTrue(issubclass(FunctionFailure, RequestExecutionException))
self.assertTrue(issubclass(RequestValidationException, DriverException))
self.assertTrue(issubclass(ConfigurationException, DriverException))
self.assertTrue(issubclass(ConfigurationException, RequestValidationException))
self.assertTrue(issubclass(AlreadyExists, DriverException))
self.assertTrue(issubclass(AlreadyExists, RequestValidationException))
self.assertTrue(issubclass(AlreadyExists, ConfigurationException))
self.assertTrue(issubclass(InvalidRequest, DriverException))
self.assertTrue(issubclass(InvalidRequest, RequestValidationException))
self.assertTrue(issubclass(Unauthorized, DriverException))
self.assertTrue(issubclass(Unauthorized, RequestValidationException))
self.assertTrue(issubclass(AuthenticationFailed, DriverException))
self.assertTrue(issubclass(OperationTimedOut, DriverException))
self.assertTrue(issubclass(UnsupportedOperation, DriverException))
class ClusterTest(unittest.TestCase):
def test_invalid_contact_point_types(self):
with self.assertRaises(ValueError):
Cluster(contact_points=[None], protocol_version=4, connect_timeout=1)
with self.assertRaises(TypeError):
Cluster(contact_points="not a sequence", protocol_version=4, connect_timeout=1)
def test_requests_in_flight_threshold(self):
d = HostDistance.LOCAL
mn = 3
mx = 5
c = Cluster(protocol_version=2)
c.set_min_requests_per_connection(d, mn)
c.set_max_requests_per_connection(d, mx)
# min underflow, max, overflow
for n in (-1, mx, 127):
self.assertRaises(ValueError, c.set_min_requests_per_connection, d, n)
# max underflow, under min, overflow
for n in (0, mn, 128):
self.assertRaises(ValueError, c.set_max_requests_per_connection, d, n)
class SchedulerTest(unittest.TestCase):
# TODO: this suite could be expanded; for now just adding a test covering a ticket
@patch('time.time', return_value=3) # always queue at same time
@patch('cassandra.cluster._Scheduler.run') # don't actually run the thread
def test_event_delay_timing(self, *_):
"""
Schedule something with a time collision to make sure the heap comparison works
PYTHON-473
"""
sched = _Scheduler(None)
sched.schedule(0, lambda: None)
sched.schedule(0, lambda: None) # pre-473: "TypeError: unorderable types: function() < function()"
class SessionTest(unittest.TestCase):
# TODO: this suite could be expanded; for now just adding a test covering a PR
@patch('cassandra.cluster.ResponseFuture._make_query_plan')
def test_default_serial_consistency_level(self, *_):
"""
Make sure default_serial_consistency_level passes through to a query message.
Also make sure Statement.serial_consistency_level overrides the default.
PR #510
"""
s = Session(Mock(protocol_version=4), [])
# default is None
self.assertIsNone(s.default_serial_consistency_level)
sentinel = 1001
for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL, sentinel):
s.default_serial_consistency_level = cl
# default is passed through
f = s._create_response_future(query='', parameters=[], trace=False, custom_payload={}, timeout=100)
self.assertEqual(f.message.serial_consistency_level, cl)
# any non-None statement setting takes precedence
for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL):
f = s._create_response_future(SimpleStatement(query_string='', serial_consistency_level=cl_override), parameters=[], trace=False, custom_payload={}, timeout=100)
self.assertEqual(s.default_serial_consistency_level, cl)
self.assertEqual(f.message.serial_consistency_level, cl_override)
| 1.921875 | 2 |
| envreader/field_getter.py | vd2org/envreader | 11 | 12785056 |
# Copyright (C) 2020-2021 by Vd.
# This file is part of EnvReader, the modern environment variables processor.
# EnvReader is released under the MIT License (see LICENSE).
from typing import Callable, Optional
from .field import Field
class FieldGetter(Field):
def __init__(self, default, alias: str, transform: Callable, description: Optional[str],
example: Optional[str], cached: bool):
super().__init__(default, alias=alias, transform=transform, description=description, example=example)
self.__cached = cached
self.__value = None
def get_value(self):
if self.__value is not None: # a plain truthiness check would skip the cache for falsy values
return self.__value
val = super().get_value()
if self.__cached:
self.__value = val
return val
def __get__(self, obj, cls=None):
return self.get_value()
def __set__(self, obj, value):
raise AttributeError("You can't change an env variable from here! Use os.environ for this.")
| 2.453125 | 2 |
| Advance/5.Regex.py | AMZEnterprise/Python_Course_Jadi | 0 | 12785057 |
<reponame>AMZEnterprise/Python_Course_Jadi
import re
text = 'Hello ali, how are you ali? I am fine. And you?'  # avoid shadowing the built-in str
result = re.search(r'ali', text)
print(result)
result = re.sub(r'ali', 'Mahdi', text)
print(result)
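# Expected output for the string above: re.search returns a match object for the
# first 'ali', e.g. <re.Match object; span=(6, 9), match='ali'>, while re.sub
# replaces every occurrence and prints:
# Hello Mahdi, how are you Mahdi? I am fine. And you?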
| 3.671875 | 4 |
| flash/video/classification/input_transform.py | Actis92/lightning-flash | 1,457 | 12785058 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Callable
import torch
from flash.core.data.io.input_transform import InputTransform
from flash.core.data.transforms import ApplyToKeys
from flash.core.utilities.imports import _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, requires
if _KORNIA_AVAILABLE:
import kornia.augmentation as K
if _PYTORCHVIDEO_AVAILABLE:
from pytorchvideo.transforms import UniformTemporalSubsample
from torchvision.transforms import CenterCrop, Compose, RandomCrop
else:
ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
@requires("video")
@dataclass
class VideoClassificationInputTransform(InputTransform):
image_size: int = 244
temporal_sub_sample: int = 8
mean: torch.Tensor = torch.tensor([0.45, 0.45, 0.45])
std: torch.Tensor = torch.tensor([0.225, 0.225, 0.225])
data_format: str = "BCTHW"
same_on_frame: bool = False
def per_sample_transform(self) -> Callable:
if self.training:
per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]
else:
per_sample_transform = [CenterCrop(self.image_size)]
return ApplyToKeys(
"video", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)
)
def per_batch_transform_on_device(self) -> Callable:
return ApplyToKeys(
"video",
K.VideoSequential(
K.Normalize(self.mean, self.std),
data_format=self.data_format,
same_on_frame=self.same_on_frame,
),
)
| 2.0625 | 2 |
| compileman/Compile_Result.py | danilocgsilva/CompileMan | 0 | 12785059 |
class Compile_Result:
def __init__(self):
self.success = None
self.message = None
def setSuccess(self):
self.success = True
return self
def setError(self, message: str):
self.success = False
self.message = message
return self
def getResult(self) -> bool:
return self.success
def getErrorMessage(self) -> str:
return self.message
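# Minimal usage sketch (not part of the original module): the setters return self,
# so results can be built and inspected fluently. The error message below is
# hypothetical.
ok = Compile_Result().setSuccess()
failed = Compile_Result().setError("missing header file")
assert ok.getResult() is True
assert failed.getResult() is False
assert failed.getErrorMessage() == "missing header file"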
| 2.75 | 3 |
| dict.py | jlogans/vampy2017cs | 1 | 12785060 |
d1 = {"key1":1, "key2":2, "key3":3, "key4":4, "key5":5}
sorted_keys = sorted(d1)
print(sorted_keys)
for key in sorted_keys:
print(d1[key])
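# Expected output: the keys print in sorted order,
# ['key1', 'key2', 'key3', 'key4', 'key5'], followed by the values 1 through 5.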
| 3.96875 | 4 |
| chapter3/run23.py | donngchao/python_code_snippets | 0 | 12785061 |
# encoding: utf-8
'''
@author: developer
@software: python
@file: run23.py
@time: 2021/8/7 7:10
@desc:
'''
'''
Description
At the 2008 Beijing Olympics, athletes from country A competed in finals over n days (1 <= n <= 17).
Count the number of gold, silver and bronze medals country A won, as well as the total medal count.
Input
n+1 lines: the first line is the number of days n that country A competed in finals; each of the
following n lines gives the gold, silver and bronze medals won on one day, separated by a space.
Output
One line with 4 integers: the totals of gold, silver and bronze medals and the overall medal count,
separated by a space.
Sample input
3
1 0 3
3 1 0
0 3 0
Sample output
4 4 3 11
'''
day_of_olympic = int(input())
medals = []
for day in range(day_of_olympic):
medals.append(input().split())
gold_medal = []
silver_medal = []
bronze_medal = []
for day in range(day_of_olympic):
gold, silver, bronze = int(medals[day][0]), int(medals[day][1]), int(medals[day][2])
gold_medal.append(gold)
silver_medal.append(silver)
bronze_medal.append(bronze)
total_gold = sum(gold_medal)
total_silver = sum(silver_medal)
total_bronze = sum(bronze_medal)
total_medal = total_gold + total_silver + total_bronze
print("%d %d %d %d" %(total_gold, total_silver, total_bronze, total_medal))
| 3.59375 | 4 |
| ozet/settings/dev.py | ozet-team/ozet-server | 0 | 12785062 |
<filename>ozet/settings/dev.py
# fmt: off
# flake8: noqa
from .base import *
INSTAGRAM_OAUTH_REDIRECT_URL = "https://staging-api.ozet.app/api/v1/member/user/me/instagram/oauth/authorize/"
CRONJOBS = []
| 1 | 1 |
| scripts/rule_system/POTATO/frontend/app.py | GKingA/offensive_text | 0 | 12785063 |
import argparse
import configparser
import copy
import datetime
import json
import os
import random
import re
import sys
import time
from collections import Counter, defaultdict
from contextlib import contextmanager
from io import StringIO
from threading import current_thread
import networkx as nx
import pandas as pd
import penman as pn
import requests
import streamlit as st
import streamlit.components.v1 as components
from potato.dataset.utils import default_pn_to_graph
from potato.graph_extractor.extract import FeatureEvaluator
from potato.models.trainer import GraphTrainer
from PIL import Image
from st_aggrid import AgGrid, DataReturnMode, GridOptionsBuilder, GridUpdateMode, JsCode
from streamlit.report_thread import REPORT_CONTEXT_ATTR_NAME
from tuw_nlp.grammar.text_to_4lang import TextTo4lang
from tuw_nlp.graph.fourlang import FourLang
from tuw_nlp.graph.utils import GraphFormulaMatcher, pn_to_graph, read_alto_output
if "false_graph_number" not in st.session_state:
st.session_state.false_graph_number = 0
if "true_graph_number" not in st.session_state:
st.session_state.true_graph_number = 0
if "false_neg_number" not in st.session_state:
st.session_state.false_neg_number = 0
if "predicted_num" not in st.session_state:
st.session_state.predicted_num = 0
if "whole_accuracy" not in st.session_state:
st.session_state.whole_accuracy = []
if "df_statistics" not in st.session_state:
st.session_state.df_statistics = pd.DataFrame
if "val_dataframe" not in st.session_state:
st.session_state.val_dataframe = pd.DataFrame
if "whole_accuracy_val" not in st.session_state:
st.session_state.whole_accuracy_val = []
if "feature_df" not in st.session_state:
st.session_state.feature_df = pd.DataFrame
if "clustered_words_path" not in st.session_state:
st.session_state.clustered_words_path = None
if "features" not in st.session_state:
st.session_state.features = {}
if "suggested_features" not in st.session_state:
st.session_state.suggested_features = {}
if "trained" not in st.session_state:
st.session_state.trained = False
if "ml_feature" not in st.session_state:
st.session_state.ml_feature = None
if "sens" not in st.session_state:
st.session_state.sens = []
if "min_edge" not in st.session_state:
st.session_state.min_edge = 0
if "rows_to_delete" not in st.session_state:
st.session_state.rows_to_delete = []
if "rls_after_delete" not in st.session_state:
st.session_state.rls_after_delete = []
def rerun():
raise st.experimental_rerun()
@contextmanager
def st_redirect(src, dst):
placeholder = st.empty()
output_func = getattr(placeholder, dst)
with StringIO() as buffer:
old_write = src.write
def new_write(b):
if getattr(current_thread(), REPORT_CONTEXT_ATTR_NAME, None):
buffer.write(b)
output_func(buffer.getvalue())
else:
old_write(b)
try:
src.write = new_write
yield
finally:
src.write = old_write
@contextmanager
def st_stdout(dst):
with st_redirect(sys.stdout, dst):
yield
def to_dot(graph, marked_nodes=set(), integ=False):
lines = ["digraph finite_state_machine {", "\tdpi=70;"]
# lines.append('\tordering=out;')
# sorting everything to make the process deterministic
node_lines = []
node_to_name = {}
for node, n_data in graph.nodes(data=True):
if integ:
d_node = d_clean(str(node))
else:
d_node = d_clean(n_data["name"]) if n_data["name"] else "None"
printname = d_node
node_to_name[node] = printname
if "expanded" in n_data and n_data["expanded"] and printname in marked_nodes:
node_line = '\t{0} [shape = circle, label = "{1}", \
style=filled, fillcolor=purple];'.format(
d_node, printname
).replace(
"-", "_"
)
elif "expanded" in n_data and n_data["expanded"]:
node_line = '\t{0} [shape = circle, label = "{1}", \
style="filled"];'.format(
d_node, printname
).replace(
"-", "_"
)
elif "fourlang" in n_data and n_data["fourlang"]:
node_line = '\t{0} [shape = circle, label = "{1}", \
style="filled", fillcolor=red];'.format(
d_node, printname
).replace(
"-", "_"
)
elif "substituted" in n_data and n_data["substituted"]:
node_line = '\t{0} [shape = circle, label = "{1}", \
style="filled"];'.format(
d_node, printname
).replace(
"-", "_"
)
elif printname in marked_nodes:
node_line = '\t{0} [shape = circle, label = "{1}", style=filled, fillcolor=lightblue];'.format(
d_node, printname
).replace(
"-", "_"
)
else:
node_line = '\t{0} [shape = circle, label = "{1}"];'.format(
d_node, printname
).replace("-", "_")
node_lines.append(node_line)
lines += sorted(node_lines)
edge_lines = []
for u, v, edata in graph.edges(data=True):
if "color" in edata:
d_node1 = node_to_name[u].replace("-", "_")
d_node2 = node_to_name[v].replace("-", "_")
edge_lines.append(
'\t{0} -> {1} [ label = "{2}" ];'.format(
d_node1, d_node2, edata["color"]
)
)
lines += sorted(edge_lines)
lines.append("}")
return "\n".join(lines)
def save_ruleset(path, features):
with open(path, "w+") as f:
json.dump(features, f)
def d_clean(string):
s = string
for c in "\\=@-,'\".!:;<>/{}[]()#^?":
s = s.replace(c, "_")
s = s.replace("$", "_dollars")
s = s.replace("%", "_percent")
s = s.replace("|", " ")
s = s.replace("*", " ")
if s == "#":
s = "_number"
keywords = ("graph", "node", "strict", "edge")
if re.match("^[0-9]", s) or s in keywords:
s = "X" + s
return s
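# Illustrative examples of the cleaning above: d_clean("well-being") -> "well_being",
# d_clean("100%") -> "X100_percent" (special characters replaced, then prefixed with
# "X" because the result starts with a digit), and d_clean("graph") -> "Xgraph"
# because "graph" is a Graphviz keyword.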
def get_df_from_rules(rules, negated_rules):
data = {"rules": rules, "negated_rules": negated_rules}
# Create DataFrame.
df = pd.DataFrame(data)
return df
def save_after_modify(hand_made_rules, classes):
st.session_state.features[classes] = copy.deepcopy(
st.session_state.rls_after_delete
)
st.session_state.feature_df = get_df_from_rules(
[";".join(feat[0]) for feat in st.session_state.features[classes]],
[";".join(feat[1]) for feat in st.session_state.features[classes]],
)
save_rules = hand_made_rules or "saved_features.json"
save_ruleset(save_rules, st.session_state.features)
st.session_state.rows_to_delete = []
rerun()
@st.cache(allow_output_mutation=True)
def load_text_to_4lang():
tfl = TextTo4lang("en", "en_nlp_cache")
return tfl
@st.cache()
def init_evaluator():
return FeatureEvaluator()
@st.cache(allow_output_mutation=True)
def read_train(path):
return pd.read_pickle(path)
def save_dataframe(data, path):
data.to_pickle(path)
@st.cache(allow_output_mutation=True)
def read_val(path):
return pd.read_pickle(path)
def train_df(df, min_edge=0):
with st_stdout("code"):
trainer = GraphTrainer(df)
features = trainer.prepare_and_train(min_edge=min_edge)
return features
def rule_chooser():
option = st.selectbox("Choose from the rules", st.session_state.sens)
G, _ = default_pn_to_graph(option.split(";")[0])
text_G, _ = default_pn_to_graph(option.split(";")[0])
st.graphviz_chart(to_dot(text_G), use_container_width=True)
nodes = [d_clean(n[1]["name"].split("_")[0]) for n in text_G.nodes(data=True)]
return nodes, option
def annotate_df(predicted):
for i, pred in enumerate(predicted):
if pred:
st.session_state.df.at[i, "label"] = st.session_state.inverse_labels[1]
st.session_state.df.at[i, "applied_rules"] = pred
else:
st.session_state.df.at[i, "applied_rules"] = []
if st.session_state.df.loc[i, "annotated"] == False:
st.session_state.df.at[i, "label"] = ""
def show_ml_feature(classes, hand_made_rules):
st.markdown(
f"<span>Feature: {st.session_state.ml_feature[0]}, Precision: <b>{st.session_state.ml_feature[1]:.3f}</b>, \
Recall: <b>{st.session_state.ml_feature[2]:.3f}</b>, Fscore: <b>{st.session_state.ml_feature[3]:.3f}</b>, \
Support: <b>{st.session_state.ml_feature[4]}</b></span>",
unsafe_allow_html=True,
)
accept_rule = st.button("Accept")
decline_rule = st.button("Decline")
if accept_rule:
st.session_state.features[classes].append(st.session_state.ml_feature[0])
st.session_state.ml_feature = None
if st.session_state.features[classes]:
st.session_state.feature_df = get_df_from_rules(
[";".join(feat[0]) for feat in st.session_state.features[classes]],
[";".join(feat[1]) for feat in st.session_state.features[classes]],
)
save_rules = hand_made_rules or "saved_features.json"
save_ruleset(save_rules, st.session_state.features)
rerun()
elif decline_rule:
st.session_state.ml_feature = None
rerun()
def extract_data_from_dataframe(option):
fp_graphs = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].False_positive_graphs
fp_sentences = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].False_positive_sens
tp_graphs = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].True_positive_graphs
tp_sentences = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].True_positive_sens
fn_graphs = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].False_negative_graphs
fn_sentences = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].False_negative_sens
prec = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].Precision
recall = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].Recall
fscore = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].Fscore
support = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].Support
predicted = st.session_state.df_statistics.iloc[
st.session_state.sens.index(option)
].Predicted
return (
fn_graphs,
fn_sentences,
fp_graphs,
fp_sentences,
fscore,
prec,
predicted,
recall,
support,
tp_graphs,
tp_sentences,
)
def graph_viewer(type, graphs, sentences, nodes):
graph_type = {
"FP": st.session_state.false_graph_number,
"TP": st.session_state.true_graph_number,
"FN": st.session_state.false_neg_number,
}
if st.button(f"Previous {type}"):
graph_type[type] = max(0, graph_type[type] - 1)
if st.button(f"Next {type}"):
graph_type[type] = min(
graph_type[type] + 1,
len(graphs) - 1,
)
if graph_type[type] > len(graphs) - 1:
graph_type[type] = 0
st.markdown(
f"<span><b>Sentence:</b> {sentences[graph_type[type]][0]}</span>",
unsafe_allow_html=True,
)
st.markdown(
f"<span><b>Gold label:</b> {sentences[graph_type[type]][1]}</span>",
unsafe_allow_html=True,
)
st.text(f"{type}: {len(graphs)}")
current_graph = graphs[graph_type[type]]
st.graphviz_chart(
to_dot(
current_graph,
marked_nodes=set(nodes),
),
use_container_width=True,
)
if type == "FP":
st.session_state.false_graph_number = graph_type[type]
elif type == "TP":
st.session_state.true_graph_number = graph_type[type]
elif type == "FN":
st.session_state.false_neg_number = graph_type[type]
def add_rule_manually(classes, hand_made_rules):
text = st.text_area("You can add a new rule here manually")
negated_text = st.text_area("You can modify the negated features here")
agree = st.button("Add rule to the ruleset")
if agree:
if not negated_text.strip():
negated_features = []
else:
negated_features = negated_text.split(";")
st.session_state.features[classes].append([[text], negated_features, classes])
if st.session_state.features[classes]:
st.session_state.feature_df = get_df_from_rules(
[";".join(feat[0]) for feat in st.session_state.features[classes]],
[";".join(feat[1]) for feat in st.session_state.features[classes]],
)
save_rules = hand_made_rules or "saved_features.json"
save_ruleset(save_rules, st.session_state.features)
rerun()
st.markdown(
f"<span><b>Or get suggestions by our ML!</b></span>",
unsafe_allow_html=True,
)
def rank_and_suggest(classes, data, evaluator):
suggest_new_rule = st.button("suggest new rules")
if suggest_new_rule:
if (
not st.session_state.df_statistics.empty
and st.session_state.sens
and st.session_state.suggested_features[classes]
):
features_to_rank = st.session_state.suggested_features[classes][:5]
with st.spinner("Ranking rules..."):
features_ranked = evaluator.rank_features(
classes,
features_to_rank,
data,
st.session_state.df_statistics.iloc[0].False_negative_indices,
)
suggested_feature = features_ranked[0]
st.session_state.suggested_features[classes].remove(suggested_feature[0])
st.session_state.ml_feature = suggested_feature
def supervised_mode(
evaluator, data, val_data, graph_format, feature_path, hand_made_rules
):
if hand_made_rules:
with open(hand_made_rules) as f:
st.session_state.features = json.load(f)
if not feature_path and not st.session_state.trained:
st.sidebar.title("Train your dataset!")
show_app = st.sidebar.button("Train")
st.session_state.min_edge = st.sidebar.number_input(
"Min edge in features", min_value=0, max_value=3, value=0, step=1
)
if show_app:
st.session_state.suggested_features = train_df(
data, st.session_state.min_edge
)
st.session_state.trained = True
with st_stdout("success"):
print("Success, your dataset is trained, wait for the app to load..")
time.sleep(3)
rerun()
st.markdown(
"<h3 style='text-align: center; color: black;'>Your dataset is shown below, click the train button to train your dataset!</h3>",
unsafe_allow_html=True,
)
sample_df = AgGrid(data, width="100%", fit_columns_on_grid_load=True)
st.write("label distribution:")
st.bar_chart(data.groupby("label").size())
st.write("sentence lenghts:")
st.bar_chart(data.text.str.len())
st.write("common words:")
st.bar_chart(
pd.Series(" ".join(data["text"]).lower().split()).value_counts()[:100]
)
if st.session_state.trained or feature_path:
col1, col2 = st.columns(2)
if (
feature_path
and os.path.exists(feature_path)
and not st.session_state.suggested_features
):
with open(feature_path) as f:
st.session_state.suggested_features = json.load(f)
if not st.session_state.features:
for key in st.session_state.suggested_features:
pop_len = (
5
if len(st.session_state.suggested_features[key]) > 5
else len(st.session_state.suggested_features[key])
)
st.session_state.features[key] = [
st.session_state.suggested_features[key].pop(0)
for _ in range(pop_len)
]
col1.header("Rule to apply")
col2.header("Graphs and results")
# if graph_format == "fourlang":
# tfl = load_text_to_4lang()
with col1:
classes = st.selectbox(
"Choose class", list(st.session_state.features.keys())
)
st.session_state.feature_df = get_df_from_rules(
[";".join(feat[0]) for feat in st.session_state.features[classes]],
[";".join(feat[1]) for feat in st.session_state.features[classes]],
)
with st.form("example form") as f:
gb = GridOptionsBuilder.from_dataframe(st.session_state.feature_df)
# make all columns editable
gb.configure_columns(["rules", "negated_rules"], editable=True)
gb.configure_selection(
"multiple",
use_checkbox=True,
groupSelectsChildren=True,
groupSelectsFiltered=True,
# pre_selected_rows=[1, 2]
)
go = gb.build()
ag = AgGrid(
st.session_state.feature_df,
gridOptions=go,
key="grid1",
allow_unsafe_jscode=True,
reload_data=True,
update_mode=GridUpdateMode.MODEL_CHANGED
| GridUpdateMode.VALUE_CHANGED,
width="100%",
theme="material",
fit_columns_on_grid_load=True,
)
delete_or_train = st.radio(
"Delete or Train selected rules", ("none", "delete", "train")
)
submit = st.form_submit_button(label="save updates")
evaluate = st.form_submit_button(label="evaluate selected")
if evaluate:
feature_list = []
selected_rules = (
ag["selected_rows"]
if ag["selected_rows"]
else ag["data"].to_dict(orient="records")
)
for rule in selected_rules:
positive_rules = (
rule["rules"].split(";")
if "rules" in rule and rule["rules"].strip()
else []
)
negated_rules = (
rule["negated_rules"].split(";")
if "negated_rules" in rule and rule["negated_rules"].strip()
else []
)
feature_list.append(
[
positive_rules,
negated_rules,
classes,
]
)
st.session_state.sens = [";".join(feat[0]) for feat in feature_list]
with st.spinner("Evaluating rules..."):
(
st.session_state.df_statistics,
st.session_state.whole_accuracy,
) = evaluator.evaluate_feature(
classes, feature_list, data, graph_format
)
(
st.session_state.val_dataframe,
st.session_state.whole_accuracy_val,
) = evaluator.evaluate_feature(
classes,
feature_list,
val_data,
graph_format,
)
st.success("Done!")
rerun()
if submit:
delete = delete_or_train == "delete"
train = delete_or_train == "train"
st.session_state.rows_to_delete = [
r["rules"] for r in ag["selected_rows"]
]
st.session_state.rls_after_delete = []
negated_list = ag["data"]["negated_rules"].tolist()
feature_list = []
for i, rule in enumerate(ag["data"]["rules"].tolist()):
if not negated_list[i].strip():
feature_list.append([rule.split(";"), [], classes])
else:
feature_list.append(
[
rule.split(";"),
negated_list[i].strip().split(";"),
classes,
]
)
if st.session_state.rows_to_delete and delete:
for r in feature_list:
if ";".join(r[0]) not in st.session_state.rows_to_delete:
st.session_state.rls_after_delete.append(r)
elif st.session_state.rows_to_delete and train:
st.session_state.rls_after_delete = copy.deepcopy(feature_list)
rule_to_train = st.session_state.rows_to_delete[0]
if ";" in rule_to_train or ".*" not in rule_to_train:
st.text("Only single and underspecified rules can be trained!")
else:
selected_words = evaluator.train_feature(
classes, rule_to_train, data, graph_format
)
for f in selected_words:
st.session_state.rls_after_delete.append([[f], [], classes])
else:
st.session_state.rls_after_delete = copy.deepcopy(feature_list)
if st.session_state.rls_after_delete and not delete:
save_after_modify(hand_made_rules, classes)
if st.session_state.rows_to_delete and delete_or_train == "delete":
with st.form("Delete form"):
st.write("The following rules will be deleted, do you accept it?")
st.write(st.session_state.rows_to_delete)
save_button = st.form_submit_button("Accept Delete")
if save_button:
save_after_modify(hand_made_rules, classes)
add_rule_manually(classes, hand_made_rules)
rank_and_suggest(classes, data, evaluator)
if st.session_state.ml_feature:
show_ml_feature(classes, hand_made_rules)
with col2:
if not st.session_state.df_statistics.empty and st.session_state.sens:
if st.session_state.sens:
nodes, option = rule_chooser()
st.markdown(
f"<span>Result of using all the rules: Precision: <b>{st.session_state.whole_accuracy[0]:.3f}</b>, \
Recall: <b>{st.session_state.whole_accuracy[1]:.3f}</b>, Fscore: <b>{st.session_state.whole_accuracy[2]:.3f}</b>, \
Support: <b>{st.session_state.whole_accuracy[3]}</b></span>",
unsafe_allow_html=True,
)
(
fn_graphs,
fn_sentences,
fp_graphs,
fp_sentences,
fscore,
prec,
predicted,
recall,
support,
tp_graphs,
tp_sentences,
) = extract_data_from_dataframe(option)
st.markdown(
f"<span>The rule's result: Precision: <b>{prec:.3f}</b>, Recall: <b>{recall:.3f}</b>, \
Fscore: <b>{fscore:.3f}</b>, Support: <b>{support}</b></span>",
unsafe_allow_html=True,
)
with st.expander("Show validation data", expanded=False):
val_prec = st.session_state.val_dataframe.iloc[
st.session_state.sens.index(option)
].Precision
val_recall = st.session_state.val_dataframe.iloc[
st.session_state.sens.index(option)
].Recall
val_fscore = st.session_state.val_dataframe.iloc[
st.session_state.sens.index(option)
].Fscore
val_support = st.session_state.val_dataframe.iloc[
st.session_state.sens.index(option)
].Support
st.markdown(
f"<span>Result of using all the rules on the validation data: Precision: <b>{st.session_state.whole_accuracy_val[0]:.3f}</b>, \
Recall: <b>{st.session_state.whole_accuracy_val[1]:.3f}</b>, Fscore: <b>{st.session_state.whole_accuracy_val[2]:.3f}</b>, \
Support: <b>{st.session_state.whole_accuracy_val[3]}</b></span>",
unsafe_allow_html=True,
)
st.markdown(
f"<span>The rule's result on the validation data: Precision: <b>{val_prec:.3f}</b>, \
Recall: <b>{val_recall:.3f}</b>, Fscore: <b>{val_fscore:.3f}</b>, \
Support: <b>{val_support}</b></span>",
unsafe_allow_html=True,
)
tp_fp_fn_choice = (
"True Positive graphs",
"False Positive graphs",
"False Negative graphs",
)
tp_fp_fn = st.selectbox(
"Select the graphs you want to view", tp_fp_fn_choice
)
if tp_fp_fn == "False Positive graphs":
if fp_graphs:
graph_viewer("FP", fp_graphs, fp_sentences, nodes)
elif tp_fp_fn == "True Positive graphs":
if tp_graphs:
graph_viewer("TP", tp_graphs, tp_sentences, nodes)
elif tp_fp_fn == "False Negative graphs":
if fn_graphs:
graph_viewer("FN", fn_graphs, fn_sentences, nodes)
def unsupervised_mode(
evaluator, train_data, graph_format, feature_path, hand_made_rules
):
data = read_train(train_data)
if hand_made_rules:
with open(hand_made_rules) as f:
st.session_state.features = json.load(f)
if "df" not in st.session_state:
st.session_state.df = data.copy()
if "annotated" not in st.session_state.df:
st.session_state.df["annotated"] = False
if "applied_rules" not in st.session_state.df:
st.session_state.df["applied_rules"] = [
[] for _ in range(len(st.session_state.df))
]
if "index" not in st.session_state.df:
st.session_state.df.reset_index(level=0, inplace=True)
if "df_to_train" not in st.session_state:
st.session_state.df_to_train = pd.DataFrame
if "applied_rules" not in st.session_state:
st.session_state.applied_rules = []
df_annotated = st.session_state.df[st.session_state.df.annotated == True][
["index", "text", "label", "applied_rules"]
]
df_unannotated = st.session_state.df[st.session_state.df.annotated == False][
["index", "text", "label", "applied_rules"]
]
if "labels" not in st.session_state:
st.text("Before we start, please provide labels you want to train")
user_input = st.text_input("label encoding", "NOT:0,OFF:1")
st.session_state.labels = {
label.split(":")[0]: int(label.split(":")[1])
for label in user_input.split(",")
}
st.write(st.session_state.labels)
st.session_state.inverse_labels = {
v: k for (k, v) in st.session_state.labels.items()
}
else:
st.markdown(
f"<span><b>Annotate samples here:</b></span>",
unsafe_allow_html=True,
)
if st.session_state.applied_rules:
st.markdown(
f"<span>Currently the following rules are applied:</span>",
unsafe_allow_html=True,
)
st.write(st.session_state.applied_rules)
with st.form("annotate form") as f:
gb = GridOptionsBuilder.from_dataframe(df_unannotated)
gb.configure_default_column(
editable=True,
resizable=True,
sorteable=True,
wrapText=True,
autoHeight=True,
)
# make all columns editable
gb.configure_selection(
"multiple",
use_checkbox=True,
groupSelectsChildren=True,
groupSelectsFiltered=True,
)
go = gb.build()
ag = AgGrid(
df_unannotated,
gridOptions=go,
key="grid2",
allow_unsafe_jscode=True,
reload_data=True,
update_mode=GridUpdateMode.MODEL_CHANGED | GridUpdateMode.VALUE_CHANGED,
width="100%",
theme="material",
fit_columns_on_grid_load=True,
)
annotate = st.form_submit_button("Annotate")
if annotate:
if ag["selected_rows"]:
for row in ag["selected_rows"]:
st.session_state.df.loc[
row["index"], "label"
] = st.session_state.inverse_labels[1]
st.session_state.df.loc[row["index"], "annotated"] = True
save_dataframe(st.session_state.df, train_data)
rerun()
st.markdown(
f"<span>Samples you have already annotated:</span>",
unsafe_allow_html=True,
)
with st.form("annotated form") as f:
gb = GridOptionsBuilder.from_dataframe(df_annotated)
gb.configure_default_column(
editable=True,
resizable=True,
sorteable=True,
wrapText=True,
)
# make all columns editable
gb.configure_selection(
"multiple",
use_checkbox=True,
groupSelectsChildren=True,
groupSelectsFiltered=True,
)
go = gb.build()
ag_ann = AgGrid(
df_annotated,
gridOptions=go,
key="grid3",
allow_unsafe_jscode=True,
reload_data=True,
update_mode=GridUpdateMode.MODEL_CHANGED | GridUpdateMode.VALUE_CHANGED,
width="100%",
theme="material",
fit_columns_on_grid_load=True,
)
clear_annotate = st.form_submit_button("Clear annotation")
if clear_annotate:
if ag_ann["selected_rows"]:
for row in ag_ann["selected_rows"]:
st.session_state.df.loc[
row["index"], "label"
] = st.session_state.inverse_labels[1]
st.session_state.df.loc[row["index"], "annotated"] = False
st.session_state.df.loc[row["index"], "label"] = ""
save_dataframe(st.session_state.df, train_data)
rerun()
train = st.button("Train!")
if train:
df_to_train = st.session_state.df.copy()
df_to_train = df_to_train[df_to_train.applied_rules.map(len) == 0]
if not df_to_train.empty:
st.session_state.trained = True
df_to_train["label"] = df_to_train["label"].apply(
lambda x: st.session_state.inverse_labels[0] if not x else x
)
df_to_train["label_id"] = df_to_train["label"].apply(
lambda x: st.session_state.labels[x]
)
positive_size = df_to_train.groupby("label").size()[
st.session_state.inverse_labels[1]
]
df_to_train = df_to_train.groupby("label").sample(
n=positive_size, random_state=1, replace=True
)
st.session_state.suggested_features = train_df(
df_to_train, st.session_state.min_edge
)
st.session_state.df_to_train = df_to_train
st.session_state.df_statistics = pd.DataFrame
for key in st.session_state.suggested_features:
if key not in st.session_state.features:
st.session_state.features[key] = [
st.session_state.suggested_features[key].pop(0)
]
else:
st.session_state.features[key].append(
st.session_state.suggested_features[key].pop(0)
)
else:
st.write("Empty dataframe!")
col1, col2 = st.columns(2)
if st.session_state.trained and st.session_state.suggested_features:
with col1:
if not st.session_state.features:
for key in st.session_state.suggested_features:
st.session_state.features[key] = [
st.session_state.suggested_features[key].pop(0)
]
classes = st.selectbox(
"Choose class", list(st.session_state.features.keys())
)
st.session_state.feature_df = get_df_from_rules(
[";".join(feat[0]) for feat in st.session_state.features[classes]],
[";".join(feat[1]) for feat in st.session_state.features[classes]],
)
with st.form("example form") as f:
gb = GridOptionsBuilder.from_dataframe(st.session_state.feature_df)
# make all columns editable
gb.configure_columns(["rules", "negated_rules"], editable=True)
gb.configure_selection(
"multiple",
use_checkbox=True,
groupSelectsChildren=True,
groupSelectsFiltered=True,
# pre_selected_rows=[1, 2]
)
go = gb.build()
ag = AgGrid(
st.session_state.feature_df,
gridOptions=go,
key="grid1",
allow_unsafe_jscode=True,
reload_data=True,
update_mode=GridUpdateMode.MODEL_CHANGED
| GridUpdateMode.VALUE_CHANGED,
width="100%",
theme="material",
fit_columns_on_grid_load=True,
)
delete_or_train = st.radio(
"Delete or Train selected rules", ("none", "delete", "train")
)
submit = st.form_submit_button(label="save updates")
evaluate = st.form_submit_button(label="evaluate selected")
annotate = st.form_submit_button(label="annotate based on selected")
feature_list = []
selected_rules = (
ag["selected_rows"]
if ag["selected_rows"]
else ag["data"].to_dict(orient="records")
)
for rule in selected_rules:
positive_rules = (
rule["rules"].split(";")
if "rules" in rule and rule["rules"].strip()
else []
)
negated_rules = (
rule["negated_rules"].split(";")
if "negated_rules" in rule and rule["negated_rules"].strip()
else []
)
feature_list.append(
[
positive_rules,
negated_rules,
classes,
]
)
if evaluate or annotate:
st.session_state.sens = [";".join(feat[0]) for feat in feature_list]
with st.spinner("Evaluating rules..."):
(
st.session_state.df_statistics,
st.session_state.whole_accuracy,
) = evaluator.evaluate_feature(
classes,
feature_list,
st.session_state.df,
graph_format,
)
st.success("Done!")
if annotate:
predicted_rules = [[] for _ in range(len(st.session_state.df))]
st.session_state.applied_rules = st.session_state.sens
for j, opt in enumerate(st.session_state.sens):
predicted = st.session_state.df_statistics.iloc[j].Predicted
predicted_indices = [
i for i, pred in enumerate(predicted) if pred == 1
]
for ind in predicted_indices:
predicted_rules[ind].append(opt)
annotate_df(predicted_rules)
st.session_state.trained = False
rerun()
if submit:
delete = delete_or_train == "delete"
train = delete_or_train == "train"
st.session_state.rows_to_delete = [
r["rules"] for r in ag["selected_rows"]
]
st.session_state.rls_after_delete = []
negated_list = ag["data"]["negated_rules"].tolist()
feature_list = []
for i, rule in enumerate(ag["data"]["rules"].tolist()):
if not negated_list[i].strip():
feature_list.append([rule.split(";"), [], classes])
else:
feature_list.append(
[
rule.split(";"),
negated_list[i].strip().split(";"),
classes,
]
)
if st.session_state.rows_to_delete and delete:
for r in feature_list:
if ";".join(r[0]) not in st.session_state.rows_to_delete:
st.session_state.rls_after_delete.append(r)
elif st.session_state.rows_to_delete and train:
st.session_state.rls_after_delete = copy.deepcopy(feature_list)
rule_to_train = st.session_state.rows_to_delete[0]
if ";" in rule_to_train or ".*" not in rule_to_train:
st.text(
"Only single and underspecified rules can be trained!"
)
else:
selected_words = evaluator.train_feature(
classes,
rule_to_train,
st.session_state.df,
graph_format,
)
for f in selected_words:
st.session_state.rls_after_delete.append(
[[f], [], classes]
)
else:
st.session_state.rls_after_delete = copy.deepcopy(feature_list)
if st.session_state.rls_after_delete and not delete:
save_after_modify(hand_made_rules, classes)
if st.session_state.rows_to_delete and delete_or_train == "delete":
with st.form("Delete form"):
st.write(
"The following rules will be deleted, do you accept it?"
)
st.write(st.session_state.rows_to_delete)
save_button = st.form_submit_button("Accept Delete")
if save_button:
save_after_modify(hand_made_rules, classes)
add_rule_manually(classes, hand_made_rules)
rank_and_suggest(classes, st.session_state.df, evaluator)
if st.session_state.ml_feature:
show_ml_feature(classes, hand_made_rules)
with col2:
if not st.session_state.df_statistics.empty and st.session_state.sens:
if st.session_state.sens:
nodes, option = rule_chooser()
st.markdown(
f"<span>Result of using all the rules: Precision: <b>{st.session_state.whole_accuracy[0]:.3f}</b>, \
Recall: <b>{st.session_state.whole_accuracy[1]:.3f}</b>, Fscore: <b>{st.session_state.whole_accuracy[2]:.3f}</b>, \
Support: <b>{st.session_state.whole_accuracy[3]}</b></span>",
unsafe_allow_html=True,
)
(
fn_graphs,
fn_sentences,
fp_graphs,
fp_sentences,
fscore,
prec,
predicted,
recall,
support,
tp_graphs,
tp_sentences,
) = extract_data_from_dataframe(option)
st.markdown(
f"<span>The rule's result: Precision: <b>{prec:.3f}</b>, Recall: <b>{recall:.3f}</b>, \
Fscore: <b>{fscore:.3f}</b>, Support: <b>{support}</b></span>",
unsafe_allow_html=True,
)
tp_fp_fn_choice = (
"Predicted",
"True Positive graphs",
"False Positive graphs",
"False Negative graphs",
)
tp_fp_fn = st.selectbox(
"Select the option you want to view", tp_fp_fn_choice
)
current_graph = None
if tp_fp_fn == "Predicted":
predicted_inds = [
i for i, pred in enumerate(predicted) if pred == 1
]
if st.button("Previous Predicted"):
st.session_state.predicted_num = max(
0, st.session_state.predicted_num - 1
)
if st.button("Next Predicted"):
st.session_state.predicted_num = min(
st.session_state.predicted_num + 1,
len(predicted_inds) - 1,
)
if st.session_state.predicted_num > len(predicted_inds) - 1:
st.session_state.predicted_num = 0
st.markdown(
f"<span><b>Sentence:</b> {st.session_state.df.iloc[predicted_inds[st.session_state.predicted_num]].text}</span>",
unsafe_allow_html=True,
)
st.markdown(
f"<span><b>Gold label:</b> {st.session_state.df.iloc[predicted_inds[st.session_state.predicted_num]].label}</span>",
unsafe_allow_html=True,
)
st.text(f"Predicted: {len(predicted_inds)}")
current_graph = st.session_state.df.iloc[
predicted_inds[st.session_state.predicted_num]
].graph
st.graphviz_chart(
to_dot(
current_graph,
marked_nodes=set(nodes),
),
use_container_width=True,
)
elif tp_fp_fn == "False Positive graphs":
if fp_graphs:
graph_viewer("FP", fp_graphs, fp_sentences, nodes)
elif tp_fp_fn == "True Positive graphs":
if tp_graphs:
graph_viewer("TP", tp_graphs, tp_sentences, nodes)
elif tp_fp_fn == "False Negative graphs":
if fn_graphs:
graph_viewer("FN", fn_graphs, fn_sentences, nodes)
def get_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument("-t", "--train-data", type=str, required=True)
parser.add_argument("-v", "--val-data", type=str)
parser.add_argument(
"-sr",
"--suggested-rules",
default=None,
type=str,
help="Rules extracted automatically from python. If not present, the UI will automatically train it.",
)
parser.add_argument(
"-hr",
"--hand-rules",
default=None,
type=str,
help="Rules extracted with the UI. If provided, the UI will load them.",
)
parser.add_argument("-m", "--mode", default="supervised", type=str)
parser.add_argument("-g", "--graph-format", default="fourlang", type=str)
return parser.parse_args()
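# A typical invocation (file names are placeholders); note that streamlit forwards
# arguments after "--" to the script's own parser:
#   streamlit run app.py -- -t train_dataset.pickle -v val_dataset.pickle -m supervised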
def main(args):
st.set_page_config(layout="wide")
st.markdown(
"<h1 style='text-align: center; color: black;'>Rule extraction framework</h1>",
unsafe_allow_html=True,
)
evaluator = init_evaluator()
data = read_train(args.train_data)
if args.val_data:
val_data = read_val(args.val_data)
graph_format = args.graph_format
feature_path = args.suggested_rules
hand_made_rules = args.hand_rules
mode = args.mode
if mode == "supervised":
assert args.val_data
supervised_mode(
evaluator, data, val_data, graph_format, feature_path, hand_made_rules
)
elif mode == "unsupervised":
unsupervised_mode(
evaluator, args.train_data, graph_format, feature_path, hand_made_rules
)
if __name__ == "__main__":
args = get_args()
main(args)
| 1.851563 | 2 |
| benchmark/lue/benchmark/export_results.py | computationalgeography/lue | 2 | 12785064 |
import lue.data_model as ldm
import numpy as np
import csv
def export_partition_shape_results(
lue_dataset,
csv_writer):
# Assert that the number of array shapes for which experiments were
# performed is 1
lue_array = lue_dataset.array.array
assert lue_array.shape.value.nr_arrays == 1
# For each array shape for which experiments were performed
lue_measurement = lue_dataset.benchmark.measurement
array_shapes = lue_measurement.array_shape.value[:]
assert np.all(array_shapes == array_shapes[0])
count = lue_measurement.duration.value.array_shape[:][0]
lue_partition = lue_dataset.partition.partition
partition_shape = lue_measurement.partition_shape.value[:]
nr_partitions = lue_measurement.nr_partitions.value[:,-1]
assert len(partition_shape) == len(nr_partitions)
if count == 1:
assert False, "Implement!"
else:
# Write the following columns:
# - partition_shape
# - nr_partitions
# - {mean,std}_duration
csv_writer.writerow([
# "partition_shape",
"partition_size",
"nr_partitions",
"mean_duration",
"std_duration",
])
mean_duration = \
lue_partition.properties["mean_duration_{}".format(0)].value[:]
std_duration = \
lue_partition.properties["std_duration_{}".format(0)].value[:]
for n in range(len(partition_shape)):
csv_writer.writerow([
# "{},{}".format(*partition_shape[n]),
np.prod(partition_shape[n]),
nr_partitions[n],
mean_duration[n],
std_duration[n],
])
def export_strong_scaling_results(
lue_dataset,
csv_writer):
lue_measurement = lue_dataset.benchmark.measurement
count = lue_measurement.duration.value.array_shape[:][0]
nr_workers = lue_measurement.nr_workers.value[:]
sort_idxs = np.argsort(nr_workers)
nr_workers = nr_workers[sort_idxs]
if count == 1:
# Write the following columns:
# - nr_workers
# - relative_speed_up
# - relative_efficiency
# - lups
csv_writer.writerow([
"nr_workers",
"duration",
"relative_speed_up",
"relative_efficiency",
"lups",
])
lue_scaling = lue_dataset.benchmark.scaling
duration = lue_measurement.duration.value[:][sort_idxs]
relative_speed_up = lue_scaling.relative_speed_up.value[:][sort_idxs]
relative_efficiency = lue_scaling.relative_efficiency.value[:][sort_idxs]
lups = lue_scaling.lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
duration[n][0],
relative_speed_up[n][0],
relative_efficiency[n][0],
lups[n][0],
])
else:
# Write the following columns:
# - nr_workers
# - {mean,std}_duration
# - {mean,std}_relative_efficiency
# - {mean,std}_lups
csv_writer.writerow([
"nr_workers",
"mean_duration",
"std_duration",
"mean_relative_efficiency",
"std_relative_efficiency",
"mean_lups",
"std_lups",
])
lue_scaling = lue_dataset.benchmark.scaling
mean_duration = lue_scaling.mean_duration.value[:][sort_idxs]
std_duration = lue_scaling.std_duration.value[:][sort_idxs]
mean_relative_efficiency = lue_scaling.mean_relative_efficiency.value[:][sort_idxs]
std_relative_efficiency = lue_scaling.std_relative_efficiency.value[:][sort_idxs]
mean_lups = lue_scaling.mean_lups.value[:][sort_idxs]
std_lups = lue_scaling.std_lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
mean_duration[n],
std_duration[n],
mean_relative_efficiency[n],
std_relative_efficiency[n],
mean_lups[n],
std_lups[n],
])
def export_weak_scaling_results(
lue_dataset,
csv_writer):
lue_measurement = lue_dataset.benchmark.measurement
count = lue_measurement.duration.value.array_shape[:][0]
nr_workers = lue_measurement.nr_workers.value[:]
sort_idxs = np.argsort(nr_workers)
nr_workers = nr_workers[sort_idxs]
if count == 1:
# Write the following columns:
# - nr_workers
# - duration
# - relative_efficiency
# - lups
csv_writer.writerow([
"nr_workers",
"duration",
"relative_efficiency",
"lups",
])
lue_scaling = lue_dataset.benchmark.scaling
duration = lue_measurement.duration.value[:]
relative_efficiency = lue_scaling.relative_efficiency.value[:][sort_idxs]
lups = lue_scaling.lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
duration[n][0],
relative_efficiency[n][0],
lups[n][0],
])
else:
# Write the following columns:
# - nr_workers
# - {mean,std}_duration
# - {mean,std}_relative_efficiency
# - {mean,std}_lups
csv_writer.writerow([
"nr_workers",
"mean_duration",
"std_duration",
"mean_relative_efficiency",
"std_relative_efficiency",
"mean_lups",
"std_lups",
])
lue_scaling = lue_dataset.benchmark.scaling
mean_duration = lue_scaling.mean_duration.value[:][sort_idxs]
std_duration = lue_scaling.std_duration.value[:][sort_idxs]
mean_relative_efficiency = lue_scaling.mean_relative_efficiency.value[:][sort_idxs]
std_relative_efficiency = lue_scaling.std_relative_efficiency.value[:][sort_idxs]
mean_lups = lue_scaling.mean_lups.value[:][sort_idxs]
std_lups = lue_scaling.std_lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
mean_duration[n],
std_duration[n],
mean_relative_efficiency[n],
std_relative_efficiency[n],
mean_lups[n],
std_lups[n],
])
def export_results(
lue_dataset_pathname,
csv_file_pathname):
lue_dataset = ldm.open_dataset(lue_dataset_pathname, "r")
kind = lue_dataset.benchmark.meta_information.kind.value[:][0]
with open(csv_file_pathname, "w") as csv_file:
csv_writer = csv.writer(csv_file)
export_by_kind = {
"partition_shape": export_partition_shape_results,
"strong_scaling": export_strong_scaling_results,
"weak_scaling": export_weak_scaling_results,
}
export_by_kind[kind](lue_dataset, csv_writer)
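# Minimal usage sketch (pathnames are placeholders, not from the original script):
# export_results("benchmark_results.lue", "benchmark_results.csv")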
| 2.3125 | 2 |
| src/playbacker/clock.py | vrslev/playbacker | 1 | 12785065 |
import time
from dataclasses import dataclass, field
from threading import Event, Thread
from typing import Callable, NoReturn
@dataclass
class Clock:
callback: Callable[[], None] = field(repr=False)
thread: Thread = field(init=False, repr=False)
started: Event = field(default_factory=Event, init=False, repr=False)
lag: float = field(init=False)
previous_time: float = field(init=False, repr=False)
def __post_init__(self) -> None:
self.thread = Thread(daemon=True, target=self.run)
self.thread.start()
def start(self) -> None:
self.previous_time = time.monotonic()
self.started.set()
def pause(self) -> None:
self.started.clear()
def destroy(self) -> None:
self.thread.join(0)
    def _sleep(self) -> None:
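        # Sleep for slightly less than the time remaining until the next tick;
        # the 0.925 factor presumably leaves headroom for time.sleep() overshoot.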
result = self.lag + self.previous_time - time.monotonic()
sleep_for = result * 0.925 if result > 0 else 0
time.sleep(sleep_for)
def _tick(self) -> None:
self._sleep()
self.previous_time += self.lag
self.callback()
def _run_once(self) -> None:
self.started.wait()
while self.started.is_set():
self._tick()
def run(self) -> NoReturn:
while True:
self._run_once()
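# A minimal usage sketch (the callback and interval below are illustrative and not
# part of the original module):
#
#     clock = Clock(callback=lambda: print("tick"))
#     clock.lag = 0.5       # seconds between ticks; must be set before start()
#     clock.start()         # the background thread now calls the callback every `lag` seconds
#     ...
#     clock.pause()
#     clock.destroy()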
| 2.859375
| 3
|
examples/varying_model_parameters/scan_torsions/scan_torsions.py
|
shirtsgroup/foldamers
| 1
|
12785066
|
import os
from statistics import mean
import numpy as np
import matplotlib.pyplot as pyplot
from simtk import unit
from simtk.openmm.app.pdbfile import PDBFile
from foldamers.cg_model.cgmodel import CGModel
from foldamers.parameters.reweight import *
from foldamers.thermo.calc import *
from foldamers.ensembles.ens_build import *
from cg_openmm.simulation.rep_exch import *
from cg_openmm.simulation.tools import *
# Job settings
scan_sc_bb_bb_sc_torsions = True
calculate_dQ = True
calculate_free_energies = True
evaluate_heat_capacity = True
output_directory = "output"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
# Number of grid points to scan (around initial angle definition)
grid_points = 3
# Configure Yank (replica exchange) simulation settings
print_frequency = 5 # Number of steps to skip when printing output
total_simulation_time = 500.0 * unit.picosecond
simulation_time_step = 5.0 * unit.femtosecond
number_replicas = 30
min_temp = 1.0 * unit.kelvin
max_temp = 400.0 * unit.kelvin
temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)
# Model settings
polymer_length = 12
backbone_lengths = [1]
sidechain_lengths = [1]
sidechain_positions = [0]
include_bond_forces = False
include_bond_angle_forces = True
include_nonbonded_forces = True
include_torsion_forces = True
constrain_bonds = True
# Bond definitions
bond_length = 7.5 * unit.angstrom
bond_lengths = {
"bb_bb_bond_length": bond_length,
"bb_sc_bond_length": bond_length,
"sc_sc_bond_length": bond_length,
}
bond_force_constant = 0 * unit.kilocalorie_per_mole / unit.nanometer / unit.nanometer
bond_force_constants = {
"bb_bb_bond_k": bond_force_constant,
"bb_sc_bond_k": bond_force_constant,
"sc_sc_bond_k": bond_force_constant,
}
# Particle definitions
mass = 100.0 * unit.amu
masses = {"backbone_bead_masses": mass, "sidechain_bead_masses": mass}
r_min = 3.0 * bond_length # Lennard-Jones potential r_min
sigma = r_min / (2.0 ** (1 / 6)) # Factor of /(2.0**(1/6)) is applied to convert r_min to sigma
sigmas = {"bb_sigma": sigma, "sc_sigma": sigma}
epsilon = 0.05 * unit.kilocalorie_per_mole
epsilons = {"bb_eps": epsilon, "sc_eps": epsilon}
# Bond angle definitions
bond_angle_force_constant = 0.0001 * unit.kilocalorie_per_mole / unit.radian / unit.radian
bond_angle_force_constants = {
"bb_bb_bb_angle_k": bond_angle_force_constant,
"bb_bb_sc_angle_k": bond_angle_force_constant,
}
bb_bb_bb_equil_bond_angle = 120.0 * (
3.14 / 180.0
) # OpenMM expects angle definitions in units of radians
bb_bb_sc_equil_bond_angle = 120.0 * (3.14 / 180.0)
equil_bond_angles = {
"bb_bb_bb_angle_0": bb_bb_bb_equil_bond_angle,
"bb_bb_sc_angle_0": bb_bb_sc_equil_bond_angle,
}
# Torsion angle definitions (Used to establish a scanning range below)
torsion_force_constant = 0.01 * unit.kilocalorie_per_mole / unit.radian / unit.radian
if scan_sc_bb_bb_sc_torsions == True:
torsion_force_constants = {
"bb_bb_bb_bb_torsion_k": torsion_force_constant,
"sc_bb_bb_sc_torsion_k": torsion_force_constant,
}
bb_bb_bb_bb_equil_torsion_angle = 78.0 * (
3.14 / 180.0
) # OpenMM defaults to units of radians for angle definitions
sc_bb_bb_sc_equil_torsion_angle = 120.0 * (3.14 / 180.0)
equil_torsion_angles = {
"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle,
"sc_bb_bb_sc_torsion_0": sc_bb_bb_sc_equil_torsion_angle,
}
torsion_periodicities = {"bb_bb_bb_bb_period": 1, "sc_bb_bb_sc_period": 1}
else:
torsion_force_constants = {"bb_bb_bb_bb_torsion_k": torsion_force_constant}
bb_bb_bb_bb_equil_torsion_angle = 78.0 * (
3.14 / 180.0
) # OpenMM defaults to units of radians for angle definitions
equil_torsion_angles = {"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle}
torsion_periodicities = {"bb_bb_bb_bb_period": 1}
# Get initial positions from local file
positions = PDBFile("helix.pdb").getPositions()
# Build a coarse grained model
cgmodel = CGModel(
polymer_length=polymer_length,
backbone_lengths=backbone_lengths,
sidechain_lengths=sidechain_lengths,
sidechain_positions=sidechain_positions,
masses=masses,
sigmas=sigmas,
epsilons=epsilons,
bond_lengths=bond_lengths,
bond_force_constants=bond_force_constants,
bond_angle_force_constants=bond_angle_force_constants,
torsion_force_constants=torsion_force_constants,
equil_bond_angles=equil_bond_angles,
equil_torsion_angles=equil_torsion_angles,
torsion_periodicities=torsion_periodicities,
include_nonbonded_forces=include_nonbonded_forces,
include_bond_forces=include_bond_forces,
include_bond_angle_forces=include_bond_angle_forces,
include_torsion_forces=include_torsion_forces,
constrain_bonds=constrain_bonds,
positions=positions,
)
# Run test simulations (NVT) with this coarse-grained model at the minimum and maximum temperatures
# to make sure the parameters are reasonable before attempting replica exchange simulations
# (If high-T simulations fail then we need to modify the model parameters)
test_simulation_time = 50.0 * unit.picosecond
print_frequency = 5
temperature = temperature_list[0]
output_directory = str("test_" + str(round(temperature._value, 1)))
if not os.path.exists(output_directory):
os.mkdir(output_directory)
run_simulation(
cgmodel,
output_directory,
test_simulation_time,
simulation_time_step,
temperature,
print_frequency,
)
temperature = temperature_list[-1]
output_directory = str("test_" + str(round(temperature._value, 1)))
if not os.path.exists(output_directory):
os.mkdir(output_directory)
run_simulation(
cgmodel,
output_directory,
test_simulation_time,
simulation_time_step,
temperature,
print_frequency,
)
# Reset the output directory
output_directory = "output"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
# Create a list of the torsion angles that we will investigate in our parameter scan
bb_bb_bb_bb_equil_torsion_angles = [
float(bb_bb_bb_bb_equil_torsion_angle + i * 0.05) for i in range(-grid_points, grid_points, 1)
]
if scan_sc_bb_bb_sc_torsions == True:
sc_bb_bb_sc_equil_torsion_angles = [
float(sc_bb_bb_sc_equil_torsion_angle + i * 0.05)
for i in range(-grid_points, grid_points, 1)
]
else:
sc_bb_bb_sc_equil_torsion_angles = [0.0]
if calculate_dQ:
# Set parameters for evaluating native contacts
native_structure_contact_distance_cutoff = 1.00 * cgmodel.get_sigma(
0
) # This distance cutoff determines which nonbonded interactions are considered 'native' contacts
native_fraction_cutoff = (
0.95 # The threshold fraction of native contacts above which a pose is considered 'native'
)
nonnative_fraction_cutoff = 0.95 # The threshold fraction of native contacts below which a pose is considered 'nonnative'
native_ensemble_size = 10
nonnative_ensemble_size = 10
decorrelate = True
# Build arrays to store data for each model parameter scan/grid point
dQ_list = []
df_ij_list = []
ddf_ij_list = []
Delta_u_list = []
dDelta_u_list = []
Delta_s_list = []
dDelta_s_list = []
C_v_list = []
dC_v_list = []
# This is where we start evaluating the properties of models with different equilibrium torsion angles
for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles:
for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles:
if scan_sc_bb_bb_sc_torsions == True:
equil_torsion_angles = {
"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle,
"sc_bb_bb_sc_torsion_0": sc_bb_bb_sc_equil_torsion_angle,
}
else:
equil_torsion_angles = {"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle}
# Build a coarse grained model that has the torsion parameters for this grid point.
positions = PDBFile("helix.pdb").getPositions()
cgmodel = CGModel(
polymer_length=polymer_length,
backbone_lengths=backbone_lengths,
sidechain_lengths=sidechain_lengths,
sidechain_positions=sidechain_positions,
masses=masses,
sigmas=sigmas,
epsilons=epsilons,
bond_lengths=bond_lengths,
bond_force_constants=bond_force_constants,
bond_angle_force_constants=bond_angle_force_constants,
torsion_force_constants=torsion_force_constants,
equil_bond_angles=equil_bond_angles,
equil_torsion_angles=equil_torsion_angles,
torsion_periodicities=torsion_periodicities,
include_nonbonded_forces=include_nonbonded_forces,
include_bond_forces=include_bond_forces,
include_bond_angle_forces=include_bond_angle_forces,
include_torsion_forces=include_torsion_forces,
constrain_bonds=constrain_bonds,
positions=positions,
)
if scan_sc_bb_bb_sc_torsions == True:
output_data = str(
str(output_directory)
+ "/torsions_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".nc"
)
file_name = str(
str(output_directory)
+ "/re_min_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".pdb"
)
else:
output_data = str(
str(output_directory)
+ "/torsions_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".nc"
)
file_name = str(
str(output_directory)
+ "/re_min_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".pdb"
)
if os.path.exists(file_name):
print("\n")
print("Reading existing simulation data for a coarse grained model")
print(
"with bb_bb_bb_bb torsion angles of "
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
if scan_sc_bb_bb_sc_torsions == True:
print(
"and sc_bb_bb_sc torsion angles of "
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
print("\n")
# Search for existing data, and reading it if possible
replica_energies, replica_positions, replica_states = read_replica_exchange_data(
system=cgmodel.system,
topology=cgmodel.topology,
temperature_list=temperature_list,
output_data=output_data,
print_frequency=print_frequency,
)
# Find the lowest energy pose for this model
native_structure = PDBFile(file_name).getPositions()
else:
print("\n")
print("Performing simulations for a coarse grained model")
print(
"with bb_bb_bb_bb torsion angles of "
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
if scan_sc_bb_bb_sc_torsions == True:
print(
"and sc_bb_bb_sc torsion angles of "
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
print("\n")
# Run a replica exchange simulation with this cgmodel
replica_energies, replica_positions, replica_states = run_replica_exchange(
cgmodel.topology,
cgmodel.system,
cgmodel.positions,
temperature_list=temperature_list,
simulation_time_step=simulation_time_step,
total_simulation_time=total_simulation_time,
print_frequency=print_frequency,
output_data=output_data,
)
native_structure = get_native_structure(
replica_positions, replica_energies, temperature_list
)
file = open(file_name, "w")
PDBFile.writeFile(cgmodel.topology, native_structure, file=file)
file.close()
if calculate_dQ:
native_structure_contact_distance_cutoff = 1.15 * cgmodel.get_sigma(
0
) # This distance cutoff determines which nonbonded interactions are considered 'native' contacts
native_fraction_cutoff = 0.95 # The threshold fraction of native contacts above which a pose is considered 'native'
nonnative_fraction_cutoff = 0.95 # The threshold fraction of native contacts below which a pose is considered 'nonnative'
native_ensemble_size = 10
nonnative_ensemble_size = 100
decorrelate = True
(
native_ensemble,
native_ensemble_energies,
nonnative_ensemble,
nonnative_ensemble_energies,
) = get_ensembles_from_replica_positions(
cgmodel,
replica_positions,
replica_energies,
temperature_list,
decorrelate=decorrelate,
native_fraction_cutoff=native_fraction_cutoff,
nonnative_fraction_cutoff=nonnative_fraction_cutoff,
native_structure_contact_distance_cutoff=native_structure_contact_distance_cutoff,
native_ensemble_size=native_ensemble_size,
nonnative_ensemble_size=nonnative_ensemble_size,
)
if (
len(native_ensemble_energies) != native_ensemble_size
or len(nonnative_ensemble_energies) != nonnative_ensemble_size
):
print(
"ERROR: attempt to generate native and nonnative ensembles was unsuccessful."
)
print(
str(len(native_ensemble_energies))
+ " native ensemble members were generated ("
+ str(native_ensemble_size)
+ " were requested),"
)
print(
"and "
+ str(len(nonnative_ensemble_energies))
+ " non-native ensemble members were generated ("
+ str(nonnative_ensemble_size)
+ " were requested)."
)
print(
"Try adjusting the 'native_structure_distance_cutoff' parameter (current value="
+ str(native_structure_contact_distance_cutoff.__div__(cgmodel.get_sigma(0)))
+ "*'bb_sigma'),"
)
print(
"and the 'nonnative_fraction_cutoff' parameter (current value="
+ str(nonnative_fraction_cutoff)
+ ")"
)
print("to see if either of these approaches fixes the problem.")
exit()
if scan_sc_bb_bb_sc_torsions == True:
nonnative_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_nonnative"
)
native_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_native"
)
else:
nonnative_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_nonnative"
)
native_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_native"
)
# We build an ensemble of nonnative poses for energetic comparison with the native pose.
if os.path.exists(nonnative_ensemble_directory):
nonnative_ensemble, nonnative_ensemble_energies = get_ensemble_data(
cgmodel, nonnative_ensemble_directory
)
if len(nonnative_ensemble) != nonnative_ensemble_size:
print(
"ERROR: "
+ str(len(nonnative_ensemble_energies))
+ " nonnative poses were found in existing output folders, but "
+ str(nonnative_ensemble_size)
+ " poses were requested."
)
print(
"This probably means that the requested ensemble size changed since the script was last run."
)
exit()
else:
os.mkdir(nonnative_ensemble_directory)
for pose in nonnative_ensemble:
cgmodel.positions = pose
write_ensemble_pdb(cgmodel, ensemble_directory=nonnative_ensemble_directory)
nonnative_ensemble_Q = []
for pose in nonnative_ensemble:
Q = fraction_native_contacts(cgmodel, pose, native_structure)
nonnative_ensemble_Q.append(Q)
nonnative_ensemble_Q = np.array([Q for Q in nonnative_ensemble_Q])
mean_nonnative_contacts = mean(nonnative_ensemble_Q)
            print(
                "The mean fraction of native contacts in the nonnative ensemble is: "
                + str(mean_nonnative_contacts)
            )
# We build an ensemble of native poses in order to understand the energy distribution around the folded state.
if os.path.exists(native_ensemble_directory):
native_ensemble, native_ensemble_energies = get_ensemble_data(
cgmodel, native_ensemble_directory
)
if len(native_ensemble_energies) != native_ensemble_size:
print(
"ERROR: "
+ str(len(native_ensemble_energies))
+ " native poses were found in existing output folders, but "
+ str(native_ensemble_size)
+ " poses were requested."
)
print(
"This probably means that the requested ensemble size changed since the script was last run."
)
exit()
else:
os.mkdir(native_ensemble_directory)
for pose in native_ensemble:
cgmodel.positions = pose
write_ensemble_pdb(cgmodel, ensemble_directory=native_ensemble_directory)
# Get the average change in the fraction of native contacts during folding (dQ),
            # calculated as one minus the average fraction of native contacts in the
            # nonnative ensemble (the native state has a fraction of 1 by definition).
# A large dQ means the model/structure has a stable folded state.
# A small dQ means the model/structure does not have a stable folded state.
dQ = 1.0 - mean_nonnative_contacts
dQ_list.append(dQ)
if calculate_free_energies:
num_intermediate_states = 1
mbar, E_kn, E_expect, dE_expect, new_temp_list = get_mbar_expectation(
replica_energies, temperature_list, num_intermediate_states
)
df_ij, ddf_ij = get_free_energy_differences(mbar)
df_ij_list.append(df_ij)
ddf_ij_list.append(ddf_ij)
Delta_s, dDelta_s = get_entropy_differences(mbar)
Delta_s_list.append(Delta_s)
dDelta_s_list.append(dDelta_s)
Delta_u, dDelta_u = get_enthalpy_differences(mbar)
Delta_u_list.append(Delta_u)
dDelta_u_list.append(dDelta_u)
if evaluate_heat_capacity:
C_v, dC_v, new_temperature_list = get_heat_capacity(
replica_energies, temperature_list, num_intermediate_states=1
)
C_v_list.append(C_v)
dC_v_list.append(dC_v)
if scan_sc_bb_bb_sc_torsions == True:
file_name = "dQ_for_variable_equil_torsion_angles.png"
figure = pyplot.figure(1)
bb_bb_bb_bb_equil_torsion_angles = np.array(
[float(equil_torsion_angle) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles]
)
sc_bb_bb_sc_equil_torsion_angles = np.array(
[float(equil_torsion_angle) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles]
)
x = np.unique(bb_bb_bb_bb_equil_torsion_angles * (180.0 / 3.14))
y = np.unique(sc_bb_bb_sc_equil_torsion_angles * (180.0 / 3.14))
X, Y = np.meshgrid(x, y)
    Z = np.array(dQ_list).reshape(len(x), len(y))
pyplot.xlabel(r"$ \alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )")
pyplot.ylabel(r"$ \alpha_{0}^{SC-BB-BB-SC} $ ( Degrees )")
pyplot.title("dQ (Change in native contacts during folding)")
pyplot.pcolormesh(X, Y, Z)
pyplot.colorbar()
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
if calculate_dQ:
file_name = "dQ_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
x = np.array([float(angle * (180.0 / 3.14)) for angle in bb_bb_bb_bb_equil_torsion_angles])
y = np.array([float(dQ) for dQ in dQ_list])
pyplot.xlabel(r"$ \alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )")
pyplot.ylabel(r"$\Delta$Q")
pyplot.title(r"$\Delta$Q (Change in native contacts) during folding")
pyplot.plot(x, y)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
if calculate_free_energies:
file_name = "free_energies_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
legend_title = r"$ \alpha_{0}^{BB-BB-BB-BB} $ (Degrees)"
legend_labels = np.array(
[float(round(angle * (180.0 / 3.14), 1)) for angle in bb_bb_bb_bb_equil_torsion_angles]
)
temperatures = np.array([temperature for temperature in new_temp_list])
index = 0
for df_ij, ddf_ij in zip(df_ij_list, ddf_ij_list):
df_ij = np.array([df_ij[i][0] for i in range(len(df_ij))])
ddf_ij = np.array([ddf_ij[i][0] for i in range(len(ddf_ij))])
(line,) = pyplot.plot(temperatures, df_ij)
line.set_label(legend_labels[index])
index = index + 1
pyplot.xlabel("Temperature (Kelvin)")
pyplot.ylabel(r"Dimensionless free energy differences $\mathit{F}$")
pyplot.title(r"$\mathit{F}$ for variable $\alpha_{0}^{BB-BB-BB-BB}$")
pyplot.legend(legend_labels)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
file_name = "entropies_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
legend_title = r"$ \alpha_{0}^{BB-BB-BB-BB} $ (Degrees)"
legend_labels = np.array(
[float(round(angle * (180.0 / 3.14), 1)) for angle in bb_bb_bb_bb_equil_torsion_angles]
)
temperatures = np.array([temperature for temperature in new_temp_list])
index = 0
for Delta_s in Delta_s_list:
delta_s = np.array([Delta_s[i][0] for i in range(len(Delta_s))])
(line,) = pyplot.plot(temperatures, delta_s)
line.set_label(legend_labels[index])
index = index + 1
pyplot.xlabel("Temperature (Kelvin)")
    pyplot.ylabel(r"Entropy differences ($\Delta$S)")
pyplot.title(r"Entropy for variable $\alpha_{0}^{BB-BB-BB-BB}$")
pyplot.legend(legend_labels)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
if evaluate_heat_capacity:
file_name = "heat_capacity_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
legend_title = r"$ \alpha_{0}^{BB-BB-BB-BB} $ (Degrees)"
legend_labels = np.array(
[float(round(angle * (180.0 / 3.14), 1)) for angle in bb_bb_bb_bb_equil_torsion_angles]
)
temperatures = np.array([temperature for temperature in new_temp_list])
index = 0
for C_v, dC_v in zip(C_v_list, dC_v_list):
C_v = np.array([C_v[i] for i in range(len(C_v))])
dC_v = np.array([dC_v[i] for i in range(len(dC_v))])
pyplot.errorbar(temperatures, C_v, yerr=dC_v, figure=figure, label=legend_labels[index])
index = index + 1
pyplot.xlabel("Temperature ( Kelvin )")
pyplot.ylabel(r"C$_{v}$ ( kcal/mol * Kelvin )")
    pyplot.title(r"Heat capacity for variable $\alpha_{0}^{BB-BB-BB-BB}$")
pyplot.legend(legend_labels)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
exit()
| 1.929688
| 2
|
extract_pin_function_from_liberty.py
|
hrshishym/ExtractPinFunctionFromLibertySource
| 0
|
12785067
|
<reponame>hrshishym/ExtractPinFunctionFromLibertySource<filename>extract_pin_function_from_liberty.py
#!/usr/bin/env python
### Setting
cell_attributes = ["clock_gating_integrated_cell"]
ff_attributes = []
pin_attributes = ["direction", "clock", "function", "state_function"]
import os
import sys
import re
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'modules'))
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("-d", "--debug", action="store_true")
args = parser.parse_args()
from liberty.parser import parse_liberty
tdata = re.sub("\\\\\n", "", open(args.input).read())
tlib = parse_liberty(tdata)
if args.debug:
import pprint
pprint.pprint(tlib)
# library (sxlib013) {
print("library ({}) {{".format(tlib.args[0]))
for eachlib in tlib.groups:
if eachlib.group_name != "cell":
continue
cellname = eachlib.args[0]
# cell(a2_x2) { /* 2008-01-10:21h05 */
print("cell({}) {{".format(cellname))
if args.debug:
print("==")
pprint.pprint(eachlib)
print("==")
pprint.pprint(eachlib.attributes)
print("==")
### Print cell attributes
    for eachattr in cell_attributes:
if eachattr in eachlib.attributes.keys():
print(" {} : {} ;".format(eachattr, eachlib.attributes[eachattr]))
### Print sub group
for eachgroup in eachlib.groups:
if args.debug:
print("====")
pprint.pprint(eachgroup)
print("====")
if eachgroup.group_name == "ff":
# ff
print(" ff({}) {{ ".format(",".join(eachgroup.args)))
for eachkey in eachgroup.attributes.keys():
print(" {} : {} ;".format(eachkey, eachgroup.attributes[eachkey]))
print(" }")
elif eachgroup.group_name == "pin":
## pin
print(" pin({}) {{".format(eachgroup.args[0]))
for eachattr in pin_attributes:
if eachattr in eachgroup.attributes.keys():
print(" {} : {} ;".format(eachattr, eachgroup.attributes[eachattr]))
print(" }")
elif eachgroup.group_name == "statetable":
## statetable
tarr = []
for i in eachgroup.args:
tarr.append(str(i))
print(" statetable( {} ) {{".format(" , ".join(tarr)))
if "table" in eachgroup.attributes.keys():
print(" {} : {} ;".format("table", re.sub(",", ", \\\n", str(eachgroup.attributes["table"]))))
print(" }")
print("}")
print("}")
| 2.484375
| 2
|
PyPoll/Resources/main.py
|
alcazar007/python-challenge
| 0
|
12785068
|
'''
## PyPoll

* In this challenge, you are tasked with helping a small, rural town
modernize its vote-counting process. (Up until now, Uncle Cleetus had
been trustfully tallying them one-by-one, but unfortunately, his concentration isn't what it used to be.)
* You will be given a set of poll data called [election_data.csv](PyPoll/Resources/election_data.csv).
The dataset is composed of three columns: `Voter ID`, `County`, and `Candidate`.
Your task is to create a Python script that analyzes the votes and calculates each of the following:
* The total number of votes cast
* A complete list of candidates who received votes
* The percentage of votes each candidate won
* The total number of votes each candidate won
* The winner of the election based on popular vote.
* As an example, your analysis should look similar to the one below:
```text
Election Results
-------------------------
Total Votes: 3521001
-------------------------
Khan: 63.000% (2218231)
Correy: 20.000% (704200)
Li: 14.000% (492940)
O'Tooley: 3.000% (105630)
-------------------------
Winner: Khan
-------------------------
```
* In addition, your final script should both print the analysis to the terminal and export a text file with the results.
'''
# Imports
import os, csv
# CSV Path
data_file = os.path.join("election_data.csv")
# Store Objects
database_total_votes = []
candidates_with_votes = []
store_candidates_votes = []
winner = []
# Variables
total_votes = 0
vote_percents = 0
# Open the csv with a reader, skip the header row, then loop through the rows
with open(data_file, newline="", encoding="UTF-8") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
csv_header = next(csv_file)
# Loop through the data to variables
    for row in csv_reader:
        total_votes = total_votes + 1
        # Build a complete list of the candidates who received votes (the `Candidate` column)
        candidates_with_votes.append(row[2])
    database_total_votes = total_votes
    print(database_total_votes)
    print(candidates_with_votes)
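# The loop above only tallies the raw rows; a minimal sketch of the remaining analysis
# described in the docstring (percentages, per-candidate totals, winner, terminal print,
# and a text-file export) follows. The output file name is illustrative, not part of the
# original challenge files:
from collections import Counter
vote_counts = Counter(candidates_with_votes)
winner = vote_counts.most_common(1)[0][0]
lines = [
    "Election Results",
    "-------------------------",
    f"Total Votes: {total_votes}",
    "-------------------------",
]
for candidate, votes in vote_counts.items():
    lines.append(f"{candidate}: {votes / total_votes:.3%} ({votes})")
lines.extend([
    "-------------------------",
    f"Winner: {winner}",
    "-------------------------",
])
output = "\n".join(lines)
print(output)
with open("election_results.txt", "w") as results_file:
    results_file.write(output)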
| 3.703125
| 4
|
mantabot/core/private.py
|
spectras/turbot
| 2
|
12785069
|
<gh_stars>1-10
import asyncio
class PrivateChat(object):
""" A direct messaging channel to a specific user
There should be only one per user per bot. The intent is to provide a way for
multiple plugins to claim, acquire and release the private chat, preventing
several of them from interacting simultaneously and confusing the user.
"""
class BusyError(RuntimeError):
pass
def __init__(self, client, user):
self.client = client
self.user = user
self.channel = None
self.task = None
async def wait_reply(self, timeout=None):
""" Wait until the user types something or specified time elapses (in seconds)
Calling task must have acquired ownership of the channel first.
"""
assert self.task == asyncio.Task.current_task()
def check(msg):
return msg.channel == self.channel and msg.author == self.user
try:
return await self.client.wait_for('message', check=check, timeout=timeout)
except asyncio.TimeoutError:
return None
async def send(self, *args, **kwargs):
""" Send a message to the user """
return await self.user.send(*args, **kwargs)
async def __aenter__(self):
""" Acquire ownership of the private chat """
if self.task:
raise self.BusyError()
self.task = asyncio.Task.current_task()
if not self.channel:
self.channel = await self.user.create_dm()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
""" Release ownership of the private chat """
self.task = None
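# A minimal usage sketch from inside a plugin coroutine (names are illustrative):
#
#     async with private_chat:                    # claim the chat; raises BusyError if taken
#         await private_chat.send("Pick a number")
#         reply = await private_chat.wait_reply(timeout=30)
#         if reply is None:
#             await private_chat.send("No answer, giving up.")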
| 2.8125
| 3
|
lib/TestTaxonAPI/TestTaxonAPIImpl.py
|
scanon/testtaxonapi
| 0
|
12785070
|
<reponame>scanon/testtaxonapi
#BEGIN_HEADER
from biokbase.workspace.client import Workspace as workspaceService
import doekbase.data_api.taxonomy.taxon.api
from doekbase.data_api import cache
import logging
#END_HEADER
class TestTaxonAPI:
'''
Module Name:
TestTaxonAPI
Module Description:
A KBase module: test_taxon_api
'''
######## WARNING FOR GEVENT USERS #######
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
#########################################
VERSION = "0.0.1"
GIT_URL = "https://github.com/scanon/testtaxonapi"
GIT_COMMIT_HASH = "a76b28a7e461132eb5227a7465dc8c37fb01b839"
#BEGIN_CLASS_HEADER
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.workspaceURL = config['workspace-url']
self.shockURL = config['shock-url']
self.logger = logging.getLogger()
log_handler = logging.StreamHandler()
log_handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
self.logger.addHandler(log_handler)
self.services = {
"workspace_service_url": self.workspaceURL,
"shock_service_url": self.shockURL,
}
try:
cache_dir = config['cache_dir']
except:
cache_dir = None
try:
redis_host = config['redis_host']
redis_port = config['redis_port']
except:
redis_host = None
redis_port = None
if redis_host is not None and redis_port is not None:
self.logger.info("Activating REDIS at host:{} port:{}".format(redis_host, redis_port))
cache.ObjectCache.cache_class = cache.RedisCache
cache.ObjectCache.cache_params = {'redis_host': redis_host, 'redis_port': redis_port}
elif cache_dir is not None:
self.logger.info("Activating File")
cache.ObjectCache.cache_class = cache.DBMCache
cache.ObjectCache.cache_params = {'path':cache_dir,'name':'data_api'}
else:
self.logger.info("Not activating REDIS")
#END_CONSTRUCTOR
pass
def get_parent(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_parent
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_parent(ref_only=True)
#END get_parent
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method get_parent return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
def get_children(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_children
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_children(ref_only=True)
#END get_children
# At some point might do deeper type checking...
if not isinstance(returnVal, list):
raise ValueError('Method get_children return value ' +
'returnVal is not type list as required.')
# return the results
return [returnVal]
def get_genome_annotations(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_genome_annotations
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_genome_annotations(ref_only=True)
#END get_genome_annotations
# At some point might do deeper type checking...
if not isinstance(returnVal, list):
raise ValueError('Method get_genome_annotations return value ' +
'returnVal is not type list as required.')
# return the results
return [returnVal]
def get_scientific_lineage(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_scientific_lineage
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_scientific_lineage()
#END get_scientific_lineage
# At some point might do deeper type checking...
if not isinstance(returnVal, list):
raise ValueError('Method get_scientific_lineage return value ' +
'returnVal is not type list as required.')
# return the results
return [returnVal]
def get_scientific_name(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_scientific_name
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_scientific_name()
#END get_scientific_name
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method get_scientific_name return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
def get_taxonomic_id(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_taxonomic_id
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_taxonomic_id()
#END get_taxonomic_id
# At some point might do deeper type checking...
if not isinstance(returnVal, int):
raise ValueError('Method get_taxonomic_id return value ' +
'returnVal is not type int as required.')
# return the results
return [returnVal]
def get_kingdom(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_kingdom
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_kingdom()
#END get_kingdom
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method get_kingdom return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
def get_domain(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_domain
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_domain()
#END get_domain
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method get_domain return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
def get_genetic_code(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_genetic_code
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_genetic_code()
#END get_genetic_code
# At some point might do deeper type checking...
if not isinstance(returnVal, int):
raise ValueError('Method get_genetic_code return value ' +
'returnVal is not type int as required.')
# return the results
return [returnVal]
def get_aliases(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_aliases
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_aliases()
#END get_aliases
# At some point might do deeper type checking...
if not isinstance(returnVal, list):
raise ValueError('Method get_aliases return value ' +
'returnVal is not type list as required.')
# return the results
return [returnVal]
def get_info(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_info
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_info()
#END get_info
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method get_info return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def get_history(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_history
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_history()
#END get_history
# At some point might do deeper type checking...
if not isinstance(returnVal, list):
raise ValueError('Method get_history return value ' +
'returnVal is not type list as required.')
# return the results
return [returnVal]
def get_provenance(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_provenance
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_provenance()
#END get_provenance
# At some point might do deeper type checking...
if not isinstance(returnVal, list):
raise ValueError('Method get_provenance return value ' +
'returnVal is not type list as required.')
# return the results
return [returnVal]
def get_id(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_id
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_id()
#END get_id
# At some point might do deeper type checking...
if not isinstance(returnVal, int):
raise ValueError('Method get_id return value ' +
'returnVal is not type int as required.')
# return the results
return [returnVal]
def get_name(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_name
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_name()
#END get_name
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method get_name return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
def get_version(self, ctx, ref):
# ctx is the context object
# return variables are: returnVal
#BEGIN get_version
taxon_api = doekbase.data_api.taxonomy.taxon.api.TaxonAPI(self.services, ctx['token'], ref)
returnVal=taxon_api.get_version()
#END get_version
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method get_version return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK", 'message': "", 'version': self.VERSION,
'git_url': self.GIT_URL, 'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
| 1.820313
| 2
|
models/head/box_head.py
|
nota-github/ssd_tf2
| 8
|
12785071
|
import tensorflow as tf
from utils import box_utils
from models import registry
from .loss import MultiBoxLoss
from .inference import PostProcessor
from models.head.box_predictor import make_box_predictor
from models.anchors.prior_box import PriorBox
@registry.BOX_HEADS.register('SSDBoxHead')
class SSDBoxHead(tf.keras.layers.Layer):
def __init__(self, cfg):
super(SSDBoxHead, self).__init__()
self.cfg = cfg
self.predictor = make_box_predictor(cfg)
self.loss_evaluator = MultiBoxLoss(neg_pos_ratio=cfg.MODEL.NEG_POS_RATIO)
self.post_processor = PostProcessor(cfg)
self.priors = None
def call(self, features, targets=None):
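        # With targets supplied (training) this returns the multibox regression and
        # classification losses; without targets (inference) it decodes the raw
        # predictions into post-processed detections.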
cls_logits, bbox_pred = self.predictor(features) # (batch_size, num_priors, num_C) | (batch_size, num_priors, 4)
if targets is not None:
return self._call_train(cls_logits, bbox_pred, targets)
return self._call_test(cls_logits, bbox_pred)
def _call_train(self, cls_logits, bbox_pred, targets):
gt_boxes, gt_labels = targets
reg_loss, cls_loss = self.loss_evaluator(cls_logits, bbox_pred, gt_labels, gt_boxes)
return reg_loss, cls_loss
def _call_test(self, cls_logits, bbox_pred):
if self.priors is None:
self.priors = PriorBox(self.cfg)()
scores = tf.keras.activations.softmax(cls_logits, axis=2)
boxes = box_utils.convert_locations_to_boxes(bbox_pred, self.priors , self.cfg.MODEL.CENTER_VARIANCE, self.cfg.MODEL.SIZE_VARIANCE)
boxes = box_utils.center_form_to_corner_form(boxes)
detections = (scores, boxes)
detections = self.post_processor(detections)
return detections
| 2.015625
| 2
|
mahjong/ai/test/test_mahjong_comb.py
|
feiyaaaa/mahjong
| 0
|
12785072
|
import pymysql
import sys
from mahjong.ai.comb.perm_comb_mahjong import PermCombMahjongGenerator
Tiles = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
db = pymysql.connect(host='127.0.0.1', user='root',
password='<PASSWORD>', db='mahjong', port=3306, charset='utf8')
cursor = db.cursor()
raw_sql = """INSERT INTO comb_chain(hands_comb,
search_chain)
VALUES ('{0}', '')
ON DUPLICATE KEY UPDATE search_chain = ''"""
comb_gen = PermCombMahjongGenerator(Tiles, 13, end_point=1, start_comb=[0, 0, 0, 0, 1, 1, 1, 1, 2, 3, 4, 5, 6])
comb = comb_gen.next()
i = 1
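# Enumerate 13-tile hand combinations and upsert each one into the comb_chain table.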
while comb is not None:
i += 1
if i % 1000 == 0:
print(comb)
comb_str = ""
comb_str_list = []
for tile in comb:
comb_str_list.append(tile.__str__()+",")
comb_str = ''.join(comb_str_list)
comb_str = comb_str[:-1]
s = raw_sql.format(comb_str)
try:
        # Execute the SQL statement
cursor.execute(s)
        # Commit the transaction to the database
db.commit()
except Exception:
        # Roll back the transaction if an error occurred
db.rollback()
print("wrong")
print(sys.exc_info()[0], sys.exc_info()[1])
comb = comb_gen.next()
| 2.6875
| 3
|
catalog/Marshall PY/plugins/games.py
|
derwear/bots.hub
| 8
|
12785073
|
<reponame>derwear/bots.hub
import datetime, random, time
from kutana.vksm import *
from kutana import Plugin
from kutana.database import *
import aiohttp, json, re, xmltodict
plugin = Plugin(name="other")
cases = (2, 0, 1, 1, 1, 2)
def plural_form(n: int, v: (list, tuple), need_n=False, need_cases=False):
    """Return the number followed by the correctly declined form of the word.
    Arguments:
        :param n: the number
        :param v: word forms in the format (form for 1, form for 2, form for 5)
    Example:
        plural_form(difference.days, ("день", "дня", "дней"))
    :return: The number and the correctly declined word after it
    """
return f"{n if need_n is False else ''} {v[2 if (4 < n % 100 < 20) else cases[min(n % 10, 5)]] if need_cases is False else ''}"
async def get_diff(a, b):
if a < b:
return int(((b-a)/a) * 100)
elif a > b:
return int(((a-b)/a) * 100)
elif a == b:
return 0
@plugin.on_startup()
async def on_startup(kutana, update):
plugin.dict = {}
plugin.min_bet = 50
plugin.positive = random.choice(['😊','😉','😃','😋','😏','😄'])
plugin.negative = random.choice(['😩','😰','😒','😔','😢'])
async def get_or_create_profile(user_id):
try:
shopcenters = (
shopcenter
.select()
)
job = (
jobs.select()
)
profiles = (
Profile
.select()
.where(Profile.user_id == user_id
)
)
profile = list(await db.prefetch(profiles, shopcenters, job))[0]
except IndexError:
profile = await peewee_async.create_object(Profile, user_id=user_id)
return profile
def text_to_value(value, text):
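    # Scales the numeric value by a shorthand suffix: 'к'/'k' = thousand,
    # 'кк'/'kk' = million, and so on up to eight repetitions of the suffix.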
value2 = 1000
if text == 'к' or text == 'k':
return int(value) * int(value2)
if text == 'кк' or text == 'kk':
return int(value) * (int(value2) ** 2)
if text == 'ккк' or text == 'kkk':
return int(value) * (int(value2) ** 3)
if text == 'кккк' or text == 'kkkk':
return int(value) * (int(value2) ** 4)
if text == 'ккккк' or text == 'kkkkk':
return int(value) * (int(value2) ** 5)
if text == 'кккккк' or text == 'kkkkkk':
return int(value) * (int(value2) ** 6)
if text == 'ккккккк' or text == 'kkkkkkk':
return int(value) * (int(value2) ** 7)
if text == 'кккккккк' or text == 'kkkkkkkk':
return int(value) * (int(value2) ** 8)
return int(value)
def humanize(value):
return "{:,}".format(round(value)).replace(",",".")
@plugin.on_startswith_text('шар')
async def shar(msg,ats,env):
if not env.body:
return
answers2 = '''Абсолютно точно!
Да.
Нет.
Бесспорно.
Никаких сомнений.
Определённо да.
Пока не ясно, попробуй снова.
Предрешено.
Скорее да, чем нет.
Сконцентрируйся и спроси опять.
Не уверен...
Сейчас нельзя предсказать.
Однозначно нет!
Можешь быть уверен в этом.
Перспективы не очень хорошие.
А как же иначе?.
Знаки говорят — «да».
Не знаю.
Мой ответ — «нет».
Весьма сомнительно.
Ну может быть.
Приличный вопрос задай :c
Не могу дать точный ответ.
'''.splitlines()
return await env.reply("🔮" + random.choice(answers2).lower())
@plugin.on_startswith_text('переверни')
async def invert(msg,ats,env):
flipTable = {
'a' : '\u0250',
'b' : 'q',
'c' : '\u0254',
'd' : 'p',
'e': '\u01DD',
'f': '\u025F',
'g' : '\u0183',
'h' : '\u0265',
'i' : '\u0131',
'j' : '\u027E',
'k' : '\u029E',
'l' : '\u0283',
'm' : '\u026F',
'n' : 'u',
'r' : '\u0279',
't' : '\u0287',
'v' : '\u028C',
'w' : '\u028D',
'y' : '\u028E',
'.' : '\u02D9',
'[' : ']',
'(' : ')',
'{' : '}',
'?' : '\u00BF',
'!' : '\u00A1',
"\'" : ',',
'<' : '>',
'_' : '\u203E',
'\u203F' : '\u2040',
'\u2045' : '\u2046',
'\u2234' : '\u2235',
'\r' : '\n',
'а' : 'ɐ',
'б' : 'ƍ',
'в' : 'ʚ',
'г' : 'ɹ',
'д' : 'ɓ',
'е' : 'ǝ',
'ё' : 'ǝ',
'ж' : 'ж',
'з' : 'ε',
'и' : 'и',
'й' : 'ņ',
'к' : 'ʞ',
'л' : 'v',
'м' : 'w',
'н' : 'н',
'о' : 'о',
'п' : 'u',
'р' : 'd',
'с' : 'ɔ',
'т' : 'ɯ',
'у' : 'ʎ',
'ф' : 'ф',
'х' : 'х',
'ц' : 'ǹ',
'ч' : 'Һ',
'ш' : 'm',
'щ' : 'm',
'ъ' : 'q',
'ы' : 'ıq',
'ь' : 'q',
'э' : 'є',
'ю' : 'oı',
'я' : 'ʁ'
}
if not env.body:
return
result = ""
for word in env.body:
result += flipTable.get(word, word)
return await env.reply(result)
@plugin.on_startswith_text('инфа','шанс','вероятность')
async def info(msg,ats,env):
if not env.body:
return
answers2 = '''17e^3pi/-e%*6,0(3)... Я тут прикинул
Хуерятность
Ровно
Примерно
Звезды говорят, что вероятность
Почти
Наверное
'''.splitlines()
num = random.randint(1, 100)
return await env.reply(f"{random.choice(answers2)} {str(num)}%")
@plugin.on_text('анекдот')
async def joke(msg,ats,env):
async with aiohttp.ClientSession() as sess:
async with sess.get(f"http://nextjoke.net/Api/GetJoke?format=JSONP&ratingMin=100&NOCACHE={time.time()}") as resp:
html = await resp.text()
try:
html = json.loads(html.replace("window.JokeWidget.parseResponse(", "", 1)[:-2])["text"]
except:
return await env.reply("Сегодня без шуток ;(")
html = re.sub("(\n|^| )-([A-Za-zА-Яа-я])", "- \\2", html)
return await env.reply(html.replace("\r", ""))
@plugin.on_text('онлайн')
async def members(msg,ats,env):
if msg.peer_id > 2000000000:
all_users = await env.request('messages.getConversationMembers', peer_id=msg.peer_id, fields='online')
text = ""
x = 0
numerate = 1
if not all_users.response:
return await env.reply('назначьте меня администратором и попробуйте еще раз.')
for user in all_users.response['profiles']:
if 'online' in user and user['online'] == 1:
name = await parse_user_name(env, user['id'])
text += f"{numerate}. [id{user['id']}|{name}]\n"
numerate += 1
x += 1
plural = plural_form(x, ('пользователя', 'пользователи', 'пользователей'), need_n=True)
await env.reply(f'онлайн пользователи ({x} из {all_users.response["count"]}):\n' + text)
else:
await env.reply("эту команду можно использовать только в беседе.")
@plugin.on_startswith_text('дата')
async def datareg(msg,ats,env):
async with aiohttp.ClientSession() as sess:
data = await parse_user_id(msg, env)
check = 0
if not data:
data = msg.from_id
check = 1
url = f"http://vk.com/foaf.php?id={data[0] if check == 0 else data}"
async with sess.get(url) as resp:
xml = xmltodict.parse(await resp.text())
items = xml["rdf:RDF"]["foaf:Person"]
item = items
reg = item["ya:created"]
res = reg["@dc:date"].split("T")[0]
year = res.split("-")[0]
            months = ["января", "февраля", "марта", "апреля", "мая", "июня",
                      "июля", "августа", "сентября", "октября", "ноября", "декабря"]
            mounth = months[int(res.split("-")[1]) - 1]
day = res.split("-")[2]
await env.reply(f'{item["foaf:name"]}\n'
f'🚀 Дата регистрации: {day} {mounth} {year} года')
@plugin.on_startswith_text('трейд')
async def trade(msg,ats,env):
if not env.body:
return await env.reply(f'"Бинарный опцион"\n• Суть игры: Бинарный опцион используется для получения прибыли на движениях цены активов (в нашем случае валют) на мировых финансовых рынках. Пользователь делает прогноз как изменится цена его актива.\n• Помощь: Для того, чтобы начать игру, введите \"трейд [прогноз (вверх/вниз)] [ставка]\"')
try:
forecast, bet = env.body.split(" ")
except Exception as e:
return await env.reply(f"Недостаточно аргументов.\nДля того, чтобы начать игру, введите \"трейд [прогноз (вверх/вниз)] [ставка]\"")
if not forecast.lower() in ('вверх','вниз'):
return
p = await get_or_create_profile(msg.from_id)
try:
value = re.findall(r'\d+', bet)
text = re.sub(r'[^\w\s]+|[\d]+', r'',bet).strip()
result = text_to_value(value[0], text)
except:
return await env.reply('Что-то пошло не так.')
if int(result) > p.money:
return await env.reply("Недостаточно средств на счету.")
if int(result) < 50:
return await env.reply('минимальная ставка - 50$')
course_money = 100
res = course_money + random.randint(-50, 50)
data = await get_diff(res, course_money)
p.money -= int(result)
if forecast == "вверх" and res >= course_money or forecast == "вниз" and res <= course_money:
p.money += int(result) + int(result)
vk_message = f'курс акции {"подорожал⤴" if res >= course_money else "подешевел⤵"} на {data}$\n{"✔ Вы получили" if forecast == "вверх" and res >= course_money or forecast == "вниз" and res <= course_money else "❌ Вы потеряли"}: {humanize(int(result))}$\n💰 | Ваш баланс: {humanize(p.money)}$\n'
await db.update(p)
return await env.reply(vk_message)
@plugin.on_startswith_text('кубик')
async def cube(msg,ats,env):
if not env.body or not env.body.isdigit():
return await env.reply('для игры в кубик используйте команду "Кубик [1-6]"')
bot_choice = random.randint(1,6)
if int(env.body) == int(bot_choice):
money = random.randint(2000, 10000)
p = await get_or_create_profile(msg.from_id)
p.money += money
await db.update(p)
return await env.reply(f'вы угадали! Приз {humanize(money)}$ {plugin.positive}')
else:
return await env.reply(f'вы не угадали {plugin.negative}\n🎲 Выпало число {bot_choice}')
@plugin.on_startswith_text('стаканчик')
async def stakan(msg,ats,env):
if not env.body:
return await env.reply('для игры в стаканчик используйте команду "Стаканчик [1-3] [сумма]"')
try:
choice, bet = env.body.split(" ")
if not choice.isdigit() or int(choice) > 3 or int(choice) < 1:
raise ValueError
except Exception as e:
return await env.reply('для игры в стаканчик используйте команду "Стаканчик [1-3] [сумма]"')
try:
value = re.findall(r'\d+', bet)
text = re.sub(r'[^\w\s]+|[\d]+', r'',bet).strip()
result = text_to_value(value[0], text)
except:
return await env.reply('Что-то пошло не так.')
p = await get_or_create_profile(msg.from_id)
if int(result) > p.money:
return await env.reply("Недостаточно средств на счету.")
if int(result) < 50:
return await env.reply('минимальная ставка - 50$')
bot_choice = random.randint(1,3)
if int(choice) == int(bot_choice):
money = random.randint(100,2000)
p.money += Decimal(int(result) + int(money))
await db.update(p)
return await env.reply(f'вы угадали! Приз {humanize(int(result) + int(money))}$ {plugin.positive}')
else:
p.money -= Decimal(result)
await db.update(p)
return await env.reply(f'вы не угадали, это был {bot_choice} стаканчик {plugin.negative}')
| 2.25
| 2
|
plugins/niux2_hermit_player/__init__.py
|
sdphome/blog-content
| 0
|
12785074
|
# -*- coding: UTF-8 -*-
from .niux2_hermit_player import *
| 0.917969
| 1
|
Crypt/Crypt/Crypt.py
|
wtglover/TOR
| 0
|
12785075
|
from Crypto.PublicKey import RSA
from Crypto.Signature import pss
from Crypto.Hash import SHA256
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from os import urandom
import struct
import logging
import sys
crypt_logger = logging.getLogger("Crypt")
crypt_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
crypt_logger.addHandler(ch)
sym_logger = logging.getLogger("Symmetric")
sym_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
sym_logger.addHandler(ch)
MAX_MSG_LEN = 214 # determined manually for RSA2048 key, padding with PKCS1_OAEP
KEY_SIZE = 2048
class Crypt(object):
PUB_DER_LEN = len(RSA.generate(KEY_SIZE).publickey().exportKey('DER'))
def __init__(self, private_key=None, public_key=None, name='', debug=False):
self.public_key = public_key
self._private_key = private_key
self._name = name
self._debug = debug
def log(self, message):
if self._debug:
crypt_logger.debug(message)
def generate_key(self):
return RSA.generate(KEY_SIZE)
def available(self):
return not (self.public_key is None or self._private_key is None)
def setPublicKey(self, publicKey):
self.public_key = publicKey
def sign_and_encrypt(self, data):
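        # Sign the payload with our private key, prepend the 256-byte signature,
        # then RSA-OAEP encrypt the result in MAX_MSG_LEN-sized chunks using the
        # peer's public key.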
cipher = PKCS1_OAEP.new(self.public_key)
self.log("Signing with own key %s" % self._private_key.publickey().exportKey(format="DER").encode('hex')[66:74])
self.log("Encrypting with %s's key %s" % (self._name, self.public_key.exportKey(format="DER").encode('hex')[66:74]))
signature = pss.new(self._private_key).sign(SHA256.new(data))
# print signature.encode('hex')[:16], signature.encode('hex')[-16:], data.encode('hex')[:16], data.encode('hex')[-16:]
data = signature + data
message = ""
i = 0
while i * MAX_MSG_LEN < len(data):
message += cipher.encrypt(data[i * MAX_MSG_LEN : (i + 1) * MAX_MSG_LEN])
i += 1
return message
def decrypt(self, message):
self.log("Decrypting with own key %s" % self._private_key.publickey().exportKey(format="DER").encode('hex')[66:74])
cipher = PKCS1_OAEP.new(self._private_key)
chunk_size = KEY_SIZE / 8
data = ""
i = 0
while chunk_size * i < len(message):
chunk = message[i * chunk_size : (i + 1) * chunk_size]
data += cipher.decrypt(chunk)
i += 1
# print data[:256].encode('hex')[:16], data[:256].encode('hex')[-16:], data[256:].encode('hex')[:16], data[256:].encode('hex')[-16:]
return data[256:], data[:256]
def auth(self, data, hash):
self.log("Checking signature with %s's key %s" % (self._name, self.public_key.exportKey(format="DER").encode('hex')[66:74]))
verifier = pss.new(self.public_key)
verifier.verify(SHA256.new(data), hash)
# raises error if verification fails
def decrypt_and_auth(self, message):
data, hash = self.decrypt(message)
self.auth(data, hash)
return data
class BadSID(Exception):
pass
class MACMismatch(Exception):
pass
class Symmetric(object):
CRYPT_HEADER_LEN = 16 * 5
HEADER_LEN = 16
FULL_HEADER = CRYPT_HEADER_LEN + HEADER_LEN
STATUS_OK = "OKOK"
STATUS_EXIT = "EXIT"
def __init__(self, key='', sid="\00"*8, debug=False):
self.raw_key = key
self.sid = sid
self.key = None
self.salt = None
self.head_nonce = None
self.head_tag = None
self.body_nonce = None
self.body_tag = None
self._debug = debug
def log(self, message):
if self._debug:
sym_logger.debug(message)
@staticmethod
def generate():
return urandom(16)
def unpack_payload(self, payload):
return (payload[:self.CRYPT_HEADER_LEN],
payload[self.CRYPT_HEADER_LEN:self.FULL_HEADER],
payload[self.FULL_HEADER:])
def absorb_crypto_header(self, header):
"""Absorbs the cryptographic information in the crypto header
Args:
header (str): 80B cryptographic header
"""
self.salt, self.head_tag, self.head_nonce, self.body_tag, self.body_nonce \
= [header[i:i+16] for i in range(0, 16 * 5, 16)]
def decrypt_header(self, header):
"""Decrypts and authenticates the packet header
Args:
header (str): 16B header
Returns:
(int, str): number of bytes to come and status message
Raises:
MACMismatch: data authentication failed
BadSID: SID doesn't match
"""
self.log("Decrypting header with key %s" % repr(self.raw_key.encode('hex')))
self.log("\nkey: %s\nsalt: %s\nnonce: %s\ntag: %s\nheader: %s"
% (repr(self.raw_key.encode('hex')),
repr(self.salt.encode('hex')),
repr(self.head_nonce.encode('hex')),
repr(self.head_tag.encode('hex')),
repr(header.encode('hex'))))
self.log("ENCHEAD: %s" % repr(header))
key = PBKDF2(self.raw_key, self.salt)
cipher = AES.new(key, AES.MODE_GCM, self.head_nonce)
cipher.update(self.sid)
        try:
            header = cipher.decrypt_and_verify(header, self.head_tag)
        except ValueError:
            raise MACMismatch
num_bytes, status, sid = struct.unpack("!L4s8s", header)
if self.sid != sid:
print self.sid.encode("hex")
print sid.encode('hex')
raise BadSID
return num_bytes, status
def decrypt_body(self, body):
"""Decrypts and authenticates the packet header
Args:
body (str): data (multiple of 16B)
Returns:
str: decrypted and authenticated data
Raises:
MACMismatch: data authentication failed
"""
self.log("Decrypting body with key %s" % repr(self.raw_key.encode('hex')))
key = PBKDF2(self.raw_key, self.salt)
cipher = AES.new(key, AES.MODE_GCM, self.body_nonce)
cipher.update(self.sid)
try:
return cipher.decrypt_and_verify(body, self.body_tag)
except ValueError:
raise MACMismatch
def encrypt_payload(self, data, status):
"""Encrypts and data and formats into packet
Args:
data (str): data to encrypt
status (str): 4B status string
Returns:
str: encrypted data
"""
self.log("Encrypting body with key %s" % repr(self.raw_key.encode('hex')))
# encrypt body
salt = get_random_bytes(16)
key = PBKDF2(self.raw_key, salt)
cipher = AES.new(key, AES.MODE_GCM)
cipher.update(self.sid)
ct, body_tag = cipher.encrypt_and_digest(data)
body_nonce = cipher.nonce
# build header
header = struct.pack("!L4s8s", len(ct), status, self.sid)
# encrypt header
cipher = AES.new(key, AES.MODE_GCM)
cipher.update(self.sid)
header, head_tag = cipher.encrypt_and_digest(header)
head_nonce = cipher.nonce
crypto_head = salt + head_tag + head_nonce + body_tag + body_nonce
self.log("\nkey: %s\nsalt: %s\nnonce: %s\ntag: %s\nheader: %s"
% (repr(self.raw_key.encode('hex')),
repr(salt.encode('hex')),
repr(head_nonce.encode('hex')),
repr(head_tag.encode('hex')),
repr(header.encode('hex'))))
self.log("SYM HEADER: '%s...%s'" % (crypto_head.encode('hex')[:8],
header.encode('hex')[-8:]))
self.log("ENCHEAD: %s" % repr(header))
return crypto_head + header + ct
def test():
key1 = Crypt().generate_key()
key2 = Crypt().generate_key()
crypt1 = Crypt(key1, key2.publickey(), debug=True)
crypt2 = Crypt(key2, key1.publickey(), debug=True)
message = "this is a test"
data = crypt1.sign_and_encrypt(message)
if crypt2.decrypt_and_auth(data) == message:
print "Test pass"
else:
raise TypeError('TEST DID NOT PASS')
def test_sym():
key = get_random_bytes(16)
sid = "12345678"
message = "This is the example message! " * 10
status = "OKOK"
c1 = Symmetric(key, sid)
packet = c1.encrypt_payload(message, status)
print c1.unpack_payload(packet)
crypt_header, header, body = c1.unpack_payload(packet)
c2 = Symmetric(key, sid)
c2.absorb_crypto_header(crypt_header)
print c2.decrypt_header(header)
print repr(c2.decrypt_body(body))
if __name__ == '__main__':
# test()
test_sym()
| 2.453125
| 2
|
sphinxcontrib/pylit/PylitFile.py
|
rblack42/sphinxcontrib-pylit
| 0
|
12785076
|
from sphinx.directives.code import CodeBlock
class PylitFile(CodeBlock):
def run(self):
caption = self.options.get('caption')
if not caption:
caption = ""
newcaption = '<<' + caption + '>>=='
self.options['caption'] = newcaption
# format the block and return
return super(PylitFile, self).run()
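# Hedged sketch of how a Sphinx extension module might register this directive.
# The directive name "pylit-file" is an illustrative assumption, and the real
# setup() for sphinxcontrib-pylit may live elsewhere and differ.
def _example_setup(app):
    app.add_directive("pylit-file", PylitFile)
    return {"parallel_read_safe": True}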
| 2.625
| 3
|
Incident-Response/Tools/grr/grr/client/grr_response_client/unprivileged/memory/client.py
|
sn0b4ll/Incident-Playbook
| 1
|
12785077
|
#!/usr/bin/env python
"""Unprivileged memory RPC client code."""
import abc
from typing import TypeVar, Generic
from grr_response_client.unprivileged import communication
from grr_response_client.unprivileged.proto import memory_pb2
class ConnectionWrapper:
"""Wraps a connection, adding protobuf serialization of messages."""
def __init__(self, connection: communication.Connection):
self._connection = connection
def Send(self, request: memory_pb2.Request) -> None:
self._connection.Send(
communication.Message(request.SerializeToString(), b""))
def Recv(self) -> memory_pb2.Response:
raw_response, _ = self._connection.Recv()
response = memory_pb2.Response()
response.ParseFromString(raw_response)
return response
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class OperationError(Exception):
"""Error while executing the operation."""
def __init__(self, message: str, formatted_exception: str):
"""Constructor.
Args:
message: the exception message
formatted_exception: the remote exception formatted using
traceback.format_exc()
"""
super().__init__(message)
self.formatted_exception = formatted_exception
RequestType = TypeVar("RequestType")
ResponseType = TypeVar("ResponseType")
class OperationHandler(abc.ABC, Generic[RequestType, ResponseType]):
"""Base class for RPC handlers."""
def __init__(self, connection: ConnectionWrapper):
self._connection = connection
def Run(self, request: RequestType) -> ResponseType:
self._connection.Send(self.PackRequest(request))
packed_response = self._connection.Recv()
if packed_response.HasField("exception"):
raise OperationError(packed_response.exception.message,
packed_response.exception.formatted_exception)
else:
response = self.UnpackResponse(packed_response)
return response
@abc.abstractmethod
def UnpackResponse(self, response: memory_pb2.Response) -> ResponseType:
"""Extracts an inner Response message from a response message."""
pass
@abc.abstractmethod
def PackRequest(self, request: RequestType) -> memory_pb2.Request:
"""Packs an inner Request message into a request message."""
pass
class UploadSignatureHandler(
OperationHandler[memory_pb2.UploadSignatureRequest,
memory_pb2.UploadSignatureResponse]):
"""Implements the UploadSignature RPC."""
def UnpackResponse(
self,
response: memory_pb2.Response) -> memory_pb2.UploadSignatureResponse:
return response.upload_signature_response
def PackRequest(
self, request: memory_pb2.UploadSignatureRequest) -> memory_pb2.Request:
return memory_pb2.Request(upload_signature_request=request)
class ProcessScanHandler(OperationHandler[memory_pb2.ProcessScanRequest,
memory_pb2.ProcessScanResponse]):
"""Implements the ProcessScan RPC."""
def UnpackResponse(
self, response: memory_pb2.Response) -> memory_pb2.ProcessScanResponse:
return response.process_scan_response
def PackRequest(self,
request: memory_pb2.ProcessScanRequest) -> memory_pb2.Request:
return memory_pb2.Request(process_scan_request=request)
class Client:
"""Client for the RPC memory service."""
def __init__(self, connection: communication.Connection):
self._connection = ConnectionWrapper(connection)
def UploadSignature(self, yara_signature: str):
"""Uploads a yara signature to be used for this connection."""
request = memory_pb2.UploadSignatureRequest(yara_signature=yara_signature)
UploadSignatureHandler(self._connection).Run(request)
def ProcessScan(self, serialized_file_descriptor: int, offset: int, size: int,
timeout_seconds: int) -> memory_pb2.ProcessScanResponse:
"""Scans process memory.
Args:
serialized_file_descriptor: Serialized file descriptor for the process
memory. The file descriptor must be accessible by the server process.
offset: Offset in memory.
size: Size of memory to scan.
timeout_seconds: Timeout in seconds.
Returns:
A `ScanResult` proto.
"""
request = memory_pb2.ProcessScanRequest(
serialized_file_descriptor=serialized_file_descriptor,
offset=offset,
size=size,
timeout_seconds=timeout_seconds)
response = ProcessScanHandler(self._connection).Run(request)
return response
def CreateMemoryClient(connection: communication.Connection) -> Client:
"""Creates a memory client."""
return Client(connection)
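# Hedged usage sketch (not part of GRR itself): given an already-established
# communication.Connection, upload a YARA rule and scan one memory region.
# The connection setup and the file descriptor value are assumed to come from
# the surrounding unprivileged-process plumbing.
def _example_scan(connection: communication.Connection) -> memory_pb2.ProcessScanResponse:
  client = CreateMemoryClient(connection)
  client.UploadSignature('rule demo { strings: $a = "demo" condition: $a }')
  return client.ProcessScan(
      serialized_file_descriptor=0,  # placeholder; must refer to real process memory
      offset=0,
      size=4096,
      timeout_seconds=30)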
| 2.578125
| 3
|
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/api/graphql/views.py
|
Anyesh/cookiecutter-flask-all-in-one
| 35
|
12785078
|
<gh_stars>10-100
from flask import Blueprint
from flask_graphql import GraphQLView
from {{cookiecutter.app_name}}.api.graphql.schemas import schema
bp = Blueprint('graphql', __name__)
def graphql():
view = GraphQLView.as_view('graphql', schema=schema, graphiql=True)
return view
bp.add_url_rule('/graphql', view_func=graphql())
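# Hedged usage sketch: the blueprint above is expected to be registered on the
# Flask app elsewhere in this cookiecutter template, roughly:
#   app.register_blueprint(bp)
# after which the GraphiQL UI is served at /graphql.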
| 2.0625
| 2
|
bbt_bpm/bbt_bpm/report/obsolescence_report/obsolescence_report.py
|
rathodjitendra/BBT-I2E
| 0
|
12785079
|
<reponame>rathodjitendra/BBT-I2E
# Copyright (c) 2013, Bakelite and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import has_common
import json
from six import StringIO, string_types
from datetime import date
from frappe.utils import cstr, getdate, split_emails, add_days, today, get_last_day, get_first_day, month_diff, nowdate, cint, flt, date_diff
from six import iteritems
import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
import copy
def execute(filters=None):
columns, data = get_columns(), get_data(filters)
return columns, data
def get_data(filters):
if filters.get("company"):
company_currency = erpnext.get_company_currency(filters.get("company"))
else:
company_currency = frappe.db.get_single_value("Global Defaults", "default_currency")
items = get_items(filters)
sle = get_stock_ledger_entries(filters, items)
iwb_map = get_item_warehouse_map(filters, sle)
item_map = get_item_details(items, sle, filters)
item_reorder_detail_map = get_item_reorder_details(item_map.keys())
stock_qty=stock_qty_before_some_month(filters)
data = []
conversion_factors = {}
_func = lambda x: x[1]
for (company, item, warehouse) in sorted(iwb_map):
if item_map.get(item):
qty_dict = iwb_map[(company, item, warehouse)]
item_reorder_level = 0
item_reorder_qty = 0
if item + warehouse in item_reorder_detail_map:
item_reorder_level = item_reorder_detail_map[item + warehouse]["warehouse_reorder_level"]
item_reorder_qty = item_reorder_detail_map[item + warehouse]["warehouse_reorder_qty"]
last_stock_updated_date=frappe.db.sql("""SELECT item_code, posting_date from `tabStock Ledger Entry` where item_code='{0}' and company='{1}' and warehouse='{2}' ORDER BY posting_date DESC """.format(item, company, warehouse), as_dict=1)
if item in stock_qty and stock_qty.get(item).get("warehouse")==warehouse:
report_data = {
'currency': company_currency,
'item_code': item,
'warehouse': warehouse,
'company': company,
'reorder_level': item_reorder_level,
'reorder_qty': item_reorder_qty,
'last_stock_updated_date':last_stock_updated_date[0].get("posting_date") if last_stock_updated_date[0] else "",
'days_60':stock_qty.get(item).get(2, 0),
'days_90':stock_qty.get(item).get(3, 0),
'days_180':stock_qty.get(item).get(6, 0),
'year_1':stock_qty.get(item).get(12, 0),
'year_2':stock_qty.get(item).get(24, 0)
}
else:
report_data = {
'currency': company_currency,
'item_code': item,
'warehouse': warehouse,
'company': company,
'reorder_level': item_reorder_level,
'reorder_qty': item_reorder_qty,
'last_stock_updated_date':last_stock_updated_date[0].get("posting_date") if last_stock_updated_date[0] else ""
}
report_data.update(item_map[item])
report_data.update(qty_dict)
data.append(report_data)
return data
def get_stock_ledger_entries(filters, items):
item_conditions_sql = ''
if items:
item_conditions_sql = ' and sle.item_code in ({})'\
.format(', '.join([frappe.db.escape(i, percent=False) for i in items]))
conditions = get_conditions(filters)
return frappe.db.sql("""
select
sle.item_code, warehouse, sle.posting_date, sle.actual_qty, sle.valuation_rate,
sle.company, sle.voucher_type, sle.qty_after_transaction, sle.stock_value_difference,
sle.item_code as name, sle.voucher_no, sle.stock_value, sle.name
from
`tabStock Ledger Entry` sle force index (posting_sort_index)
where sle.docstatus < 2 %s %s
order by sle.posting_date, sle.posting_time, sle.creation, sle.actual_qty""" % #nosec
(item_conditions_sql, conditions), as_dict=1)
def get_conditions(filters):
conditions = ""
# if not filters.get("from_date"):
# frappe.throw(_("'From Date' is required"))
# if filters.get("to_date"):
# conditions += " and sle.posting_date <= %s" % frappe.db.escape(filters.get("to_date"))
# else:
# frappe.throw(_("'To Date' is required"))
if filters.get("company"):
conditions += " and sle.company = %s" % frappe.db.escape(filters.get("company"))
if filters.get("warehouse"):
warehouse_details = frappe.db.get_value("Warehouse",
filters.get("warehouse"), ["lft", "rgt"], as_dict=1)
if warehouse_details:
conditions += " and exists (select name from `tabWarehouse` wh \
where wh.lft >= %s and wh.rgt <= %s and sle.warehouse = wh.name)"%(warehouse_details.lft,
warehouse_details.rgt)
# if filters.get("warehouse_type") and not filters.get("warehouse"):
# conditions += " and exists (select name from `tabWarehouse` wh \
# where wh.warehouse_type = '%s' and sle.warehouse = wh.name)"%(filters.get("warehouse_type"))
return conditions
def get_item_warehouse_map(filters, sle):
iwb_map = {}
from_date = getdate(filters.get("from_date"))
to_date = getdate(filters.get("to_date"))
float_precision = cint(frappe.db.get_default("float_precision")) or 3
for d in sle:
key = (d.company, d.item_code, d.warehouse)
if key not in iwb_map:
iwb_map[key] = frappe._dict({
"opening_qty": 0.0, "opening_val": 0.0,
"in_qty": 0.0, "in_val": 0.0,
"out_qty": 0.0, "out_val": 0.0,
"bal_qty": 0.0, "bal_val": 0.0,
"val_rate": 0.0
})
qty_dict = iwb_map[(d.company, d.item_code, d.warehouse)]
if d.voucher_type == "Stock Reconciliation":
qty_diff = flt(d.qty_after_transaction) - flt(qty_dict.bal_qty)
else:
qty_diff = flt(d.actual_qty)
value_diff = flt(d.stock_value_difference)
# if d.posting_date < from_date:
# qty_dict.opening_qty += qty_diff
# qty_dict.opening_val += value_diff
# elif d.posting_date >= from_date and d.posting_date <= to_date:
# if flt(qty_diff, float_precision) >= 0:
# qty_dict.in_qty += qty_diff
# qty_dict.in_val += value_diff
# else:
# qty_dict.out_qty += abs(qty_diff)
# qty_dict.out_val += abs(value_diff)
qty_dict.val_rate = d.valuation_rate
qty_dict.bal_qty += qty_diff
qty_dict.bal_val += value_diff
iwb_map = filter_items_with_no_transactions(iwb_map, float_precision)
return iwb_map
def filter_items_with_no_transactions(iwb_map, float_precision):
for (company, item, warehouse) in sorted(iwb_map):
qty_dict = iwb_map[(company, item, warehouse)]
no_transactions = True
for key, val in iteritems(qty_dict):
val = flt(val, float_precision)
qty_dict[key] = val
if key != "val_rate" and val:
no_transactions = False
if no_transactions:
iwb_map.pop((company, item, warehouse))
return iwb_map
def get_item_details(items, sle, filters):
item_details = {}
if not items:
items = list(set([d.item_code for d in sle]))
if not items:
return item_details
cf_field = cf_join = ""
if filters.get("include_uom"):
cf_field = ", ucd.conversion_factor"
cf_join = "left join `tabUOM Conversion Detail` ucd on ucd.parent=item.name and ucd.uom=%s" \
% frappe.db.escape(filters.get("include_uom"))
res = frappe.db.sql("""
select
item.name, item.item_name, item.description, item.item_group, item.brand, item.stock_uom %s
from
`tabItem` item
%s
where
item.name in (%s)
""" % (cf_field, cf_join, ','.join(['%s'] *len(items))), items, as_dict=1)
for item in res:
item_details.setdefault(item.name, item)
if filters.get('show_variant_attributes', 0) == 1:
variant_values = get_variant_values_for(list(item_details))
item_details = {k: v.update(variant_values.get(k, {})) for k, v in iteritems(item_details)}
return item_details
def get_item_reorder_details(items):
item_reorder_details = frappe._dict()
if items:
item_reorder_details = frappe.db.sql("""
select parent, warehouse, warehouse_reorder_qty, warehouse_reorder_level
from `tabItem Reorder`
where parent in ({0})
""".format(', '.join([frappe.db.escape(i, percent=False) for i in items])), as_dict=1)
return dict((d.parent + d.warehouse, d) for d in item_reorder_details)
def get_items(filters):
conditions = []
if filters.get("item_code"):
conditions.append("item.name=%(item_code)s")
items = []
if conditions:
items = frappe.db.sql_list("""select name from `tabItem` item where {}"""
.format(" and ".join(conditions)), filters)
return items
def stock_qty_before_some_month(filters):
months_list = [2, 3, 6, 12, 24]
months_date = []
# months_date.append(getdate(today()))
for row in months_list:
_date = date.today() + relativedelta(months=-row)
months_date.append(getdate(_date))
new_list = {}
stock_dict = {}
for idx, v in enumerate(months_date):
month_len = len(months_date)-1
if month_len != idx:
sle_qty = frappe.db.sql("""select sle.item_code, sle.warehouse, sle.posting_date, ABS(sum(sle.actual_qty) - sum(sle.qty_after_transaction)) as bal_qty, sle.company from `tabStock Ledger Entry` sle where sle.company='{0}' and sle.posting_date BETWEEN '{1}' and '{2}' GROUP BY sle.item_code, sle.warehouse""".format(filters.get("company"), months_date[idx+1], v), as_dict=1)
else:
sle_qty = frappe.db.sql("""select distinct sle.item_code, sle.warehouse, sle.posting_date, ABS(sum(sle.actual_qty) - sum(sle.qty_after_transaction)) as bal_qty, sle.company from `tabStock Ledger Entry` sle where sle.company='{0}' and sle.posting_date < '{1}' GROUP BY sle.item_code, sle.warehouse""".format(filters.get("company"), v), as_dict=1)
for row in sle_qty:
if row.get("item_code") in stock_dict:
d=stock_dict[row.get("item_code")]
d[months_list[idx]]=row.get("bal_qty")
stock_dict[row.get("item_code")]=d
else:
row[months_list[idx]]=row.get("bal_qty")
stock_dict[row.get("item_code")] = row
return stock_dict
def get_columns():
return [
{
"label": _("Branch"),
"fieldname": "company",
"fieldtype": "Data",
"width": 150
},
{
"label": _("Warehouse"),
"fieldname": "warehouse",
"fieldtype": "Data",
"width": 120
},
{
"label": _("Book Code"),
"fieldname": "item_code",
"fieldtype": "Data",
"width": 120
},
{
"label": _("Book Description"),
"fieldname": "description",
"fieldtype": "Data",
"width": 120
},
{
"label": _("Last Stock Updated Date"),
"fieldname": "last_stock_updated_date",
"fieldtype": "Date",
"width": 150
},
{
"label": _("Available Stock"),
"fieldname": "bal_qty",
"fieldtype": "Data",
"width": 120
},
{
"label": _("0-30 Days"),
"fieldname": "bal_qty",
"fieldtype": "Data",
"width": 120
},
{
"label": _("60 Days"),
"fieldname": "days_60",
"fieldtype": "Data",
"width": 150
},
{
"label": _("90 Days"),
"fieldname": "days_90",
"fieldtype": "Data",
"width": 120
},
{
"label": _("180 Days"),
"fieldname": "days_180",
"fieldtype": "Data",
"width": 120
},
{
"label": _("1 Year"),
"fieldname": "year_1",
"fieldtype": "Data",
"width": 120
},
{
"label": _(">2 Year"),
"fieldname": "year_2",
"fieldtype": "Data",
"width": 150
}
]
| 1.710938
| 2
|
ubxlib/cid.py
|
albard00/ubxlib
| 3
|
12785080
|
<gh_stars>1-10
class UbxCID(object):
# UBX Class IDs
CLASS_NAV = 0x01
CLASS_ACK = 0x05
CLASS_CFG = 0x06
CLASS_UPD = 0x09
CLASS_MON = 0x0A
CLASS_ESF = 0x10
CLASS_MGA = 0x13
def __init__(self, cls, id):
super().__init__()
self.__cls = cls
self.__id = id
@property
def cls(self):
return self.__cls
@property
def id(self):
return self.__id
def __eq__(self, other):
return self.__cls == other.__cls and self.__id == other.__id
def __str__(self):
return f'cls:{self.__cls:02x} id:{self.__id:02x}'
def __hash__(self):
return hash((self.__cls, self.__id))
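if __name__ == '__main__':
    # Hedged usage sketch: UbxCID compares and hashes by (class, id), so it can
    # key dispatch tables for incoming frames. Message id 0x07 is illustrative.
    cid_nav_pvt = UbxCID(UbxCID.CLASS_NAV, 0x07)
    handlers = {cid_nav_pvt: lambda frame: None}
    assert UbxCID(0x01, 0x07) == cid_nav_pvt
    assert UbxCID(0x01, 0x07) in handlers
    print(cid_nav_pvt)  # cls:01 id:07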
| 2.5
| 2
|
mpqa.py
|
trondth/master
| 0
|
12785081
|
<reponame>trondth/master<gh_stars>0
import ast
import os
import random
import re
from collections import OrderedDict
from future_builtins import zip
import shlex
from masters_project_config import *
def pairwise(iterable):
"""
@param iterable: List or other iterable
@return: List of tuples
"""
a = iter(iterable)
return zip(a, a)
class MPQA:
re_attr = re.compile(r'("[^"]*"|[\w-]+)')
def __init__(self, name="", mpqa_root=DATA_PREFIX):
self.docs = OrderedDict({})
self.name = name
self.mpqa_root = mpqa_root
if self.mpqa_root[:-1] != '/':
self.mpqa_root += '/'
def annotations_from_file(self, document):
"""
        Returns list of (annotation tuple, annotated text) pairs from a gateman.mpqa.lre.2.0 file.
        @param document: String referring to unique doc, example: 20010927/23.18.15-25073
        @return: List of (annotation tuple, annotated text) pairs.
"""
annotations = []
f = file(self.mpqa_root + document, 'r')
tmp = f.read()
f.close()
for tuple in self.getmpqatuples(document, 'annotations'):
annotations.append((tuple, tmp[tuple[1]]))
        annotations.sort(key=lambda x: (x[0][1].start))
return annotations
def annotation_tuples_from_file(self, document):
"""
Returns list of tuples from a gateman.mpqa.2.0-file, sorted on start positions.
        @param document: String referring to unique doc, example: 20010927/23.18.15-25073
@return: List of tuples from annotation file.
"""
annotations = []
f = file(self.mpqa_root + document, 'r')
tmp = f.read()
f.close()
for tuple in self.getmpqatuples(document, 'annotations'):
annotations.append(tuple)
#print annotations
annotations.sort(key=lambda x: (x[1].start))
#print annotations
return annotations
def sentences_from_file(self, document):
"""
Returns list with sentence objects from a gatesentences.mpqa.2.0-file.
        @param document: String referring to unique doc, example: 20010927/23.18.15-25073
@return: List of sentence objects.
"""
sentences = []
f = file(self.mpqa_root + document, 'r')
tmp = f.read()
f.close()
for tuple in self.getmpqatuples(document, 'sentence'):
sentences.append((tmp[tuple[1]],tuple))
sentences.sort(key=lambda x: (x[1][1].start))
return sentences
def expandvariant(self, variant):
if variant == 'sentence':
return 'gatesentences.mpqa.2.0'
if variant == 'annotations':
return 'gateman.mpqa.lre.2.0'
def tuplefrommpqa(self, line):
"""
@param Line: from mpqatuples
@return: Tuple with slice object as second element
"""
tmp = line.split(None, 4)
tmp[1] = (lambda x: slice(int(x[0]), int(x[1])))(tmp[1].split(','))
d = {}
if len(tmp) > 4:
#print tmp[4].split() # = (lambda x: slice(int(x[0]), int(x[1])))(tmp[1].split(','))
tmp[4] = self.attributesfrommpqa(tmp[4])
return tmp
def attributesfrommpqa(self, attributes):
"""
@param attributes: String with attributes
@return: Dictionary with attributes
"""
tmp = self.re_attr.findall(attributes)
if len(tmp) % 2 != 0:
print "Attribute string in MPQA file not wellformed. ({})".format(attributes)
return {key: value.strip('"') for (key, value) in pairwise(tmp)}
re_docs = re.compile(r'/docs/')
def getmpqatuples(self, document, variant):
"""
        @param document: String referring to unique doc, example: 20010927/23.18.15-25073
@param variant:
@return: List of 5-tuples from data in the MPQA-document
"""
variant = self.expandvariant(variant)
tuples = []
f = file(self.mpqa_root + self.re_docs.sub(r'/man_anns/', document) + '/' + variant, 'r')
for line in f:
if line[0] != '#':
tuples.append(self.tuplefrommpqa(line))
f.close()
return tuples
if __name__ == "__main__":
m = MPQA("Foo")
#testdoc = "database.mpqa.2.0/docs/20020510/21.50.13-28912"
#testdoc = "database.mpqa.2.0/docs/20010630/00.48.42-17806"
#testdoc = 'database.mpqa.2.0/docs/20020314/20.23.54-19638'
#testdoc = 'database.mpqa.2.0/docs/ula/20000410_nyt-NEW'
#testdoc = 'database.mpqa.2.0/docs/ula/20000815_AFP_ARB.0084.IBM-HA-NEW'
testdoc = 'database.mpqa.2.0/docs/20020315/20.42.26-19148'
#testdoc = 'database.mpqa.2.0/docs/20020331/21.09.25-22686'
s = m.getmpqatuples(testdoc, 'sentence')
s2 = m.annotation_tuples_from_file(testdoc)
#s = m.sentences_from_file(testdoc)
#print s
| 2.453125
| 2
|
bookyourcab/models.py
|
shankarj67/bookyouruber
| 0
|
12785082
|
from django.db import models
# Create your models here.
class Uber(models.Model):
source = models.CharField(max_length=150)
destination = models.CharField(max_length=150)
time = models.TimeField()
email = models.EmailField()
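# Hedged usage sketch (e.g. from the Django shell); field values are illustrative:
#   Uber.objects.create(source="Airport", destination="Downtown",
#                       time="14:30", email="rider@example.com")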
| 2.234375
| 2
|
0062 The Accountant.py
|
ansabgillani/binarysearchcomproblems
| 1
|
12785083
|
<filename>0062 The Accountant.py
class Solution:
def solve(self, n):
ans = []
while n > 0:
ans.append(chr(ord("A") + (n-1)%26))
n = (n-1)//26
ans.reverse()
return "".join(ans)
| 3.328125
| 3
|
pytorch/sje_gmpool.py
|
tchittesh/zsl-project
| 0
|
12785084
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from utils import normalizeFeaturesL2
class SJE_GMPool(nn.Module):
def __init__(self, img_feature_size, num_attributes, margin):
super(SJE_GMPool, self).__init__()
self.margin = margin
# copying initialization technique from original code
W = torch.rand(img_feature_size, num_attributes, requires_grad=True)
W = normalizeFeaturesL2(W.permute(1,0)).permute(1,0)
self.W = nn.Parameter(W, requires_grad=True)
power = torch.zeros(num_attributes, requires_grad=True)
self.power = nn.Parameter(power, requires_grad=True)
self.example_indices = random.choices(range(1000), k=2) # this is a hack
def get_power(self):
c = float(10)
p = self.power * 3
power = torch.zeros_like(p)
power[p>=2] = c
power = torch.where((0<=p)&(p<2), (c-1)/2*p+1, power)
power = torch.where((-1<=p)&(p<0), 1/((1-c)*p+1), power)
power = torch.where((-1.5<=p)&(p<-1), -1/(2*(c-1)*(p+1.5)+1), power)
power = torch.where((-2<=p)&(p<-1.5), 2*(c-1)*(p+2)-c, power)
power[p<-2] = -c
assert torch.all(power != 0)
return power
def apply_gmpool(self, projected_feats):
'''
projected_feats: torch.Tensor of shape [B, num_attributes, H, W]
returns pooled features of shape [B, num_attributes]
'''
m = projected_feats.min()
p = self.get_power().view(1,-1,1,1)
if m < 0:
pooled = (projected_feats-m+1e-3).pow(p).mean(2, keepdim=True).mean(3, keepdim=True).pow(1/p)+m+1e-3
else:
pooled = projected_feats.pow(p).mean(2, keepdim=True).mean(3, keepdim=True).pow(1/p)
return pooled.squeeze(2).squeeze(2)
def forward(self, *args, **kwargs):
if self.training:
return self.forward_train(*args, **kwargs)
else:
return self.forward_test(*args, **kwargs)
def forward_train(self, img_features, all_class_attributes, class_attributes, labels):
'''
img_features: torch.Tensor of shape [B, img_feature_size, H, W]
class_attributes: torch.Tensor of shape [B, num_attributes]
labels: torch.Tensor of shape [B]
all_class_attributes: torch.Tensor of shape [num_attributes, num_classes]
returns scalar loss
'''
XW = torch.tensordot(img_features, self.W, [[1],[0]]).permute(0,3,1,2) # shape [B, num_attributes, H, W]
XW = self.apply_gmpool(XW) # shape [B, num_attributes]
if torch.any(XW.isnan()):
print("YIKES")
XW = normalizeFeaturesL2(XW) # normalize each projected vector to have unit length
scores = torch.matmul(XW.unsqueeze(1), all_class_attributes).squeeze(1) # shape [B, num_classes]
gt_class_scores = scores[torch.arange(len(scores)), labels].unsqueeze(1) # shape [B, 1]
# add margin to scores
losses = self.margin + scores - gt_class_scores # shape [B, num_classes]
losses[torch.arange(len(losses)), labels] = 0.0
losses = losses.max(dim=1)[0] # shape [B]
return losses.clamp(0).mean()
def forward_test(self, img_features, all_class_attributes):
XW = torch.tensordot(img_features, self.W, [[1],[0]]).permute(0,3,1,2) # shape [B, num_attributes, H, W]
XW = self.apply_gmpool(XW) # shape [B, num_attributes]
if torch.any(XW.isnan()):
print("YIKES")
XW = normalizeFeaturesL2(XW) # normalize each projected vector to have unit length
scores = torch.matmul(XW.unsqueeze(1), all_class_attributes).squeeze(1) # shape [B, num_classes]
return scores.argmax(1) # shape [B]
def log_spatial_examples(self, dataloader, device, writer, split, epoch):
dataset = dataloader.dataset
self.eval()
classes = dataset.classes
for i, idx in enumerate(self.example_indices):
# unpack data
data = dataset[idx]
img_features = data['img'].to(device).unsqueeze(0)
gt_label = classes[data['label']]
all_class_attributes = dataset.class_attributes
gt_class_attributes = all_class_attributes[:,data['label']]
img = mpimg.imread(dataset.get_img_path(idx))
# forward pass
XW = torch.tensordot(img_features, self.W, [[1],[0]]).permute(0,3,1,2).squeeze() # shape [num_attributes, H, W]
for spatial_dist, gt_attribute_score, attribute_name in zip(XW, gt_class_attributes, dataset.attributes):
fig, (ax1,ax2) = plt.subplots(nrows=1,ncols=2)
ax1.set_title(f"Attribute: {attribute_name}\nGT Attribute Value: {gt_attribute_score:.4f}")
mappable = ax1.imshow(spatial_dist.cpu().detach().numpy(), vmin=-0.25, vmax=0.25)
fig.colorbar(mappable, ax=ax1)
ax2.set_title(f"Original Image({gt_label})")
ax2.imshow(img)
plt.tight_layout()
writer.add_figure(f"Spatial Examples ({split})/{attribute_name}-{i}", fig, epoch)
plt.close(fig)
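if __name__ == "__main__":
    # Hedged smoke-test sketch; the shapes (512-d features on a 7x7 grid,
    # 85 attributes, 10 classes, batch of 4) are illustrative assumptions.
    model = SJE_GMPool(img_feature_size=512, num_attributes=85, margin=1.0)
    feats = torch.randn(4, 512, 7, 7)
    all_attrs = torch.randn(85, 10)
    attrs = all_attrs[:, :4].t()     # per-sample attributes, shape [4, 85]
    labels = torch.tensor([0, 1, 2, 3])
    model.train()
    loss = model(feats, all_attrs, attrs, labels)
    loss.backward()
    model.eval()
    preds = model(feats, all_attrs)  # shape [4]
    print(loss.item(), preds.shape)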
| 2.28125
| 2
|
market/migrations/0008_auto_20161206_2042.py
|
zhanhailiu/Market
| 39
|
12785085
|
<filename>market/migrations/0008_auto_20161206_2042.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-06 12:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('market', '0007_goods_publish_time'),
]
operations = [
migrations.AlterField(
model_name='goods',
name='goods_phone',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='goods',
name='goods_qq',
field=models.IntegerField(blank=True, null=True),
),
]
| 1.390625
| 1
|
pyscreenshot/__init__.py
|
audreyr/pyscreenshot
| 1
|
12785086
|
<gh_stars>1-10
from easyprocess import EasyProcess
from pyscreenshot.backendloader import BackendLoader
from PIL import Image
import logging
import tempfile
import sys
__version__ = '0.3.2'
log = logging.getLogger(__name__)
log.debug('version=' + __version__)
def _grab(to_file, childprocess=False, backend=None, bbox=None, filename=None):
if childprocess:
log.debug('running "%s" in child process' % backend)
if not to_file:
f = tempfile.NamedTemporaryFile(
suffix='.png', prefix='pyscreenshot_childprocess_')
filename = f.name
params = ["'%s'" % (filename), 'childprocess=False']
if backend:
params += ["backend='%s'" % (backend)]
params = ','.join(params)
EasyProcess([sys.executable,
'-c',
"import pyscreenshot; pyscreenshot.grab_to_file(%s)" % (
params),
]).check()
if not to_file:
im = Image.open(filename)
if bbox:
im = im.crop(bbox)
return im
else:
if backend:
BackendLoader().force(backend)
backend_obj = BackendLoader().selected()
if to_file:
return backend_obj.grab_to_file(filename)
else:
return backend_obj.grab(bbox)
def grab(bbox=None, childprocess=False, backend=None):
'''Copy the contents of the screen to PIL image memory.
:param bbox: optional bounding box
    :param childprocess: pyscreenshot can cause an error
        if it is used on multiple different virtual displays
        and the back-end does not run in a separate process.
Some back-ends are always different processes: scrot, imagemagick
:param backend: back-end can be forced if set (examples:scrot, wx,..),
otherwise back-end is automatic
'''
return _grab(to_file=False, childprocess=childprocess, backend=backend, bbox=bbox)
def grab_to_file(filename, childprocess=False, backend=None):
'''Copy the contents of the screen to a file.
:param filename: file for saving
:param childprocess: see :py:func:`grab`
:param backend: see :py:func:`grab`
'''
return _grab(to_file=True, childprocess=childprocess, backend=backend, filename=filename)
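# Hedged usage sketch:
#   import pyscreenshot
#   im = pyscreenshot.grab(bbox=(10, 10, 500, 500))   # PIL image of a region
#   pyscreenshot.grab_to_file('screenshot.png')
#   im.show()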
| 2.328125
| 2
|
scripts/hyphy_simulated.py
|
ThibaultLatrille/NucleotideBias
| 0
|
12785087
|
<reponame>ThibaultLatrille/NucleotideBias
# GLOBAL IMPORTS
import argparse
import pandas as pd
from plot_module import *
from hyphy_format import *
import statsmodels.api as sm
from stat_simulated import stats_from_ali, open_fasta_file, omega_pairwise_from_profile
from scipy.linalg import null_space
def plot_pairwise_matrices(predicted, estimated, output, estimated_list=None):
if estimated_list is None: estimated_list = []
fig, axs = plt.subplots(1, 3, figsize=(16, 6))
cbar = fig.colorbar(axs[0].imshow(predicted), ax=axs[0], orientation='horizontal', fraction=.05)
cbar.ax.tick_params(labelsize=font_size)
axs[0].set_title('$\\left\\langle 2 N_{\\mathrm{e}} \\mathbb{P}_{\\mathrm{fix}} (x \\to y) \\right\\rangle $ '
'predicted between\npairs of amino-acids', fontsize=font_size * 1.2)
cbar = fig.colorbar(axs[1].imshow(estimated), ax=axs[1], orientation='horizontal', fraction=.05)
cbar.ax.tick_params(labelsize=font_size)
axs[1].set_title('$\\widehat{\\omega}_{x,y}$ estimated between\npairs of amino-acids', fontsize=font_size * 1.2)
for index in [0, 1]:
# We want to show all ticks...
axs[index].set_xticks(np.arange(len(amino_acids)))
axs[index].set_yticks(np.arange(len(amino_acids)))
# ... and label them with the respective list entries
axs[index].set_xticklabels(amino_acids)
axs[index].set_yticklabels(amino_acids)
# Loop over data dimensions and create text annotations.
'''
for i in range(len(amino_acids)):
for j in range(len(amino_acids)):
text = axs[index].text(j, i, "{0:.2f}".format(predicted[i, j] if index == 0 else estimated[i, j]),
ha="center", va="center", color="w")
'''
filt = np.isfinite(predicted) & np.isfinite(estimated)
x = predicted[filt].flatten()
y = estimated[filt].flatten()
axs[2].scatter(x, y)
axs[2].set_title('Fixation probabilities', fontsize=font_size * 1.2)
model = sm.OLS(y, sm.add_constant(x))
results = model.fit()
b, a = results.params[0:2]
idf = np.linspace(min(x), max(x), 100)
axs[2].plot(idf, a * idf + b, '-', color=BLUE,
label=r"$y={0:.2g}x {3} {1:.2g}$ ($r^2={2:.2g})$".format(a, abs(b), results.rsquared,
"+" if float(b) > 0 else "-"))
if estimated_list and len(estimated_list) > 5:
# yerr = np.array([y - np.percentile(estimated_list, 5, axis=0)[filt].flatten(), np.percentile(estimated_list, 95, axis=0)[filt].flatten() - y])
yerr = 1.96 * np.std(estimated_list, axis=0)[filt].flatten() / np.sqrt(len(estimated_list))
axs[2].errorbar(x, y, yerr=yerr, fmt='o', marker=None, mew=0, ecolor=BLUE, lw=0.5,
zorder=-1)
axs[2].set_xlabel('True $\\left\\langle 2 N_{\\mathrm{e}} \\mathbb{P}_{\\mathrm{fix}} (x \\to y) \\right\\rangle $',
fontsize=font_size * 1.2)
axs[2].set_ylabel('Estimated $\\widehat{\\omega}_{x,y}$', fontsize=font_size * 1.2)
axs[2].legend(fontsize=font_size * 0.8, loc="lower right")
fig.tight_layout()
plt.savefig(output + ".pdf", format="pdf")
plt.savefig(output + ".png", format="png")
plt.clf()
plt.close('all')
def plot_scaling(list_plot, outname, ylabel, plot_id=True, yscale="log", loc="lower right", legend_fontsize=14):
fig, ax = plt.subplots()
if plot_id:
ax.plot(lambda_mut, lambda_mut, color="black", linestyle='-', linewidth=2, label="y=x")
for param in list_plot:
y_list = np.array([np.mean(k[param["experiment"]]) for k in nested_dict.values()])
ax.plot(lambda_mut, y_list, linestyle=param["linestyle"], label=param["label"],
color=param["color"], linewidth=param["linewidth"])
reps_set_len = set([len(k[param["experiment"]]) for k in nested_dict.values()])
assert (len(reps_set_len) == 1)
reps_len = reps_set_len.pop()
if reps_len > 2:
std = np.array([np.std(k[param["experiment"]]) for k in nested_dict.values()])
yerr = 1.96 * std / np.sqrt(reps_len)
lower = y_list - yerr
upper = y_list + yerr
ax.fill_between(lambda_mut, lower, upper, alpha=0.3, color=param["color"], facecolor=param["color"])
lambda_inf = np.array([np.mean(k["lambda_inf"]) for k in nested_dict.values()])
out_file = open("{0}/{1}.tsv".format(args.output, outname), "w")
out_file.write("λ (precision in %)\n{0:.2f}%".format(100 * np.mean(np.abs((lambda_inf - lambda_mut)) / lambda_mut)))
out_file.close()
ax.set_xscale('log')
ax.set_xlabel('$\\lambda$ used for the simulation', fontsize=font_size)
ax.set_yscale(yscale)
ax.set_ylabel(ylabel, fontsize=font_size)
ax.get_xaxis().get_major_formatter().labelOnlyBase = False
ax.legend(fontsize=legend_fontsize, loc=loc)
model_name = {"GTR": "General time-reversible (GTR) on third positions", "MG": "Muse & Gaut codon model",
"MF": "Mean-field codon model"}
ax.set_title(model_name[args.model], fontsize=font_size)
ax.set_xticks([0.2, 1, 5])
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
if yscale == "log":
ax.set_yticks([0.2, 1, 5])
ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.xticks(fontsize=legend_size)
plt.yticks(fontsize=legend_size)
plt.tight_layout()
plt.savefig("{0}/{1}.pdf".format(args.output, outname), format="pdf")
plt.savefig("{0}/{1}.png".format(args.output, outname), format="png")
plt.clf()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-o', '--output', required=True, type=str, dest="output")
parser.add_argument('-i', '--trace', required=True, type=str, nargs='+', dest="input")
parser.add_argument('-m', '--model', required=True, type=str, dest="model")
parser.add_argument('-n', '--mutation_matrix', required=False, type=str, default="False", dest="mutation_matrix")
args = parser.parse_args()
    freqs = np.ones(4) * 0.25  # uniform default; overwritten below in both branches
if args.mutation_matrix != "False":
df = pd.read_csv(args.mutation_matrix, sep="\t")
nuc_matrix = np.zeros((len(nucleotides), len(nucleotides)))
for id_s, nuc_source in enumerate(nucleotides):
for id_t, nuc_target in enumerate(nucleotides):
if nuc_source == nuc_target: continue
sub = "q_" + nuc_source + nuc_target
nuc_matrix[id_s, id_t] = df[sub].values[0]
nuc_matrix[id_s, id_s] -= nuc_matrix[id_s, id_t]
freqs = null_space(nuc_matrix.T).T[0]
freqs /= freqs.sum()
assert (np.sum(np.abs(np.dot(freqs, nuc_matrix))) < 1e-5)
nested_dict = defaultdict(lambda: defaultdict(lambda: list()))
predicted_dico = {}
estimated_array = []
for batch in args.input:
at_gc_pct = float(batch.split("/")[-1].split("_")[0])
replicate = int(batch.split("/")[-1].split("_")[1])
if args.mutation_matrix == "False":
freqs = [(at_gc_pct if nuc in weak_nucleotides else 1.0) for nuc in nucleotides]
freqs = np.array(freqs) / sum(freqs)
alpha = float(batch.split("/")[-2])
exp = batch.replace(args.model + "_run.bf_hyout.txt", "exp")
species, alignment = open_fasta_file(exp + (".ThirdPos.fasta" if args.model == "GTR" else ".fasta"))
ali_dico = stats_from_ali(alignment)
nested_dict[at_gc_pct]["AT/GC_obs"].append(ali_dico["at_over_gc"])
if args.model != "GTR":
nested_dict[at_gc_pct]["AT/GC_1_obs"].append(ali_dico["at_over_gc_1"])
nested_dict[at_gc_pct]["AT/GC_2_obs"].append(ali_dico["at_over_gc_2"])
nested_dict[at_gc_pct]["AT/GC_3_obs"].append(ali_dico["at_over_gc_3"])
hyphy_dico = dico_from_file(batch)
format_hyphy_dico(hyphy_dico, args.model)
if args.model == "MF":
profile_path = "/".join(batch.split("/")[:-1]) + "_profile.prefs"
key = profile_path + str(at_gc_pct)
if key not in predicted_dico:
predicted_dico[key] = omega_pairwise_from_profile(profile_path, freqs)
estimated_array.append(omega_pairwise_from_hyphy(hyphy_dico))
# plot_pairwise_matrices(predicted_dico[key], estimated_array[-1], "{0}/omega.{1}_{2}".format(args.output, at_gc_pct, replicate))
results_dico = {k: v[0] for k, v in pd.read_csv(exp + ".tsv", sep='\t').items()}
nested_dict[at_gc_pct]["w_obs"].append(results_dico["dnd0_event_tot"])
if args.model != "GTR":
nested_dict[at_gc_pct]["w_inf"].append(hyphy_dico["w"])
if ("pnG" in hyphy_dico) and ("pnG" in hyphy_dico):
gc_pct = hyphy_dico["pnG"] + hyphy_dico["pnC"]
nested_dict[at_gc_pct]["lambda_inf"].append((1 - gc_pct) / gc_pct)
atgc_tot, atgc_1, atgc_2, atgc_3 = equilibrium_lambda(hyphy_dico)
nested_dict[at_gc_pct]["AT/GC_inf"].append(atgc_tot)
nested_dict[at_gc_pct]["AT/GC_1_inf"].append(atgc_1)
nested_dict[at_gc_pct]["AT/GC_2_inf"].append(atgc_2)
nested_dict[at_gc_pct]["AT/GC_3_inf"].append(atgc_3)
if args.model == "MF":
predicted_mean = np.mean(list(predicted_dico.values()), axis=0)
estimated_mean = np.mean(estimated_array, axis=0)
plot_pairwise_matrices(predicted_mean, estimated_mean, "{0}/mean.omega".format(args.output), estimated_array)
if args.model != "GTR":
f = open("{0}/omega.tsv".format(args.output), "w")
f.write("ω (precision in %)" + ("\tλ\n" if len(nested_dict) > 1 else "\n"))
for lambda_mut in nested_dict.keys():
omega_obs = np.array(nested_dict[lambda_mut]["w_obs"])
omega_inf = np.array(nested_dict[lambda_mut]["w_inf"])
f.write("{0:.2f}".format(100 * np.mean(np.abs((omega_inf - omega_obs)) / omega_obs)) + (
"\t{0}".format(lambda_mut) if len(nested_dict) > 1 else ""))
f.close()
lambda_mut = list(nested_dict.keys())
if len(lambda_mut) < 1:
exit(0)
list_plots = list()
list_plots.append(
{"experiment": "AT/GC_obs", "color": BLUE, "linestyle": '-', "linewidth": 2, "label": "AT/GC observed"})
list_plots.append({"experiment": "lambda_inf", "color": GREEN, "linestyle": '--', "linewidth": 2,
"label": "$\\widehat{\\lambda}$ inferred"})
list_plots.append({"experiment": "AT/GC_inf", "color": YELLOW, "linestyle": '--', "linewidth": 2,
"label": "AT/GC predicted"})
plot_scaling(list_plots, "lambda", '$\\lambda$ estimated')
if args.model == "GTR":
exit(0)
list_plots = list()
list_plots.append({"experiment": "AT/GC_1_obs", "color": BLUE, "linestyle": '-', "linewidth": 2,
"label": "AT/GC observed"})
list_plots.append({"experiment": "AT/GC_1_inf", "color": YELLOW, "linestyle": '--', "linewidth": 3,
"label": "AT/GC predicted"})
plot_scaling(list_plots, "obs_atgc_1", "AT/GC at first position")
list_plots = list()
list_plots.append({"experiment": "AT/GC_2_obs", "color": BLUE, "linestyle": '-', "linewidth": 2,
"label": "AT/GC observed"})
list_plots.append({"experiment": "AT/GC_2_inf", "color": YELLOW, "linestyle": '--', "linewidth": 3,
"label": "AT/GC predicted"})
plot_scaling(list_plots, "obs_atgc_2", "AT/GC at second position")
list_plots = list()
list_plots.append({"experiment": "AT/GC_3_obs", "color": BLUE, "linestyle": '-', "linewidth": 2,
"label": "AT/GC observed"})
list_plots.append({"experiment": "AT/GC_3_inf", "color": YELLOW, "linestyle": '--', "linewidth": 2,
"label": "AT/GC predicted"})
plot_scaling(list_plots, "obs_atgc_3", "AT/GC at third position")
list_plots = list()
list_plots.append(
{"experiment": "w_obs", "color": BLUE, "linestyle": '-', "linewidth": 2, "label": "$\\omega$ of simulation"})
list_plots.append({"experiment": "w_inf", "color": GREEN, "linestyle": '--', "linewidth": 2,
"label": "$\\widehat{\\omega}$ inferred"})
plot_scaling(list_plots, "omega", '$\\omega$', plot_id=False, yscale="linear", loc="upper right")
| 2.5625
| 3
|
src/h_matchers/matcher/number.py
|
hypothesis/h-matcher
| 0
|
12785088
|
"""A collection of matchers for various number types."""
# pylint: disable=too-few-public-methods
from decimal import Decimal
from h_matchers.matcher.core import Matcher
class AnyNumber(Matcher):
"""Matches any number."""
_types = (int, float, complex, Decimal)
_type_description = "number"
def __init__(self):
self.conditions = []
super().__init__("dummy", self.assert_equal_to)
def assert_equal_to(self, other):
# Ints are also booleans
# pylint: disable=compare-to-zero
assert other is not True and other is not False, "Not a boolean"
# Check it's the right type
assert isinstance(other, self._types)
# Apply all the different conditions
for label, test in self.conditions:
assert test(other), label
return True
def not_equal_to(self, value):
"""Constrain this number to be not equal to a number."""
return self._add_condition(f"!= {value}", lambda other: other != value)
def truthy(self):
"""Constrain this number to be truthy."""
return self._add_condition("truthy", bool)
def falsy(self):
"""Constrain this number to be falsy."""
return self._add_condition("falsy", lambda other: not bool(other))
def _add_condition(self, description, test):
self.conditions.append((description, test))
return self
def __str__(self):
parts = [self._type_description]
for condition_label, _ in self.conditions:
parts.append(condition_label)
if len(parts) > 3:
parts[-1] = f"and {parts[-1]}"
return f"** any {', '.join(parts)} **"
class AnyReal(AnyNumber):
"""Matches any real number."""
_types = (int, float, Decimal)
# We're going to refer to this as just a "number" as it's what we're going
# to work on 99.9% of the time
_type_description = "number"
def less_than(self, value):
"""Constrain this number to be less than a number."""
return self._add_condition(f"<{value}", lambda other: other < value)
def less_than_or_equal_to(self, value):
"""Constrain this number to be less than or equal a number."""
return self._add_condition(f">={value}", lambda other: other <= value)
def greater_than(self, value):
"""Constrain this number to be greater than a number."""
return self._add_condition(f">{value}", lambda other: other > value)
def greater_than_or_equal_to(self, value):
"""Constrain this number to be greater than or equal to a number."""
return self._add_condition(f">={value}", lambda other: other >= value)
def multiple_of(self, value):
"""Constrain this number to be a multiple of a number."""
return self._add_condition(
f"multiple of {value}", lambda other: not other % value
)
def even(self):
"""Constrain this number to be even."""
return self.multiple_of(2)
def odd(self):
"""Constrain this number to be odd."""
return self._add_condition("odd", lambda other: other % 2 == 1)
def approximately(self, value, error_factor=0.05):
"""Constrain this number to be approximately a number."""
return self._add_condition(
f"~ {value} ({error_factor})",
lambda other: abs(value - other) <= error_factor * float(value),
)
def __lt__(self, value):
return self.less_than(value)
def __le__(self, value):
return self.less_than_or_equal_to(value)
def __gt__(self, value):
return self.greater_than(value)
def __ge__(self, value):
return self.greater_than_or_equal_to(value)
class AnyInt(AnyReal):
"""Matches any integer."""
_types = (int,)
_type_description = "integer"
class AnyFloat(AnyReal):
"""Matches any float."""
_types = (float,)
_type_description = "float"
class AnyDecimal(AnyReal):
"""Matches any Decimal."""
_types = (Decimal,)
_type_description = "decimal"
class AnyComplex(AnyNumber):
"""Matches any complex number."""
_types = (complex,)
_type_description = "complex"
| 3.109375
| 3
|
turbo/__init__.py
|
blopker/turbo-django
| 0
|
12785089
|
<gh_stars>0
from django.db.models import Model
default_app_config = 'turbo.apps.TurboDjangoConfig'
def make_channel_name(model_label, pk):
return f"BROADCAST-{model_label}-{pk}".lower()
def channel_name_for_instance(instance: Model):
return make_channel_name(instance._meta.label, instance.pk)
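if __name__ == "__main__":
    # Hedged usage sketch: "library.Book" is an illustrative model label.
    assert make_channel_name("library.Book", 42) == "broadcast-library.book-42"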
| 2
| 2
|
src/py/4.3.4-Task-API-Multi-label.py
|
saibaldas/automl-in-action-notebooks
| 1
|
12785090
|
<filename>src/py/4.3.4-Task-API-Multi-label.py
"""shell
pip install -r https://raw.githubusercontent.com/datamllab/automl-in-action-notebooks/master/requirements.txt
"""
import tensorflow as tf
import autokeras as ak
"""
### Create synthetic multi-label dataset
"""
from sklearn.datasets import make_multilabel_classification
X, Y = make_multilabel_classification(
n_samples=100,
n_features=64,
n_classes=3,
n_labels=2,
allow_unlabeled=False,
random_state=1,
)
X = X.reshape((100, 8, 8))
X.shape, Y.shape
"""invisible
"""
x_train, x_test, y_train, y_test = X[:80], X[80:], Y[:80], Y[80:]
"""
### Run the ImageClassifier for multi-label classification
"""
# Initialize the image classifier.
clf = ak.ImageClassifier(
max_trials=10, multi_label=True, overwrite=True
)  # It tries up to ten candidate pipelines.
# Feed the image classifier with training data
# 20% of the data is used as validation data by default for tuning
# the process may run for a bit long time, please try to use GPU
clf.fit(x_train, y_train, epochs=3, verbose=2) # each model is trained for three epochs
"""
### Predict with the best model.
"""
predicted_y = clf.predict(x_test)
print("The prediction shape is: {}".format(predicted_y.shape))
print(
"The predicted labels of the first five instances are:\n {}".format(
predicted_y[:5, :]
)
)
"""invisible
"""
test_loss, test_acc = clf.evaluate(x_test, y_test, verbose=0)
print("Test accuracy: ", test_acc)
| 3.109375
| 3
|
oz/error_pages/uimodules.py
|
dailymuse/oz
| 36
|
12785091
|
"""UIModules for the error pages plugin"""
import oz
import base64
import pprint
import oz.error_pages
import tornado.web
import tornado.escape
TABLE_FORMAT = """
<table %s %s>
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
%s
</tbody>
</table>
"""
TABLE_ROW_FORMAT = """
<tr>
<td>%s %s</td>
<td class="code">%s</td>
</tr>
"""
@oz.uimodule
class DictTable(tornado.web.UIModule):
"""Renders an HTML table from a dict"""
def render(self, d, id=None, kls=None):
items = sorted(d.items())
if items:
rows = []
for k, v in items:
try:
escaped_val = tornado.escape.xhtml_escape(oz.error_pages.prettify_object(v))
rows.append(TABLE_ROW_FORMAT % (k, "", escaped_val))
except UnicodeDecodeError:
rows.append(TABLE_ROW_FORMAT % (k, "(in base64)", base64.b64encode(v)))
            return TABLE_FORMAT % ("id='%s'" % id if id else "", "class='%s'" % kls if kls else "", "\n".join(rows))
else:
return "<p>No data</p>"
| 2.59375
| 3
|
subsync.py
|
Xwaler/Subsync-headless
| 0
|
12785092
|
import os
import time
import shlex
import shutil
import requests
import threading
import json
from watchdog.observers import Observer
from watchdog.events import FileSystemEvent, FileSystemEventHandler
from subprocess import check_call, DEVNULL, check_output, STDOUT, CalledProcessError
BAZARR_URL = os.environ.get('BAZARR_URL')
BAZARR_API_KEY = os.environ.get('BAZARR_API_KEY')
BAZARR_USERNAME = os.environ.get('BAZARR_USERNAME')
BAZARR_PASSWORD = os.environ.get('BAZARR_PASSWORD')
NUM_WORKERS = int(os.environ.get('NUM_WORKERS')) if os.environ.get('NUM_WORKERS') else 1
JOBS_FOLDER = '/.config/jobs'
FAILED_JOBS_FOLDER = '/.config/failed_jobs'
if not os.path.exists(JOBS_FOLDER):
os.mkdir(JOBS_FOLDER)
if not os.path.exists(FAILED_JOBS_FOLDER):
os.mkdir(FAILED_JOBS_FOLDER)
event_lock = threading.Lock()
last_file_event = 0
last_event = None
worker_sem = threading.Semaphore(NUM_WORKERS)
working_lock = threading.Lock()
working = set()
class AnyEventHandler(FileSystemEventHandler):
def on_any_event(self, event):
global last_file_event
global last_event
event_lock.acquire()
t = time.time()
if t > last_file_event:
last_file_event = t
if not isinstance(last_event, FileSystemEvent) or event.src_path != last_event.src_path:
print(event)
last_event = event
event_lock.release()
def sync(file):
global worker_sem
global working
with open(file, 'r') as f:
job = json.load(f)
if job['ref_lang'] == 'None':
job['ref_lang'] = 'eng'
subsync_ref_lang = job['ref_lang'] \
.replace('fra', 'fre') \
.replace('deu', 'ger') \
.replace('lit', 'eng') # Bazarr thinks YTS.LT releases are Lithuanian
subsync_sub_lang = job['sub_lang'] \
.replace('fra', 'fre') \
.replace('deu', 'ger') \
.replace('lit', 'eng') # Bazarr thinks YTS.LT releases are Lithuanian
print(f'Syncing {os.path.basename(file)}')
command = f'/subsync/bin/subsync --cli --verbose 0 sync ' \
f'--ref "{job["ref"]}" --ref-stream-by-type audio --ref-lang "{subsync_ref_lang}" ' \
f'--sub "{job["sub"]}" --sub-lang "{subsync_sub_lang}" ' \
f'--out "{job["sub"]}" --overwrite'
try:
check_call(shlex.split(command), stdout=DEVNULL, stderr=DEVNULL)
print(f'Successful subsync {os.path.basename(file)}')
if os.path.exists(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file))):
os.remove(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
except CalledProcessError as e:
print(f'Subsync failed {os.path.basename(file)} | {e}')
command = f'/usr/local/bin/ffsubsync "{job["ref"]}" -i "{job["sub"]}" ' \
f' --max-offset-seconds 600 --encoding UTF-8 --overwrite-input'
try:
stdout = check_output(shlex.split(command), stderr=STDOUT, encoding='UTF-8')
if 'Synchronization failed' in str(stdout):
raise CalledProcessError(2, shlex.split(command))
print(f'Successful ffsubsync {os.path.basename(file)}')
if os.path.exists(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file))):
os.remove(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
except CalledProcessError as e:
print(f'FFSubsync failed {os.path.basename(file)} | {e}')
print(f'Blacklisting {os.path.basename(file)}')
s = requests.session()
headers = {"x-api-key": BAZARR_API_KEY}
r = s.post(f"{BAZARR_URL}/api/system/account?action=login",
data={"username": BAZARR_USERNAME, "password": <PASSWORD>})
if not r.ok:
print("Authentication failed")
shutil.copy(file, os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
else:
data = {
'subtitles_path': job["sub"],
'provider': job["provider"],
'subs_id': job["sub_id"],
'language': job["sub_code_2"],
}
if not job["series_id"]:
url = f"{BAZARR_URL}/api/movies/blacklist?radarrid={job['episode_id']}"
else:
url = f"{BAZARR_URL}/api/episodes/blacklist?seriesid={job['series_id']}&episodeid={job['episode_id']}"
r = s.post(url, data=data, headers=headers)
if r.ok:
print(f'Blacklisted {os.path.basename(file)}')
else:
print(f'Failed to blacklist {os.path.basename(file)} : {r.text}')
shutil.copy(file, os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
finally:
working_lock.acquire()
os.remove(file)
working.remove(file)
working_lock.release()
worker_sem.release()
if __name__ == '__main__':
observer = Observer()
observer.schedule(AnyEventHandler(), JOBS_FOLDER, recursive=True)
observer.start()
while True:
time.sleep(3)
event_lock.acquire()
content = os.listdir(JOBS_FOLDER)
if last_file_event + 10 < time.time():
event_lock.release()
for thing in content:
path = os.path.join(JOBS_FOLDER, thing)
working_lock.acquire()
cond = path in working
working_lock.release()
if cond:
continue
if os.path.exists(path):
if os.path.isfile(path):
worker_sem.acquire()
working_lock.acquire()
working.add(path)
working_lock.release()
worker = threading.Thread(target=sync, args=(path,))
worker.start()
else:
print(f'Warning: non-file found in jobs queue ({thing})')
else:
print(f"Job file doesn't exist ({thing})")
else:
event_lock.release()
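# Hedged configuration sketch: the environment is expected to provide the
# variables read at the top of this file, e.g. (values illustrative):
#   BAZARR_URL=http://bazarr:6767
#   BAZARR_API_KEY=<key>  BAZARR_USERNAME=<user>  BAZARR_PASSWORD=<pass>
#   NUM_WORKERS=2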
| 2.109375
| 2
|
source/functions/Key_generate.py
|
GucciHsuan/CampusCyberInspectionTool2021
| 0
|
12785093
|
from cryptography.fernet import Fernet
class Key_generate:
    @staticmethod
    def write_key():
"""
Generates a key and save it into a file
"""
key = Fernet.generate_key()
with open("key.key", "wb") as key_file:
key_file.write(key)
key = open("key.key", "rb").read()
print(key)
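    @staticmethod
    def load_key():
        """
        Hedged companion sketch (not in the original tool): reload the key
        that write_key() stored, for use as Fernet(Key_generate.load_key()).
        """
        with open("key.key", "rb") as key_file:
            return key_file.read()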
| 3.453125
| 3
|
github_commit.py
|
Aaron1011/CloudBotPlugins
| 0
|
12785094
|
<filename>github_commit.py<gh_stars>0
from cloudbot import hook
import requests
GITHUB_URL = "https://api.github.com/repos/{}/{}/branches/{}"
DEFAULT_OWNER = "SpongePowered"
DEFAULT_REPO = "SpongeAPI"
DEFAULT_BRANCH = "master"
@hook.command("latest", "l")
def latest(text, message):
data = None
owner = DEFAULT_OWNER
repo = DEFAULT_REPO
branch = DEFAULT_BRANCH
if len(text) == 0:
data = requests.get(GITHUB_URL.format(DEFAULT_OWNER, DEFAULT_REPO,
DEFAULT_BRANCH)).json()
else:
text = text.split(' ')
if len(text) == 1:
branch = text[0]
data = requests.get(GITHUB_URL.format(DEFAULT_OWNER, DEFAULT_REPO,
branch)).json()
elif len(text) == 2:
repo = text[0]
branch = text[1]
data = requests.get(GITHUB_URL.format(DEFAULT_OWNER, repo,
branch)).json()
    if data is not None:
slug = "{}/{}".format(repo, branch)
sha = data["commit"]["sha"][0:7]
print(data["commit"])
commit_message = data["commit"]["commit"]["message"].split("\n")[0]
author = data["commit"]["commit"]["author"]["name"]
message("{} {}: {} (by {})".format(slug, sha, commit_message, author))
| 2.640625
| 3
|
stagecraft/apps/dashboards/models/__init__.py
|
alphagov-mirror/stagecraft
| 3
|
12785095
|
<gh_stars>1-10
from .dashboard import Dashboard, Link
from .module import Module, ModuleType
| 1.101563
| 1
|
RecoTauTag/Configuration/python/tools/adaptToRunAtMiniAOD.py
|
menglu21/cmssw
| 0
|
12785096
|
<reponame>menglu21/cmssw<filename>RecoTauTag/Configuration/python/tools/adaptToRunAtMiniAOD.py
import FWCore.ParameterSet.Config as cms
######
# Tools to adapt Tau sequences to run tau ReReco+PAT at MiniAOD samples
# <NAME>, <NAME>
# based on work of <NAME>, CERN
# Created: 9 Nov. 2017
######
import PhysicsTools.PatAlgos.tools.helpers as configtools
#####
class adaptToRunAtMiniAOD(object):
def __init__(self, process, runBoosted=False, postfix=""):
self.process = process
self.runBoosted = runBoosted
self.postfix = postfix
if runBoosted:
self.postfix = 'Boosted'+postfix
#print("Adapting boosted tau reconstruction to run at miniAOD; postfix = \"%s\"" % self.postfix)
#else:
# print("Adapting tau reconstruction to run at miniAOD; postfix = \"%s\"" % self.postfix)
#####
def addTauReReco(self):
#PAT
self.process.load('PhysicsTools.PatAlgos.producersLayer1.tauProducer_cff')
self.process.load('PhysicsTools.PatAlgos.selectionLayer1.tauSelector_cfi')
self.process.selectedPatTaus.cut="pt > 18. && tauID(\'decayModeFindingNewDMs\')> 0.5" #Cut as in MiniAOD
#Tau RECO
self.process.load("RecoTauTag.Configuration.RecoPFTauTag_cff")
#Task/Sequence for tau rereco
self.process.miniAODTausTask = cms.Task(
self.process.PFTauTask,
self.process.makePatTausTask,
self.process.selectedPatTaus
)
#Add Run-2 tauIDs for boostedTaus
if self.runBoosted:
self.process.PFTauMVAIsoTask = cms.Task(
self.process.hpsPFTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw,
self.process.hpsPFTauDiscriminationByIsolationMVArun2v1DBoldDMwLT,
self.process.hpsPFTauDiscriminationByIsolationMVArun2v1DBnewDMwLTraw,
self.process.hpsPFTauDiscriminationByIsolationMVArun2v1DBnewDMwLT
)
self.process.PFTauTask.add(self.process.PFTauMVAIsoTask)
self.miniAODTausTask = configtools.cloneProcessingSnippetTask(
self.process,self.process.miniAODTausTask,postfix=self.postfix)
setattr(self.process,'miniAODTausSequence'+self.postfix,cms.Sequence(self.miniAODTausTask))
if not self.postfix=="":
del self.process.miniAODTausTask
#####
def convertModuleToMiniAODInput(self,name):
module = getattr(self.process, name)
if hasattr(module, 'particleFlowSrc'):
module.particleFlowSrc = cms.InputTag("packedPFCandidates", "", "")
if hasattr(module, 'vertexSrc'):
module.vertexSrc = cms.InputTag('offlineSlimmedPrimaryVertices')
if hasattr(module, 'qualityCuts') and hasattr(module.qualityCuts, 'primaryVertexSrc'):
module.qualityCuts.primaryVertexSrc = cms.InputTag('offlineSlimmedPrimaryVertices')
#####
def adaptTauToMiniAODReReco(self,reclusterJets=True):
# TRYING TO MAKE THINGS MINIAOD COMPATIBLE, FROM THE START, TO THE END, 1 BY 1
#print '[adaptTauToMiniAODReReco]: Start'
jetCollection = 'slimmedJets'
# Add new jet collections if reclustering is demanded
if self.runBoosted:
jetCollection = 'boostedTauSeedsPAT'+self.postfix
from RecoTauTag.Configuration.boostedHPSPFTaus_cff import ca8PFJetsCHSprunedForBoostedTaus
setattr(self.process,'ca8PFJetsCHSprunedForBoostedTausPAT'+self.postfix,ca8PFJetsCHSprunedForBoostedTaus.clone(
src = 'packedPFCandidates',
jetCollInstanceName = 'subJetsForSeedingBoostedTausPAT'
))
setattr(self.process,'boostedTauSeedsPAT'+self.postfix,
cms.EDProducer("PATBoostedTauSeedsProducer",
subjetSrc = cms.InputTag('ca8PFJetsCHSprunedForBoostedTausPAT'+self.postfix,'subJetsForSeedingBoostedTausPAT'),
pfCandidateSrc = cms.InputTag('packedPFCandidates'),
verbosity = cms.int32(0)
))
self.miniAODTausTask.add(getattr(self.process,'ca8PFJetsCHSprunedForBoostedTausPAT'+self.postfix))
self.miniAODTausTask.add(getattr(self.process,'boostedTauSeedsPAT'+self.postfix))
elif reclusterJets:
jetCollection = 'patJetsPAT'+self.postfix
from RecoJets.JetProducers.ak4PFJets_cfi import ak4PFJets
setattr(self.process,'ak4PFJetsPAT'+self.postfix,ak4PFJets.clone(
src = "packedPFCandidates"
))
# trivial PATJets
from PhysicsTools.PatAlgos.producersLayer1.jetProducer_cfi import _patJets
setattr(self.process,'patJetsPAT'+self.postfix,_patJets.clone(
jetSource = "ak4PFJetsPAT"+self.postfix,
addJetCorrFactors = False,
jetCorrFactorsSource = [],
addBTagInfo = False,
addDiscriminators = False,
discriminatorSources = [],
addAssociatedTracks = False,
addJetCharge = False,
addGenPartonMatch = False,
embedGenPartonMatch = False,
addGenJetMatch = False,
getJetMCFlavour = False,
addJetFlavourInfo = False,
))
self.miniAODTausTask.add(getattr(self.process,'ak4PFJetsPAT'+self.postfix))
self.miniAODTausTask.add(getattr(self.process,'patJetsPAT'+self.postfix))
# This would associate all tracks to the jet within some deltaR region; tracks are not available in MiniAOD, so it is not needed
# self.process.ak4PFJetTracksAssociatorAtVertex.jets = cms.InputTag(jetCollection)
# Remove ak4PFJetTracksAssociatorAtVertex from recoTauCommonSequence
# Remove pfRecoTauTagInfoProducer from recoTauCommonSequence since it uses the jet-track association
# HOWEVER, may use https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookMiniAOD2017#Isolated_Tracks
# probably needs recovery of the two modules above
self.miniAODTausTask.remove(getattr(self.process,'ak4PFJetTracksAssociatorAtVertex'+self.postfix))
self.miniAODTausTask.remove(getattr(self.process,'pfRecoTauTagInfoProducer'+self.postfix))
self.miniAODTausTask.remove(getattr(self.process,'recoTauAK4PFJets08Region'+self.postfix))
setattr(self.process,'recoTauAK4Jets08RegionPAT'+self.postfix,
cms.EDProducer("RecoTauPatJetRegionProducer",
deltaR = self.process.recoTauAK4PFJets08Region.deltaR,
maxJetAbsEta = self.process.recoTauAK4PFJets08Region.maxJetAbsEta,
minJetPt = self.process.recoTauAK4PFJets08Region.minJetPt,
pfCandAssocMapSrc = cms.InputTag(""),
pfCandSrc = cms.InputTag("packedPFCandidates"),
src = cms.InputTag(jetCollection)
))
_jetRegionProducer = getattr(self.process,'recoTauAK4Jets08RegionPAT'+self.postfix)
self.miniAODTausTask.add(_jetRegionProducer)
if self.runBoosted:
_jetRegionProducer.pfCandAssocMapSrc = cms.InputTag(jetCollection, 'pfCandAssocMapForIsolation')
getattr(self.process,'recoTauPileUpVertices'+self.postfix).src = "offlineSlimmedPrimaryVertices"
for moduleName in self.miniAODTausTask.moduleNames():
self.convertModuleToMiniAODInput(moduleName)
# Adapt TauPiZeros producer
_piZeroProducer = getattr(self.process,'ak4PFJetsLegacyHPSPiZeros'+self.postfix)
for builder in _piZeroProducer.builders:
builder.qualityCuts.primaryVertexSrc = "offlineSlimmedPrimaryVertices"
_piZeroProducer.jetSrc = jetCollection
# Adapt TauChargedHadrons producer
_chargedHadronProducer = getattr(self.process,'ak4PFJetsRecoTauChargedHadrons'+self.postfix)
for builder in _chargedHadronProducer.builders:
builder.qualityCuts.primaryVertexSrc = "offlineSlimmedPrimaryVertices"
if builder.name.value() == 'tracks': #replace plugin based on generalTracks by one based on lostTracks
builder.name = 'lostTracks'
builder.plugin = 'PFRecoTauChargedHadronFromLostTrackPlugin'
builder.srcTracks = "lostTracks"
if self.runBoosted:
builder.dRcone = 0.3
builder.dRconeLimitedToJetArea = True
_chargedHadronProducer.jetSrc = jetCollection
# Adapt combinatoricRecoTau producer
_combinatoricRecoTauProducer = getattr(self.process,'combinatoricRecoTaus'+self.postfix)
_combinatoricRecoTauProducer.jetRegionSrc = 'recoTauAK4Jets08RegionPAT'+self.postfix
_combinatoricRecoTauProducer.jetSrc = jetCollection
# Adapt builders
for builder in _combinatoricRecoTauProducer.builders:
for name,value in builder.parameters_().items():
if name == 'qualityCuts':
builder.qualityCuts.primaryVertexSrc = 'offlineSlimmedPrimaryVertices'
elif name == 'pfCandSrc':
builder.pfCandSrc = 'packedPFCandidates'
# Adapt supported modifiers and remove unsupported ones
_modifiersToRemove = cms.VPSet()
for mod in _combinatoricRecoTauProducer.modifiers:
if mod.name.value() == 'elec_rej':
_modifiersToRemove.append(mod)
continue
elif mod.name.value() == 'TTIworkaround':
_modifiersToRemove.append(mod)
continue
elif mod.name.value() == 'tau_lost_tracks':
_modifiersToRemove.append(mod)
continue
for name,value in mod.parameters_().items():
if name == 'qualityCuts':
mod.qualityCuts.primaryVertexSrc = 'offlineSlimmedPrimaryVertices'
for mod in _modifiersToRemove:
_combinatoricRecoTauProducer.modifiers.remove(mod)
#print "\t\t Removing '%s' modifier from 'combinatoricRecoTaus'" %mod.name.value()
# Redefine tau PV producer
_tauPVProducer = getattr(self.process,'hpsPFTauPrimaryVertexProducer'+self.postfix)
_tauPVProducer.__dict__['_TypedParameterizable__type'] = 'PFTauMiniAODPrimaryVertexProducer'
_tauPVProducer.PVTag = 'offlineSlimmedPrimaryVertices'
_tauPVProducer.packedCandidatesTag = cms.InputTag("packedPFCandidates")
_tauPVProducer.lostCandidatesTag = cms.InputTag("lostTracks")
# Redefine tau SV producer
setattr(self.process,'hpsPFTauSecondaryVertexProducer'+self.postfix,
cms.EDProducer("PFTauSecondaryVertexProducer",
PFTauTag = cms.InputTag("hpsPFTauProducer"+self.postfix)
))
# Remove RecoTau producers which are not supported (yet?), i.e. against-e/mu discriminants
for moduleName in self.miniAODTausTask.moduleNames():
if 'ElectronRejection' in moduleName or 'MuonRejection' in moduleName:
if 'ByDeadECALElectronRejection' in moduleName: continue
self.miniAODTausTask.remove(getattr(self.process, moduleName))
# Instead add against-mu discriminants which are MiniAOD compatible
from RecoTauTag.RecoTau.hpsPFTauDiscriminationByMuonRejectionSimple_cff import hpsPFTauDiscriminationByMuonRejectionSimple
setattr(self.process,'hpsPFTauDiscriminationByMuonRejectionSimple'+self.postfix,
hpsPFTauDiscriminationByMuonRejectionSimple.clone(
PFTauProducer = "hpsPFTauProducer"+self.postfix))
_tauIDAntiMuSimple = getattr(self.process,'hpsPFTauDiscriminationByMuonRejectionSimple'+self.postfix)
if self.runBoosted:
_tauIDAntiMuSimple.dRmuonMatch = 0.1
self.miniAODTausTask.add(_tauIDAntiMuSimple)
#####
# PAT part in the following
getattr(self.process,'tauGenJets'+self.postfix).GenParticles = "prunedGenParticles"
getattr(self.process,'tauMatch'+self.postfix).matched = "prunedGenParticles"
# Remove unsupported tauIDs
_patTauProducer = getattr(self.process,'patTaus'+self.postfix)
for name,src in _patTauProducer.tauIDSources.parameters_().items():
if name.find('againstElectron') > -1 or name.find('againstMuon') > -1:
if name.find('againstElectronDeadECAL') > -1: continue
delattr(_patTauProducer.tauIDSources,name)
# Add MiniAOD specific ones
setattr(_patTauProducer.tauIDSources,'againstMuonLooseSimple',
cms.PSet(inputTag = cms.InputTag('hpsPFTauDiscriminationByMuonRejectionSimple'+self.postfix),
provenanceConfigLabel = cms.string('IDWPdefinitions'),
idLabel = cms.string('ByLooseMuonRejectionSimple')
))
setattr(_patTauProducer.tauIDSources,'againstMuonTightSimple',
cms.PSet(inputTag = cms.InputTag('hpsPFTauDiscriminationByMuonRejectionSimple'+self.postfix),
provenanceConfigLabel = cms.string('IDWPdefinitions'),
idLabel = cms.string('ByTightMuonRejectionSimple')
))
#Add Run-2 tauIDs still used for boostedTaus
if self.runBoosted:
from PhysicsTools.PatAlgos.producersLayer1.tauProducer_cfi import containerID
containerID(_patTauProducer.tauIDSources,
"hpsPFTauDiscriminationByIsolationMVArun2v1DBoldDMwLT"+self.postfix,
"rawValues", [
["byIsolationMVArun2DBoldDMwLTraw", "discriminator"]
])
containerID(_patTauProducer.tauIDSources,
"hpsPFTauDiscriminationByIsolationMVArun2v1DBoldDMwLT"+self.postfix,
"workingPoints", [
["byVVLooseIsolationMVArun2DBoldDMwLT", "_VVLoose"],
["byVLooseIsolationMVArun2DBoldDMwLT", "_VLoose"],
["byLooseIsolationMVArun2DBoldDMwLT", "_Loose"],
["byMediumIsolationMVArun2DBoldDMwLT", "_Medium"],
["byTightIsolationMVArun2DBoldDMwLT", "_Tight"],
["byVTightIsolationMVArun2DBoldDMwLT", "_VTight"],
["byVVTightIsolationMVArun2DBoldDMwLT", "_VVTight"]
])
containerID(_patTauProducer.tauIDSources,
"hpsPFTauDiscriminationByIsolationMVArun2v1DBnewDMwLT"+self.postfix,
"rawValues", [
["byIsolationMVArun2DBnewDMwLTraw", "discriminator"]
])
containerID(_patTauProducer.tauIDSources,
"hpsPFTauDiscriminationByIsolationMVArun2v1DBnewDMwLT"+self.postfix,
"workingPoints", [
["byVVLooseIsolationMVArun2DBnewDMwLT", "_VVLoose"],
["byVLooseIsolationMVArun2DBnewDMwLT", "_VLoose"],
["byLooseIsolationMVArun2DBnewDMwLT", "_Loose"],
["byMediumIsolationMVArun2DBnewDMwLT", "_Medium"],
["byTightIsolationMVArun2DBnewDMwLT", "_Tight"],
["byVTightIsolationMVArun2DBnewDMwLT", "_VTight"],
["byVVTightIsolationMVArun2DBnewDMwLT", "_VVTight"]
])
# Run TauIDs (anti-e && deepTau) on top of selectedPatTaus
_updatedTauName = 'selectedPatTausNewIDs'+self.postfix
_noUpdatedTauName = 'selectedPatTausNoNewIDs'+self.postfix
toKeep = ['deepTau2017v2p1']
#For boosted do not run deepTauIDs, but add still used Run-2 anti-e MVA
if self.runBoosted:
toKeep = ['againstEle2018']
import RecoTauTag.RecoTau.tools.runTauIdMVA as tauIdConfig
tauIdEmbedder = tauIdConfig.TauIDEmbedder(
self.process, debug = False,
originalTauName = _noUpdatedTauName,
updatedTauName = _updatedTauName,
postfix = "MiniAODTaus"+self.postfix,
toKeep = toKeep
)
tauIdEmbedder.runTauID()
setattr(self.process, _noUpdatedTauName, getattr(self.process,'selectedPatTaus'+self.postfix).clone())
self.miniAODTausTask.add(getattr(self.process,_noUpdatedTauName))
delattr(self.process, 'selectedPatTaus'+self.postfix)
setattr(self.process,'selectedPatTaus'+self.postfix,getattr(self.process, _updatedTauName).clone())
delattr(self.process, _updatedTauName)
setattr(self.process,'newTauIDsTask'+self.postfix,cms.Task(
getattr(self.process,'rerunMvaIsolationTaskMiniAODTaus'+self.postfix),
getattr(self.process,'selectedPatTaus'+self.postfix)
))
self.miniAODTausTask.add(getattr(self.process,'newTauIDsTask'+self.postfix))
#print '[adaptTauToMiniAODReReco]: Done!'
#####
def setOutputModule(self,mode=0):
#mode = 0: store original MiniAOD and new selectedPatTaus
#mode = 1: store original MiniAOD, new selectedPatTaus, and all PFTau products as in AOD (except for unsupported ones), plus a few additional collections (charged hadrons, pi zeros, combinatoric reco taus)
import Configuration.EventContent.EventContent_cff as evtContent
output = cms.OutputModule(
'PoolOutputModule',
fileName=cms.untracked.string('miniAOD_TauReco.root'),
fastCloning=cms.untracked.bool(False),
dataset=cms.untracked.PSet(
dataTier=cms.untracked.string('MINIAODSIM'),
filterName=cms.untracked.string('')
),
outputCommands = evtContent.MINIAODSIMEventContent.outputCommands,
SelectEvents=cms.untracked.PSet(
SelectEvents=cms.vstring('*',)
)
)
output.outputCommands.append('keep *_selectedPatTaus'+self.postfix+'_*_*')
if mode==1:
import re
for prod in evtContent.RecoTauTagAOD.outputCommands:
if prod.find('ElectronRejection') > -1 and prod.find('DeadECAL') == -1:
continue
if prod.find('MuonRejection') > -1:
continue
if prod.find("_*_*")>-1: # products w/o instance
output.outputCommands.append(prod.replace("_*_*",self.postfix+"_*_*"))
else: # check if there are prods with instance
m = re.search(r'_[A-Za-z0-9]+_\*', prod)
if m!=None:
inst = m.group(0)
output.outputCommands.append(prod.replace(inst,self.postfix+inst))
else:
print("Warning: \"%s\" does not match known name patterns; trying to keep w/o postfix" % prod)
output.outputCommands.append(prod)
output.outputCommands.append('keep *_hpsPFTauDiscriminationByMuonRejectionSimple'+self.postfix+'_*_*')
output.outputCommands.append('keep *_combinatoricReco*_*_*')
output.outputCommands.append('keep *_ak4PFJetsRecoTauChargedHadrons'+self.postfix+'_*_*')
output.outputCommands.append('keep *_ak4PFJetsLegacyHPSPiZeros'+self.postfix+'_*_*')
output.outputCommands.append('keep *_patJetsPAT'+self.postfix+'_*_*')
output.outputCommands.append('keep *_boostedTauSeedsPAT'+self.postfix+'_*_*')
return output
#####
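# Hedged usage sketch (added by editor): the class name `TauReRecoAdapter` and the
# constructor call below are assumptions for illustration only; the method names, the
# `postfix`/`runBoosted` attributes and the `miniAODTausSequence` name come from the
# code above.
# import FWCore.ParameterSet.Config as cms
# process = cms.Process("TAURERECO")
# adapter = TauReRecoAdapter(process, postfix="", runBoosted=False)  # hypothetical constructor
# adapter.addTauReReco()                               # build PFTau + PAT tau tasks
# adapter.adaptTauToMiniAODReReco(reclusterJets=True)  # switch inputs to MiniAOD collections
# process.p = cms.Path(process.miniAODTausSequence)    # sequence created by addTauReReco (postfix="")
# process.out = adapter.setOutputModule(mode=0)        # keep MiniAOD content + new selectedPatTaus
# process.outpath = cms.EndPath(process.out)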
| 1.984375
| 2
|
oscar/lib/python2.7/site-packages/_pytest/__init__.py
|
bhav11esh/Oscar-Bookshelf
| 1
|
12785097
|
#
__version__ = '3.0.6'
| 1.054688
| 1
|
instance.py
|
Elaina-Alex/PixivCrawler
| 1
|
12785098
|
<filename>instance.py
import os
import re
import time
from rich import print
import yaml
class Msg:
msg_help = [
"Enter the first letter of a command",
"h | help\t\t\t\t\t\t--- show instructions",
"q | quit\t\t\t\t\t\t--- quit the running program",
"d | picture\t\t\t\t\t\t--- download an illustration by id or url",
"t | recommend\t\t\t\t\t--- download pixiv recommended illustrations",
"s | start\t\t\t\t\t\t--- download the account's bookmarked illustrations",
"r | rank\t\t\t\t\t\t--- download ranking works",
"n | tag name\t\t\t\t\t--- enter an illustration or tag name",
"u | read text pid\t\t\t\t\t--- batch download pids read from a local text file",
"f | follow\t\t\t\t\t\t--- download works of followed artists",
]
class YamlData:
def __init__(self, file_path=None, file_dir=None):
if file_dir is not None:
self.file_dir = os.path.join(os.getcwd(), file_dir)
if not os.path.exists(self.file_dir):
try:
os.mkdir(self.file_dir)
except (FileExistsError, OSError) as err:
print(err)
self.file_path = os.path.join(os.getcwd(), file_path)
self.data = {}
def load(self):
try:
with open(file=self.file_path, mode="r", encoding='utf-8') as f:
self.data = yaml.load(f, Loader=yaml.FullLoader)
if self.data is None:
self.data = {}
except FileNotFoundError:
with open(self.file_path, 'w', encoding='utf-8'):
self.data = {}
def save(self):
with open(file=self.file_path, mode="w", encoding='utf-8') as f:
yaml.safe_dump(self.data, f, default_flow_style=False, allow_unicode=True)
def write_file(file_dir: str, m: str, content: str = ""):
if m == "r":
return open(file_dir, "r", encoding='utf-8').read()
with open(file_dir, m, encoding='utf-8', newline="") as f:
f.write(content)
class Vars:
cfg = YamlData('pixiv-config.yaml')
images_info = None
complex_images_info = list()
images_info_list = list()
def count_time(func: callable) -> callable:
def wrapper(*arg, **kwargs):
start_time = time.time()
result = func(*arg, **kwargs)
print(f"Download time: {time.time() - start_time:.2f}s")
return result
return wrapper
def remove_str(content: str):
res_compile = re.compile(u'[\U00010000-\U0010ffff\\uD800-\\uDBFF\\uDC00-\\uDFFF]')
return res_compile.sub("", re.sub('[/:*?"<>|x08]', '-', content))
def rec_id(book_id: str):
book_id = book_id if 'http' not in book_id else re.findall(r'/(\d+)/?', book_id)[0]
return str(book_id) if book_id.isdigit() else f'Input {book_id} is not a number or a link!'
def index_title(division_index: int, image_name: str):
return str(division_index).rjust(4, "0") + '-' + str(image_name)
def input_str(prompt, default=None):
while True:
ret = input(prompt)
if ret != '':
return ret
elif default is not None:
return default
def input_int(prompt: str, max_number: int = None):
while True:
ret = input(prompt)
if ret.isdigit():
if max_number is None:
return int(ret)
if max_number is not None and int(ret) < max_number:
return int(ret)
else:
print(f"The number {ret} must be less than the index {max_number}")
continue
else:
if ret.strip() != '':
print(f"The input {ret} is not a number, please enter it again")
def set_config():
Vars.cfg.load()
config_change = False
if type(Vars.cfg.data.get('max_thread')) is not int:
Vars.cfg.data['max_thread'] = 5
config_change = True
if type(Vars.cfg.data.get('save_file')) is not str:
Vars.cfg.data['save_file'] = 'image_file'
config_change = True
if type(Vars.cfg.data.get('out_file')) is not str:
Vars.cfg.data['out_file'] = 'downloaded'
config_change = True
if type(Vars.cfg.data.get('save_type')) is not bool:
Vars.cfg.data['save_type'] = False
config_change = True
if type(Vars.cfg.data.get('access_token')) is not str:
Vars.cfg.data['access_token'] = ""
config_change = True
if type(Vars.cfg.data.get('refresh_token')) is not str:
Vars.cfg.data['refresh_token'] = ""
config_change = True
if type(Vars.cfg.data.get('max_retry')) is not int:
Vars.cfg.data['max_retry'] = 5 # retry times when download failed
config_change = True
if not isinstance(Vars.cfg.data.get('file_name_config'), dict):
Vars.cfg.data['file_name_config'] = {'image_id': True, 'author': 'author'}
config_change = True
if not isinstance(Vars.cfg.data.get('user_info'), dict):
Vars.cfg.data['user_info'] = {} # save user info to config file
config_change = True
if config_change: # if config change, save it to file and reload.
Vars.cfg.save()
if not os.path.exists(Vars.cfg.data.get('save_file')):
os.mkdir(Vars.cfg.data.get('save_file'))
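# Hedged usage sketch (added by editor): typical startup order inferred from this
# module only; the printed value is just the default filled in above.
# set_config()                        # load pixiv-config.yaml and fill in missing defaults
# print(Vars.cfg.data['save_file'])   # -> 'image_file' unless overridden in the yaml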
| 2.484375
| 2
|
_doc/examples/gallery1/plot_project_name.py
|
sdpython/python_project_template
| 0
|
12785099
|
<filename>_doc/examples/gallery1/plot_project_name.py
# -*- coding: utf-8 -*-
"""
===============================
Example with the project itself
===============================
Example with a simple import.
"""
##############################
# import
import python3_module_template
print(python3_module_template.__version__)
| 1.898438
| 2
|
lib/data_stores/tdb_data_store_test.py
|
nahidupa/grr
| 1
|
12785100
|
#!/usr/bin/env python
"""Tests the tdb data store - in memory implementation."""
import shutil
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import tdb_data_store
# pylint: mode=test
class TDBTestMixin(object):
def InitDatastore(self):
self.token = access_control.ACLToken(username="test",
reason="Running tests")
config_lib.CONFIG.Set("Datastore.location", "%s/tdb_test/" % self.temp_dir)
self.DestroyDatastore()
data_store.DB = tdb_data_store.TDBDataStore()
data_store.DB.security_manager = test_lib.MockSecurityManager()
def testCorrectDataStore(self):
self.assertTrue(isinstance(data_store.DB, tdb_data_store.TDBDataStore))
def DestroyDatastore(self):
try:
shutil.rmtree(config_lib.CONFIG.Get("Datastore.location"))
except (OSError, IOError):
pass
class TDBDataStoreTest(TDBTestMixin, data_store_test._DataStoreTest):
"""Test the tdb data store."""
class TDBDataStoreBenchmarks(TDBTestMixin,
data_store_test.DataStoreBenchmarks):
"""Benchmark the TDB data store abstraction."""
class TDBDataStoreCSVBenchmarks(TDBTestMixin,
data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the TDB data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
| 2.109375
| 2
|
imaging.py
|
jinjiaho/project57
| 0
|
12785101
|
<gh_stars>0
from PIL import Image
import os
## A library for all your image manipulation needs!
## Requires Pillow for Python 2.7/3.3+
class Imaging(object):
# set proposed dimension of thumbnail
# assume max height/width of 255
def __init__(self, dim=255, path="static/img/items/", fallback="default.thumb"):
self.dim = dim
self.path = path
self.fallback = fallback
# create thumbnail of size `dim` for file args
def thumb(self, *args):
for infile in args:
infile_abs = self.path + infile
outfile = os.path.splitext(infile)[0] + ".thumb"
outfile_abs = self.path + outfile
if infile != outfile:
try:
im = Image.open(infile_abs)
im = im.convert("RGB")
im.thumbnail((self.dim, self.dim), Image.ANTIALIAS)
im.save(outfile_abs, "JPEG")
return outfile
except IOError:
print("PYTHON: Cannot create thumbnail for '%s'" % infile)
return self.fallback
def __str__(self):
return "Dimension: %d\nPath: %s" % (self.dim, self.path)
if __name__ == '__main__':
print("PYTHON: `Imaging.py` is not meant to be executed directly")
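# Hedged usage sketch (added by editor): the image file name below is a placeholder.
# imaging = Imaging(dim=128, path="static/img/items/")
# thumb_name = imaging.thumb("photo001.jpg")  # "photo001.thumb" on success, "default.thumb" on IOError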
| 3.078125
| 3
|
tests/test_copyright.py
|
jbwang1997/pre-commit-hooks
| 12
|
12785102
|
<filename>tests/test_copyright.py
import os
import os.path as osp
from pre_commit_hooks.check_copyright import check_copyright
def test_copyright():
includes = ['./tests/data']
excludes = ['./tests/data/exclude']
suffixes = ['.py', '.cpp', '.h', '.cu', '.cuh', '.hpp']
contain_copyright = ['./tests/data/contain_copyright']
assert check_copyright(includes, excludes, suffixes) == 1
for dir in includes:
for root, dirs, files in os.walk(dir):
for file in files:
filepath = osp.join(root, file)
with open(filepath, 'r', encoding='utf-8') as f:
lines = f.readlines()
if root not in excludes:
assert lines[0].split(' ').count('Copyright') > 0
else:
assert lines[0].split(' ').count('Copyright') == 0
with open(filepath, 'w', encoding='utf-8') as f:
if root not in excludes and root not in contain_copyright:
f.writelines(lines[1:])
else:
f.writelines(lines)
for dir in contain_copyright:
for root, dirs, files in os.walk(dir):
for file in files:
filepath = osp.join(root, file)
with open(filepath, 'r', encoding='utf-8') as f:
line = f.readline()
assert line.split(' ').count('OpenMMLab.') > 0
| 2.453125
| 2
|
android-runner/ExperimentRunner/MonkeyRunner.py
|
S2-group/mobilesoft-2020-caching-pwa-replication-package
| 0
|
12785103
|
import subprocess
from ExperimentRunner.Script import Script
class MonkeyRunner(Script):
"""
Subclass of `Script` for running MonkeyRunner scripts directly.
As opposed to `MonkeyReplay`, it runs the scripts directly using MonkeyRunner.
Thanks to that it's not necessary to go through a layer of indirection in the
form of JSON files and a custom runner. This results in higher flexibility and
greater control.
Usage:
1. Create a script runnable by MonkeyRunner.
2. Add it to the config file with the type "monkeyrunner".
Important!
The script has to be directly runnable by MonkeyRunner. It means that:
- it has to create an instance of `MonkeyDevice` explicitly in the script,
- all operations are supposed to be invoked on this instance,
- there has to be module-level code running the operations,
- it has to follow any other restrictions specified in the docs.
Docs and examples: https://developer.android.com/studio/test/monkeyrunner/
"""
def __init__(self, path, timeout=0, logcat_regex=None, monkeyrunner_path='monkeyrunner'):
super(MonkeyRunner, self).__init__(path, timeout, logcat_regex)
self.monkeyrunner_path = monkeyrunner_path
def execute_script(self, device, *args, **kwargs):
"""
Run the MonkeyRunner script.
Returns the return value returned by MonkeyRunner.
"""
super(MonkeyRunner, self).execute_script(device, *args, **kwargs)
return subprocess.call([self.monkeyrunner_path, self.path])
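# Hedged usage sketch (added by editor): the script path and monkeyrunner location are
# placeholders; only the constructor and execute_script signatures come from this class,
# and `device` is whatever device object the framework passes in.
# script = MonkeyRunner('scripts/login_flow.py', timeout=120,
#                       monkeyrunner_path='/opt/android-sdk/tools/bin/monkeyrunner')
# return_code = script.execute_script(device)  # returns monkeyrunner's exit code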
| 2.6875
| 3
|
mysite/slhpa/admin.py
|
jeffb4real/SLHPA-Web-App
| 1
|
12785104
|
from django.contrib import admin
from .models import PhotoRecord, KeyValueRecord
admin.site.register(PhotoRecord)
admin.site.register(KeyValueRecord)
| 1.273438
| 1
|
nmastudio/tools/storage.py
|
silviametelli/nmastudio
| 2
|
12785105
|
<reponame>silviametelli/nmastudio
import pandas as pd
from nmastudio.tools.utils import get_network
from collections import OrderedDict
NET_DATA = pd.read_csv('db/psoriasis_wide.csv')
NET_DATA2 = NET_DATA.drop(["TE", "seTE", "n1", "n2"], axis=1)
NET_DATA2 = NET_DATA2.rename(columns={"TE2": "TE", "seTE2": "seTE", "n2.1": "n1", "n2.2": "n2"})
CONSISTENCY_DATA = pd.read_csv('db/consistency/consistency.csv')
DEFAULT_ELEMENTS = USER_ELEMENTS = get_network(df=NET_DATA)
DEFAULT_ELEMENTS2 = USER_ELEMENTS2 = get_network(df=NET_DATA2)
FOREST_DATA = pd.read_csv('db/forest_data/forest_data.csv')
FOREST_DATA_OUT2 = pd.read_csv('db/forest_data/forest_data_outcome2.csv')
FOREST_DATA_PRWS = pd.read_csv('db/forest_data/forest_data_pairwise.csv')
FOREST_DATA_PRWS_OUT2 = pd.read_csv('db/forest_data/forest_data_pairwise_out2.csv')
LEAGUE_TABLE_DATA = pd.read_csv('db/league_table_data/league_table.csv', index_col=0)
CINEMA_NET_DATA1 = pd.read_csv('db/Cinema/cinema_report_PASI90.csv')
CINEMA_NET_DATA2 = pd.read_csv('db/Cinema/cinema_report_SAE.csv')
NETSPLIT_DATA = pd.read_csv('db/consistency/consistency_netsplit.csv')
NETSPLIT_DATA_OUT2 = pd.read_csv('db/consistency/consistency_netsplit_out2.csv')
NETSPLIT_DATA_ALL = pd.read_csv('db/consistency/netsplit_all.csv')
NETSPLIT_DATA_ALL_OUT2 = pd.read_csv('db/consistency/netsplit_all_out2.csv')
RANKING_DATA = pd.read_csv('db/ranking/rank.csv')
FUNNEL_DATA = pd.read_csv('db/funnel/funnel_data.csv')
FUNNEL_DATA_OUT2 = pd.read_csv('db/funnel/funnel_data_out2.csv')
DEFAULT_DATA = OrderedDict(net_data_STORAGE=NET_DATA,
net_data_out2_STORAGE=NET_DATA2,
consistency_data_STORAGE=CONSISTENCY_DATA,
user_elements_STORAGE=USER_ELEMENTS,
user_elements_out2_STORAGE=USER_ELEMENTS2,
forest_data_STORAGE=FOREST_DATA,
forest_data_out2_STORAGE=FOREST_DATA_OUT2,
forest_data_prws_STORAGE=FOREST_DATA_PRWS,
forest_data_prws_out2_STORAGE=FOREST_DATA_PRWS_OUT2,
ranking_data_STORAGE=RANKING_DATA,
funnel_data_STORAGE=FUNNEL_DATA,
funnel_data_out2_STORAGE=FUNNEL_DATA_OUT2,
league_table_data_STORAGE=LEAGUE_TABLE_DATA,
net_split_data_STORAGE=NETSPLIT_DATA,
net_split_data_out2_STORAGE=NETSPLIT_DATA_OUT2,
net_split_ALL_data_STORAGE=NETSPLIT_DATA_ALL,
net_split_ALL_data_out2_STORAGE=NETSPLIT_DATA_ALL_OUT2,
cinema_net_data1_STORAGE=CINEMA_NET_DATA1,
cinema_net_data2_STORAGE=CINEMA_NET_DATA2,
)
OPTIONS_VAR = [{'label': '{}'.format(col), 'value': col} for col in NET_DATA.select_dtypes(['number']).columns]
N_CLASSES = USER_ELEMENTS[-1]["data"]['n_class'] if "n_class" in USER_ELEMENTS[-1]["data"] else 1
| 2.5625
| 3
|
data_structures/tests/test_union_find.py
|
tzaffi/PyAlgo
| 0
|
12785106
|
<filename>data_structures/tests/test_union_find.py
from data_structures.union_find import UnionFind
def test_init():
ds = UnionFind([1, 2, 3])
assert ds._elts == {1, 2, 3}
assert ds._reps == {1, 2, 3}
assert ds._comp_count == 3
assert ds._parent[2] == 2
def test_union_find():
ds = UnionFind([1, 2, 3])
assert ds.find(3) == 3
assert 3 == ds.union(2, 3)
assert ds.find(2) == ds.find(3)
assert ds.find(1) != ds.find(3)
def test_component_n_size():
ds = UnionFind([1, 2, 3])
for i in [1, 2, 3]:
assert ds.component(i) == {i}
assert ds.component_size(i) == 1
assert 3 == ds.union(2, 3)
assert ds.component(1) == {1}
assert ds.component(2) == {2, 3}
assert ds.component_size(1) == 1
assert ds.component_size(2) == 2
def test_connected():
ds = UnionFind([1, 2, 3])
for i in [1, 2, 3]:
assert ds.connected(i, i)
assert not ds.connected(i, i + 1 if i < 3 else 1)
assert 3 == ds.union(2, 3)
assert ds.connected(2, 3)
assert not ds.connected(1, 3)
def test_eq_neq():
ds1 = UnionFind([1, 2, 3])
ds2 = UnionFind([1, 2, 3])
assert ds1 == ds2
assert not ds1 != ds2
ds1.union(2, 3)
assert not ds1 == ds2
assert ds1 != ds2
def test_len():
ds = UnionFind([1, 2, 3])
assert len(ds) == 3
ds.union(2, 3)
assert len(ds) == 2
ds.union(2, 3)
assert len(ds) == 2
ds.union(1, 2)
assert len(ds) == 1
def equal_sets_collection(sets1, sets2):
if len(sets1) != len(sets2):
return False
for s in sets1:
if s not in sets2:
return False
return True
def test_sets():
ds = UnionFind([1, 2, 3])
ds.union(2, 3)
expected = [{1}, {2, 3}]
actual = ds.sets()
assert equal_sets_collection(expected, actual), actual
| 3.046875
| 3
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/contextlib.py
|
bidhata/EquationGroupLeaks
| 9
|
12785107
|
<reponame>bidhata/EquationGroupLeaks
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: contextlib.py
"""Utilities for with-statement contexts. See PEP 343."""
import sys
from functools import wraps
from warnings import warn
__all__ = [
'contextmanager', 'nested', 'closing']
class GeneratorContextManager(object):
"""Helper for @contextmanager decorator."""
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
raise RuntimeError("generator didn't stop")
else:
if value is None:
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
return exc is not value
except:
if sys.exc_info()[1] is not value:
raise
return
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
return helper
@contextmanager
def nested(*managers):
"""Combine multiple context managers into a single nested context manager.
This function has been deprecated in favour of the multiple manager form
of the with statement.
The one advantage of this function over the multiple manager form of the
with statement is that argument unpacking allows it to be
used with a variable number of context managers as follows:
with nested(*managers):
do_something()
"""
warn('With-statements now directly support multiple context managers', DeprecationWarning, 3)
exits = []
vars = []
exc = (None, None, None)
try:
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
raise exc[0], exc[1], exc[2]
return
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
| 2.921875
| 3
|
ituro/orders/management/commands/linefollowerdeleteorders.py
|
kayduemre/ituro
| 9
|
12785108
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from orders.models import RaceOrder, LineFollowerRaceOrder
class Command(BaseCommand):
args = '<day>'
help = 'Deletes line follower race orders of the specified day.'
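# Hedged usage note (added by editor): as a Django management command the module name
# becomes the command name, so this is typically invoked as, e.g.:
#   python manage.py linefollowerdeleteorders 1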
def handle(self, *args, **options):
try:
day = int(args[0])
except IndexError:
raise CommandError('Please specify a day.')
if day < 1 or day > 2:
raise CommandError('Day interval is 1 <= day <= 2.')
LineFollowerRaceOrder.objects.filter(stage__order=day).delete()
self.stdout.write(
"Line follower race orders day #{} deleted.".format(day))
| 2.296875
| 2
|
leitor.py
|
rjribeiro/trabalho-formais
| 3
|
12785109
|
<gh_stars>1-10
import re
class Leitor:
def __init__(self, arquivo):
self.__terminais = self.__monta_terminais(arquivo)
self.__variaveis = self.__monta_variaveis(arquivo)
self.__inicial = self.__le_linha(arquivo).strip()[2:-2]
self.__producoes = self.__monta_producoes(arquivo)
def __le_linha(self, arquivo):
"""
Reads a line from the file and handles comments, returning only the
meaningful part of the line.
:param arquivo: txt file containing the grammar to be read
:type arquivo: file
:return: the line with the useful data
:rtype: str
"""
linha = arquivo.readline()
posicao = linha.find("#")
if posicao == -1:
return linha
elif posicao == 0:
for prefixo in ["#Terminais", "#Variaveis", "#Inicial", "#Regras"]:
if linha.startswith(prefixo):
return prefixo
return linha[:posicao]
def __splita_linha(self, linha):
"""
Splits each group of characters on the line into a list.
Example: [ S ] > [ NP ] [ VP ] -> ['', ' S ', ' ', ' ', ' NP ', ' ', ' VP ', '\n']
:param linha: str
:return: list of strings containing variables and/or terminals
:rtype: list
"""
# In the next 3 lines the unneeded characters are replaced with "|" to make the split easier
# Tried to use re.split() here, but it returns the wrong result
linha = linha.replace("[", "|")
linha = linha.replace("]", "|")
linha = linha.replace(">", "|")
linha = linha.split("|")
return linha
def __monta_terminais(self, arquivo):
"""
Collects the terminal symbols of the grammar
Example: gramatica_exemplo2.txt -> ['runs', 'barks', 'eats', ... , 'with', 'at']
:param arquivo: txt file containing the grammar to be read
:type arquivo: file
:return: set containing the terminals of the grammar
:rtype: set
"""
self.__le_linha(arquivo)  # this read skips the first line of the file, where "#Terminais" is written
terminais = set()
while True:
linha = self.__le_linha(arquivo)
if linha == '#Variaveis':
break
else:
terminais.add(re.split('[ | ]', linha)[1])
return terminais
def __monta_producao(self, linha):
"""
Formats the list returned by splita_linha()
Example: ['', ' S ', ' ', ' ', ' NP ', ' ', ' VP ', '\n'] -> ('S', ('NP', 'VP'))
:param linha: list containing the elements of a production
:type linha: list
:return: the production extracted from the list
:rtype: tuple
"""
cabeca = linha[1][1:-1] # The head of the production is always at index 1 of the list. The slice [1:-1]
# removes the whitespace surrounding it
corpo = []
for x in linha[4::2]:  # The members of the production body start at index 4 of the list and go to its end,
# stepping by 2 so the useless characters ', ' ', ' are skipped
corpo.append(x[1:-1])  # The slice [1:-1] removes the whitespace
return (cabeca, tuple(corpo))
def __monta_producoes(self, arquivo):
"""
Collects the production rules of the grammar
Example: gramatica_exemplo2.txt -> {('S', ('NP', 'VP')), ... , ('N', ('dog',)), ('N', ('cat',))}
:param arquivo: txt file containing the grammar to be read
:type arquivo: file
:return: set containing the productions of the grammar
:rtype: set
"""
self.__le_linha(arquivo)
producoes = set()
while True:
linha = self.__le_linha(arquivo)
if linha == '':
break
else:
linha = self.__splita_linha(linha)
producao = self.__monta_producao(linha)
producoes.add(producao)
return producoes
def __monta_variaveis(self, arquivo):
"""
Collects the variable (non-terminal) symbols of the grammar
Example: gramatica_exemplo2.txt -> ['VB', 'NP', 'DT', 'VP', 'S', 'PP', 'P']
:param arquivo: txt file containing the grammar to be read
:type arquivo: file
:return: set of the grammar's variables
:rtype: set
"""
variaveis = set()
while True:
linha = self.__le_linha(arquivo)
if linha == '#Inicial':
break
else:
variaveis.add(re.split('[ | ]', linha)[1])
return variaveis
@property
def terminais(self):
return self.__terminais
@property
def variaveis(self):
return self.__variaveis
@property
def inicial(self):
return self.__inicial
@property
def producoes(self):
return self.__producoes
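# Hedged usage sketch (added by editor): the file name is the example referenced in the
# docstrings above; the grammar file must contain the "#Terminais", "#Variaveis",
# "#Inicial" and "#Regras" sections expected by __le_linha().
# if __name__ == '__main__':
#     with open('gramatica_exemplo2.txt') as arquivo:
#         leitor = Leitor(arquivo)
#         print(leitor.inicial, leitor.variaveis, leitor.terminais, leitor.producoes)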
| 3.1875
| 3
|
assignment1/mapper.py
|
IITDU-BSSE06/ads-demystifying-the-logs-Sabir001
| 0
|
12785110
|
#!/usr/bin/python
import sys
for line in sys.stdin:
data = line.strip().split("- -")
if len(data) == 2:
ipAddress, rest = data
if ipAddress.strip() == "10.99.99.186":
print "{0}".format(ipAddress)
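# Hedged usage note (added by editor): as a Hadoop-streaming style mapper this script
# reads log lines from stdin; a local smoke test could look like:
#   cat access_log | python2 mapper.py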
| 3.21875
| 3
|
blender-processing-scripts/render/master_image_from_angle.py
|
MikeFesta/3xr
| 7
|
12785111
|
# SPDX-License-Identifier: Apache-2.0
import bpy
import math
import os
import time
from xrs import automate as xra
## Deprecated - used to render asset submissions
# rename
xra.log_info('Rendering Asset Master Image from angle')
arguments = xra.get_command_line_arguments()
working_dir = arguments[0]
asset_name = arguments[1]
submission_id = arguments[2]
theta = float(arguments[3])
phi = float(arguments[4])
radius = float(arguments[5])
asset_blend = working_dir + asset_name + '.blend'
# Save a copy for testing purposes. Can be removed in the future.
# TODO: Remove this when it is not needed anymore
xra.save_as(working_dir, 'master_image_from_angle')
xra.log_info('Linking asset from ' + asset_blend + ' with camera rotation ' + str(theta) + ', ' + str(phi) + ', ' + str(radius))
xra.append_collection(asset_blend, "master")
if ("master" not in bpy.data.collections):
# Exit if the collection couldn't be loaded
xra.quit_with_error("Unable to load master collection")
xra.log_info('Setting Render Engine to Cycles')
xra.set_renderer_to_cycles(64) #TODO: experiment with this number
xra.set_render_resolution(2048, 2048)
xra.log_info('Rendering device: ' + bpy.context.scene.cycles.device)
bpy.context.scene.render.film_transparent = True
# Don't render the backdrop
if "Backdrop" in bpy.data.objects:
bpy.data.objects["Backdrop"].cycles.is_holdout = True
# when camera is under, backface culling is used instead of holdout
if (phi > math.pi):
bpy.data.objects["Floor"].cycles.is_holdout = True
else:
bpy.data.objects["Floor"].cycles.is_holdout = False
class shotClass:
def __init__(self, name, x, z):
self.name = name
self.x = x
self.z = z
orbitX = str(math.floor(math.degrees(theta)))
orbitY = str(math.floor(math.degrees(phi)))
shot = shotClass(("-" + orbitX + "_" + orbitY), theta, phi)
# Camera
bpy.ops.object.camera_add()
bpy.context.scene.camera = bpy.context.active_object
# Join all objects in the master collection into a single object
# this is for camera scaling purposes
xra.join_collection_objects_into_one("master")
# Make sure that it is not hidden from the render (TODO: add to validation)
bpy.data.collections["master"].objects[0].hide_render = False
# Rotate the object and angle the camera (vertically only)
xra.rotate_object_and_angle_camera(
bpy.context.scene.camera,
bpy.data.collections["master"].objects[0],
shot.x,
shot.z
)
# Render Image
xra.log_info('Starting Render')
timer = time.time()
bpy.ops.render.render()
# Image Save Location
xra.log_info('Setting Image Save Location')
# TODO: pass the filename from rabbit
bpy.context.scene.render.filepath = working_dir + asset_name + shot.name + ".png"
bpy.context.scene.render.image_settings.file_format = "PNG"
xra.log_info(bpy.context.scene.render.filepath)
# Save Image
bpy.data.images["Render Result"].save_render(filepath=bpy.context.scene.render.filepath)
xra.log_info(shot.name + " Render Time: " + str(time.time() - timer) + " seconds")
if xra.record_asset_submission_render(submission_id, (asset_name + shot.name + ".png")) == False:
xra.log_error("Unable to record renders on 3xr.com")
# Save again with all of the changes
# TODO: remove this when no longer needed
xra.save_as(working_dir, 'master_image_from_angle')
| 2.046875
| 2
|
examples/add_vm_nic.py
|
oliel/python-ovirt-engine-sdk4
| 3
|
12785112
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import ovirtsdk4 as sdk
import ovirtsdk4.types as types
logging.basicConfig(level=logging.DEBUG, filename='example.log')
# This example will connect to the server and add a network interface
# card to an existing virtual machine.
# Create the connection to the server:
connection = sdk.Connection(
url='https://engine40.example.com/ovirt-engine/api',
username='admin@internal',
password='<PASSWORD>',
ca_file='ca.pem',
debug=True,
log=logging.getLogger(),
)
# Locate the virtual machines service and use it to find the virtual
# machine:
system_service = connection.system_service()
vms_service = system_service.vms_service()
vm = vms_service.list(search='name=myvm')[0]
# In order to specify the network that the new interface will be
# connected to we need to specify the identifier of the virtual network
# interface profile, so we need to find it. We can have duplicate names
# for vnic profiles in different clusters, so we must also find first the
# network by datacenter and cluster:
cluster = system_service.clusters_service().cluster_service(vm.cluster.id).get()
dcs_service = connection.system_service().data_centers_service()
dc = dcs_service.list(search='Clusters.name=%s' % cluster.name)[0]
networks_service = dcs_service.service(dc.id).networks_service()
network = next(
(n for n in networks_service.list()
if n.name == 'mynetwork'),
None
)
profiles_service = connection.system_service().vnic_profiles_service()
profile_id = None
for profile in profiles_service.list():
if profile.name == 'mynetwork':
profile_id = profile.id
break
# Locate the service that manages the network interface cards of the
# virtual machine:
nics_service = vms_service.vm_service(vm.id).nics_service()
# Use the "add" method of the network interface cards service to add the
# new network interface card:
nics_service.add(
types.Nic(
name='mynic',
description='My network interface card',
vnic_profile=types.VnicProfile(
id=profile_id,
),
),
)
# Close the connection to the server:
connection.close()
| 2.28125
| 2
|
python_packaging/sample_restraint/tests/test_binding.py
|
kassonlab/gmxapi
| 43
|
12785113
|
<gh_stars>10-100
# The myplugin module must be locatable by Python.
# If you configured CMake in the build directory ``/path/to/repo/build`` then,
# assuming you are in ``/path/to/repo``, run the tests with something like
# PYTHONPATH=./cmake-build-debug/src/pythonmodule mpiexec -n 2 python -m mpi4py -m pytest tests/
# This test is not currently run automatically in any way. Build the module, point your PYTHONPATH at it,
# and run pytest in the tests directory.
import logging
import os
import gmxapi as gmx
from gmxapi.simulation.context import Context
from gmxapi.simulation.workflow import WorkElement, from_tpr
from gmxapi import version as gmx_version
import pytest
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handler
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logging.getLogger().addHandler(ch)
logger = logging.getLogger()
def test_import():
# Suppress inspection warning outside of testing context.
# noinspection PyUnresolvedReferences
import myplugin
assert myplugin
@pytest.mark.usefixtures("cleandir")
def test_ensemble_potential_nompi(spc_water_box, mdrun_kwargs):
"""Test ensemble potential without an ensemble.
"""
tpr_filename = spc_water_box
print("Testing plugin potential with input file {}".format(os.path.abspath(tpr_filename)))
assert gmx.version.api_is_at_least(0, 0, 5)
md = from_tpr([tpr_filename], append_output=False, **mdrun_kwargs)
# Create a WorkElement for the potential
params = {'sites': [1, 4],
'nbins': 10,
'binWidth': 0.1,
'min_dist': 0.,
'max_dist': 10.,
'experimental': [1.] * 10,
'nsamples': 1,
'sample_period': 0.001,
'nwindows': 4,
'k': 10000.,
'sigma': 1.}
potential = WorkElement(namespace="myplugin",
operation="ensemble_restraint",
params=params)
# Note that we could flexibly capture accessor methods as workflow elements, too. Maybe we can
# hide the extra Python bindings by letting myplugin.HarmonicRestraint automatically convert
# to a WorkElement when add_dependency is called on it.
potential.name = "ensemble_restraint"
md.add_dependency(potential)
context = Context(md)
with context as session:
session.run()
@pytest.mark.withmpi_only
@pytest.mark.usefixtures("cleandir")
def test_ensemble_potential_withmpi(spc_water_box, mdrun_kwargs):
tpr_filename = spc_water_box
logger.info("Testing plugin potential with input file {}".format(os.path.abspath(tpr_filename)))
assert gmx_version.api_is_at_least(0, 0, 5)
md = from_tpr([tpr_filename, tpr_filename], append_output=False, **mdrun_kwargs)
# Create a WorkElement for the potential
params = {'sites': [1, 4],
'nbins': 10,
'binWidth': 0.1,
'min_dist': 0.,
'max_dist': 10.,
'experimental': [0.5] * 10,
'nsamples': 1,
'sample_period': 0.001,
'nwindows': 4,
'k': 10000.,
'sigma': 1.}
potential = WorkElement(namespace="myplugin",
operation="ensemble_restraint",
params=params)
# Note that we could flexibly capture accessor methods as workflow elements, too. Maybe we can
# hide the extra Python bindings by letting myplugin.HarmonicRestraint automatically convert
# to a WorkElement when add_dependency is called on it.
potential.name = "ensemble_restraint"
md.add_dependency(potential)
context = Context(md)
with context as session:
session.run()
| 1.859375
| 2
|
tests/util/test_filetimes.py
|
dariusbakunas/rawdisk
| 3
|
12785114
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from rawdisk.util.filetimes import dt_to_filetime, filetime_to_dt, UTC, ZERO
from datetime import datetime
class TestFiletimesModule(unittest.TestCase):
def test_dt_to_filetime(self):
value = datetime(2009, 7, 25, 23, 0)
self.assertEqual(128930364000000000, dt_to_filetime(value))
def test_filetime_to_dt(self):
value = 116444736000000000
self.assertEqual(datetime(1970, 1, 1, 0, 0), filetime_to_dt(value))
def test_utc(self):
utc = UTC()
self.assertEqual(utc.tzname(None), "UTC")
self.assertEqual(utc.utcoffset(None), ZERO)
if __name__ == "__main__":
unittest.main()
| 2.8125
| 3
|
migrations/sqlite_versions/2020-05-15_ccc37f794db6_update_add_constraints.py
|
debrief/pepys-import
| 4
|
12785115
|
"""update, add constraints
Revision ID: ccc37f794db6
Revises: <PASSWORD>
Create Date: 2020-05-15 14:02:21.163220
"""
from datetime import datetime
from uuid import uuid4
from alembic import op
from geoalchemy2 import Geometry
from sqlalchemy import DATE, Boolean, Column, DateTime, ForeignKey, Integer, MetaData, String
from sqlalchemy.dialects.sqlite import REAL, TIMESTAMP
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import ( # used to defer fetching attributes unless it's specifically called
declarative_base,
declared_attr,
deferred,
relationship,
)
from sqlalchemy.sql.schema import CheckConstraint
from pepys_import.core.store import constants
from pepys_import.core.store.common_db import (
ActivationMixin,
CommentMixin,
ContactMixin,
DatafileMixin,
ElevationPropertyMixin,
GeometryMixin,
HostedByMixin,
LocationPropertyMixin,
LogMixin,
LogsHoldingMixin,
MediaMixin,
PlatformMixin,
ReferenceDefaultFields,
ReferenceRepr,
SensorMixin,
StateMixin,
TaggedItemMixin,
)
from pepys_import.core.store.db_base import sqlite_naming_convention
from pepys_import.core.store.db_status import TableTypes
from pepys_import.utils.sqlalchemy_utils import UUIDType
Metadata = MetaData(naming_convention=sqlite_naming_convention)
BaseSpatiaLite = declarative_base(metadata=Metadata)
class ClassificationType(BaseSpatiaLite):
__tablename__ = constants.CLASSIFICATION_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 19
class_type_id = Column(UUIDType, primary_key=True, default=uuid4)
class_type = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class ContactType(BaseSpatiaLite):
__tablename__ = constants.CONTACT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 20
contact_type_id = Column(UUIDType, primary_key=True, default=uuid4)
contact_type = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class ConfidenceLevel(BaseSpatiaLite):
__tablename__ = constants.CONFIDENCE_LEVEL
table_type = TableTypes.REFERENCE
table_type_id = 27
confidence_level_id = Column(UUIDType, primary_key=True, default=uuid4)
level = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class Task(BaseSpatiaLite):
__tablename__ = constants.TASK
table_type = TableTypes.METADATA
table_type_id = 4
task_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
parent_id = Column(UUIDType, ForeignKey("Tasks.task_id"), nullable=False)
start = Column(TIMESTAMP, nullable=False)
end = Column(TIMESTAMP, nullable=False)
environment = deferred(Column(String(150)))
location = deferred(Column(String(150)))
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class CommentType(BaseSpatiaLite):
__tablename__ = constants.COMMENT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 25
comment_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class MediaType(BaseSpatiaLite):
__tablename__ = constants.MEDIA_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 24
media_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class Privacy(BaseSpatiaLite):
__tablename__ = constants.PRIVACY
table_type = TableTypes.REFERENCE
table_type_id = 22
privacy_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
level = Column(Integer, nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class State(BaseSpatiaLite, StateMixin, ElevationPropertyMixin, LocationPropertyMixin):
__tablename__ = constants.STATE
table_type = TableTypes.MEASUREMENT
table_type_id = 28
state_id = Column(UUIDType, primary_key=True, default=uuid4)
time = Column(TIMESTAMP, nullable=False)
sensor_id = Column(UUIDType, ForeignKey("Sensors.sensor_id"), nullable=False)
_location = deferred(
Column(
"location",
Geometry(geometry_type="POINT", srid=4326, management=True, spatial_index=False),
)
)
_elevation = deferred(Column("elevation", REAL))
_heading = deferred(Column("heading", REAL))
_course = deferred(Column("course", REAL))
_speed = deferred(Column("speed", REAL))
source_id = Column(UUIDType, ForeignKey("Datafiles.datafile_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"))
created_date = Column(DateTime, default=datetime.utcnow)
@declared_attr
def platform(self):
return relationship(
"Platform",
secondary=constants.SENSOR,
primaryjoin="State.sensor_id == Sensor.sensor_id",
secondaryjoin="Platform.platform_id == Sensor.host",
lazy="joined",
join_depth=1,
uselist=False,
viewonly=True,
)
@declared_attr
def platform_name(self):
return association_proxy("platform", "name")
class Change(BaseSpatiaLite):
__tablename__ = constants.CHANGE
table_type = TableTypes.METADATA
table_type_id = 8
change_id = Column(UUIDType, primary_key=True, default=uuid4)
user = Column(String(150), nullable=False)
modified = Column(DATE, nullable=False)
reason = Column(String(500), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class Log(BaseSpatiaLite, LogMixin):
__tablename__ = constants.LOG
table_type = TableTypes.METADATA
table_type_id = 9
log_id = Column(UUIDType, primary_key=True, default=uuid4)
table = Column(String(150), nullable=False)
id = Column(UUIDType, nullable=False)
field = Column(String(150))
new_value = Column(String(150))
change_id = Column(UUIDType, ForeignKey("Changes.change_id"), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class Tag(BaseSpatiaLite):
__tablename__ = constants.TAG
table_type = TableTypes.METADATA
table_type_id = 11
tag_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class HostedBy(BaseSpatiaLite, HostedByMixin):
__tablename__ = constants.HOSTED_BY
table_type = TableTypes.METADATA
table_type_id = 1
hosted_by_id = Column(UUIDType, primary_key=True, default=uuid4)
subject_id = Column(UUIDType, ForeignKey("Platforms.platform_id"), nullable=False)
host_id = Column(UUIDType, ForeignKey("Platforms.platform_id"), nullable=False)
hosted_from = Column(DATE, nullable=False)
host_to = Column(DATE, nullable=False)
privacy_id = Column(Integer, ForeignKey("Privacies.privacy_id"), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class Platform(BaseSpatiaLite, PlatformMixin):
__tablename__ = constants.PLATFORM
table_type = TableTypes.METADATA
table_type_id = 3
platform_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
pennant = deferred(Column(String(10), nullable=False))
trigraph = deferred(Column(String(3)))
quadgraph = deferred(Column(String(4)))
nationality_id = Column(UUIDType, ForeignKey("Nationalities.nationality_id"), nullable=False)
platform_type_id = Column(
UUIDType, ForeignKey("PlatformTypes.platform_type_id"), nullable=False
)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class GeometrySubType(BaseSpatiaLite):
__tablename__ = constants.GEOMETRY_SUBTYPE
table_type = TableTypes.REFERENCE
table_type_id = 16
geo_sub_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
parent = Column(
UUIDType, ForeignKey("GeometryTypes.geo_type_id", onupdate="cascade"), nullable=False
)
created_date = Column(DateTime, default=datetime.utcnow)
class Serial(BaseSpatiaLite):
__tablename__ = constants.SERIAL
table_type = TableTypes.METADATA
table_type_id = 37
serial_id = Column(UUIDType, primary_key=True, default=uuid4)
wargame_id = Column(
UUIDType,
ForeignKey("Wargames.wargame_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
serial_number = Column(
String(150),
CheckConstraint("serial_number <> ''", name="ck_Serials_serial_number"),
nullable=False,
)
start = Column(TIMESTAMP, nullable=False)
end = Column(TIMESTAMP, nullable=False)
exercise = Column(String(150))
environment = deferred(Column(String(150)))
location = deferred(Column(String(150)))
privacy_id = Column(
UUIDType,
ForeignKey("Privacies.privacy_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
created_date = Column(DateTime, default=datetime.utcnow)
class Wargame(BaseSpatiaLite):
__tablename__ = constants.WARGAME
table_type = TableTypes.METADATA
table_type_id = 37
wargame_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150), CheckConstraint("name <> ''", name="ck_Wargames_name"), nullable=False
)
series_id = Column(
UUIDType,
ForeignKey("Series.series_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
start = Column(TIMESTAMP, nullable=False)
end = Column(TIMESTAMP, nullable=False)
privacy_id = Column(
UUIDType,
ForeignKey("Privacies.privacy_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
created_date = Column(DateTime, default=datetime.utcnow)
class Series(BaseSpatiaLite):
__tablename__ = constants.SERIES
table_type = TableTypes.METADATA
table_type_id = 36
series_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), CheckConstraint("name <> ''", name="ck_Series_name"), nullable=False)
privacy_id = Column(
UUIDType,
ForeignKey("Privacies.privacy_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
created_date = Column(DateTime, default=datetime.utcnow)
class WargameParticipant(BaseSpatiaLite):
__tablename__ = constants.WARGAME_PARTICIPANT
table_type = TableTypes.METADATA
table_type_id = 38
wargame_participant_id = Column(UUIDType, primary_key=True, default=uuid4)
wargame_id = Column(
UUIDType,
ForeignKey("Wargames.wargame_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
platform_id = Column(
UUIDType,
ForeignKey("Platforms.platform_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
privacy_id = Column(
UUIDType,
ForeignKey("Privacies.privacy_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
created_date = Column(DateTime, default=datetime.utcnow)
class SerialParticipant(BaseSpatiaLite):
__tablename__ = constants.SERIAL_PARTICIPANT
table_type = TableTypes.METADATA
table_type_id = 39
serial_participant_id = Column(UUIDType, primary_key=True, default=uuid4)
wargame_participant_id = Column(
UUIDType,
ForeignKey(
"WargameParticipants.wargame_participant_id", onupdate="cascade", ondelete="cascade"
),
nullable=False,
)
serial_id = Column(
UUIDType,
ForeignKey("Serials.serial_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
start = Column(TIMESTAMP)
end = Column(TIMESTAMP)
force_type_id = Column(
UUIDType,
ForeignKey("ForceTypes.force_type_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
privacy_id = Column(
UUIDType,
ForeignKey("Privacies.privacy_id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
created_date = Column(DateTime, default=datetime.utcnow)
# Reference Tables
class ForceType(BaseSpatiaLite, ReferenceRepr, ReferenceDefaultFields):
__tablename__ = constants.FORCE_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 40
force_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
CheckConstraint("name <> ''", name="ck_ForceTypes_name"),
nullable=False,
unique=True,
)
color = Column(String(10))
created_date = Column(DateTime, default=datetime.utcnow)
class Geometry1(BaseSpatiaLite, GeometryMixin):
__tablename__ = constants.GEOMETRY
table_type = TableTypes.MEASUREMENT
table_type_id = 33
geometry_id = Column(UUIDType, primary_key=True, default=uuid4)
geometry = deferred(
Column(
Geometry(geometry_type="GEOMETRY", management=True, spatial_index=False), nullable=False
)
)
name = Column(String(150), nullable=False)
geo_type_id = Column(UUIDType, ForeignKey("GeometryTypes.geo_type_id"), nullable=False)
geo_sub_type_id = Column(
UUIDType, ForeignKey("GeometrySubTypes.geo_sub_type_id"), nullable=False
)
start = Column(TIMESTAMP)
end = Column(TIMESTAMP)
serial_id = Column(UUIDType, ForeignKey("Serials.serial_id"))
subject_platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id"))
sensor_platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id"))
source_id = Column(UUIDType, ForeignKey("Datafiles.datafile_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"))
created_date = Column(DateTime, default=datetime.utcnow)
class GeometryType(BaseSpatiaLite):
__tablename__ = constants.GEOMETRY_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 15
geo_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class PlatformType(BaseSpatiaLite):
__tablename__ = constants.PLATFORM_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 13
platform_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class Participant(BaseSpatiaLite):
__tablename__ = constants.PARTICIPANT
table_type = TableTypes.METADATA
table_type_id = 5
participant_id = Column(UUIDType, primary_key=True, default=uuid4)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id"), nullable=False)
task_id = Column(UUIDType, ForeignKey("Tasks.task_id"), nullable=False)
start = Column(TIMESTAMP)
end = Column(TIMESTAMP)
force = Column(String(150))
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class TaggedItem(BaseSpatiaLite, TaggedItemMixin):
__tablename__ = constants.TAGGED_ITEM
table_type = TableTypes.METADATA
table_type_id = 12
tagged_item_id = Column(UUIDType, primary_key=True, default=uuid4)
tag_id = Column(UUIDType, ForeignKey("Tags.tag_id"), nullable=False)
item_id = Column(UUIDType, nullable=False)
tagged_by_id = Column(UUIDType, ForeignKey("Users.user_id"), nullable=False)
private = Column(Boolean, nullable=False)
tagged_on = Column(DATE, nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class SensorType(BaseSpatiaLite):
__tablename__ = constants.SENSOR_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 21
sensor_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class DatafileType(BaseSpatiaLite):
__tablename__ = constants.DATAFILE_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 23
datafile_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class User(BaseSpatiaLite):
__tablename__ = constants.USER
table_type = TableTypes.REFERENCE
table_type_id = 17
user_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class Activation(BaseSpatiaLite, ActivationMixin):
__tablename__ = constants.ACTIVATION
table_type = TableTypes.MEASUREMENT
table_type_id = 30
activation_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
sensor_id = Column(UUIDType, ForeignKey("Sensors.sensor_id"), nullable=False)
start = deferred(Column(TIMESTAMP, nullable=False))
end = deferred(Column(TIMESTAMP, nullable=False))
_min_range = deferred(Column("min_range", REAL))
_max_range = deferred(Column("max_range", REAL))
_left_arc = deferred(Column("left_arc", REAL))
_right_arc = deferred(Column("right_arc", REAL))
source_id = Column(UUIDType, ForeignKey("Datafiles.datafile_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"))
created_date = Column(DateTime, default=datetime.utcnow)
class Comment(BaseSpatiaLite, CommentMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sensor_name = "N/A"
__tablename__ = constants.COMMENT
table_type = TableTypes.MEASUREMENT
table_type_id = 32
comment_id = Column(UUIDType, primary_key=True, default=uuid4)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id"))
time = Column(TIMESTAMP, nullable=False)
comment_type_id = Column(UUIDType, ForeignKey("CommentTypes.comment_type_id"), nullable=False)
content = Column(String(150), nullable=False)
source_id = Column(UUIDType, ForeignKey("Datafiles.datafile_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"))
created_date = Column(DateTime, default=datetime.utcnow)
class Contact(BaseSpatiaLite, ContactMixin, LocationPropertyMixin, ElevationPropertyMixin):
__tablename__ = constants.CONTACT
table_type = TableTypes.MEASUREMENT
table_type_id = 29
contact_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150))
sensor_id = Column(UUIDType, ForeignKey("Sensors.sensor_id"), nullable=False)
time = Column(TIMESTAMP, nullable=False)
_bearing = deferred(Column("bearing", REAL))
_rel_bearing = deferred(Column("rel_bearing", REAL))
_ambig_bearing = deferred(Column("ambig_bearing", REAL))
_freq = deferred(Column("freq", REAL))
_range = deferred(Column("range", REAL))
_location = deferred(
Column(
"location",
Geometry(geometry_type="POINT", srid=4326, management=True, spatial_index=False),
)
)
_elevation = deferred(Column("elevation", REAL))
_major = deferred(Column("major", REAL))
_minor = deferred(Column("minor", REAL))
_orientation = deferred(Column("orientation", REAL))
classification = deferred(Column(UUIDType, ForeignKey("ClassificationTypes.class_type_id")))
confidence = deferred(Column(UUIDType, ForeignKey("ConfidenceLevels.confidence_level_id")))
contact_type = deferred(Column(UUIDType, ForeignKey("ContactTypes.contact_type_id")))
_mla = deferred(Column("mla", REAL))
_soa = deferred(Column("soa", REAL))
subject_id = Column(UUIDType, ForeignKey("Platforms.platform_id"))
source_id = Column(UUIDType, ForeignKey("Datafiles.datafile_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"))
created_date = deferred(Column(DateTime, default=datetime.utcnow))
@declared_attr
def platform(self):
return relationship(
"Platform",
secondary=constants.SENSOR,
primaryjoin="Contact.sensor_id == Sensor.sensor_id",
secondaryjoin="Platform.platform_id == Sensor.host",
lazy="joined",
join_depth=1,
uselist=False,
viewonly=True,
)
@declared_attr
def platform_name(self):
return association_proxy("platform", "name")
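# Note: the declared_attr relationship above resolves a Contact's owning Platform
# indirectly via the Sensors table (Contact.sensor_id -> Sensor.sensor_id, then
# Sensor.host -> Platform.platform_id), and platform_name exposes that Platform's
# name through an association proxy. The relationship is viewonly, so it is never
# used to write back to the underlying tables.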
class Sensor(BaseSpatiaLite, SensorMixin):
__tablename__ = constants.SENSOR
table_type = TableTypes.METADATA
table_type_id = 2
sensor_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
sensor_type_id = Column(UUIDType, ForeignKey("SensorTypes.sensor_type_id"), nullable=False)
host = Column(UUIDType, ForeignKey("Platforms.platform_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
class Datafile(BaseSpatiaLite, DatafileMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.measurements = dict()
__tablename__ = constants.DATAFILE
table_type = TableTypes.METADATA
table_type_id = 6
datafile_id = Column(UUIDType, primary_key=True, default=uuid4)
simulated = deferred(Column(Boolean, nullable=False))
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"), nullable=False)
datafile_type_id = Column(
UUIDType, ForeignKey("DatafileTypes.datafile_type_id"), nullable=False
)
reference = Column(String(150))
url = Column(String(150))
size = deferred(Column(Integer, nullable=False))
hash = deferred(Column(String(32), nullable=False))
created_date = Column(DateTime, default=datetime.utcnow)
class UnitType(BaseSpatiaLite):
__tablename__ = constants.UNIT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 18
unit_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class CommodityType(BaseSpatiaLite):
__tablename__ = constants.COMMODITY_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 26
commodity_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
class Media(BaseSpatiaLite, MediaMixin, ElevationPropertyMixin, LocationPropertyMixin):
__tablename__ = constants.MEDIA
table_type = TableTypes.MEASUREMENT
table_type_id = 34
media_id = Column(UUIDType, primary_key=True, default=uuid4)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id"))
subject_id = Column(UUIDType, ForeignKey("Platforms.platform_id"))
sensor_id = Column(UUIDType, ForeignKey("Sensors.sensor_id"))
_location = deferred(
Column(
"location",
Geometry(geometry_type="POINT", srid=4326, management=True, spatial_index=False),
)
)
_elevation = deferred(Column("elevation", REAL))
time = Column(TIMESTAMP)
media_type_id = Column(UUIDType, ForeignKey("MediaTypes.media_type_id"), nullable=False)
url = deferred(Column(String(150), nullable=False))
source_id = Column(UUIDType, ForeignKey("Datafiles.datafile_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"))
created_date = Column(DateTime, default=datetime.utcnow)
class LogsHolding(BaseSpatiaLite, LogsHoldingMixin):
__tablename__ = constants.LOGS_HOLDING
table_type = TableTypes.MEASUREMENT
table_type_id = 31
logs_holding_id = Column(UUIDType, primary_key=True, default=uuid4)
time = Column(TIMESTAMP, nullable=False)
commodity_id = Column(UUIDType, ForeignKey("CommodityTypes.commodity_type_id"), nullable=False)
quantity = Column(REAL, nullable=False)
unit_type_id = Column(UUIDType, ForeignKey("UnitTypes.unit_type_id"), nullable=False)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id"), nullable=False)
comment = Column(String(150), nullable=False)
source_id = Column(UUIDType, ForeignKey("Datafiles.datafile_id"), nullable=False)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id"))
created_date = Column(DateTime, default=datetime.utcnow)
class Nationality(BaseSpatiaLite):
__tablename__ = constants.NATIONALITY
table_type = TableTypes.REFERENCE
table_type_id = 14
nationality_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False, unique=True)
created_date = Column(DateTime, default=datetime.utcnow)
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("Activations", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Activations_source_id_Datafiles"),
"Datafiles",
["source_id"],
["datafile_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Activations_sensor_id_Sensors"), "Sensors", ["sensor_id"], ["sensor_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Activations_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
with op.batch_alter_table("Comments", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Comments_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Comments_platform_id_Platforms"),
"Platforms",
["platform_id"],
["platform_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Comments_source_id_Datafiles"),
"Datafiles",
["source_id"],
["datafile_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Comments_comment_type_id_CommentTypes"),
"CommentTypes",
["comment_type_id"],
["comment_type_id"],
)
with op.batch_alter_table("Contacts", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Contacts_sensor_id_Sensors"), "Sensors", ["sensor_id"], ["sensor_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Contacts_source_id_Datafiles"),
"Datafiles",
["source_id"],
["datafile_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Contacts_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Contacts_subject_id_Platforms"),
"Platforms",
["subject_id"],
["platform_id"],
)
with op.batch_alter_table("Datafiles", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Datafiles_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Datafiles_datafile_type_id_DatafileTypes"),
"DatafileTypes",
["datafile_type_id"],
["datafile_type_id"],
)
with op.batch_alter_table("Geometries", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Geometries_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Geometries_task_id_Tasks"), "Tasks", ["task_id"], ["task_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Geometries_subject_platform_id_Platforms"),
"Platforms",
["subject_platform_id"],
["platform_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Geometries_geo_sub_type_id_GeometrySubTypes"),
"GeometrySubTypes",
["geo_sub_type_id"],
["geo_sub_type_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Geometries_source_id_Datafiles"),
"Datafiles",
["source_id"],
["datafile_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Geometries_geo_type_id_GeometryTypes"),
"GeometryTypes",
["geo_type_id"],
["geo_type_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Geometries_sensor_platform_id_Platforms"),
"Platforms",
["sensor_platform_id"],
["platform_id"],
)
with op.batch_alter_table("HostedBy", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_HostedBy_subject_id_Platforms"),
"Platforms",
["subject_id"],
["platform_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_HostedBy_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_HostedBy_host_id_Platforms"), "Platforms", ["host_id"], ["platform_id"]
)
with op.batch_alter_table("Logs", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Logs_change_id_Changes"), "Changes", ["change_id"], ["change_id"]
)
with op.batch_alter_table("LogsHoldings", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_LogsHoldings_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_LogsHoldings_platform_id_Platforms"),
"Platforms",
["platform_id"],
["platform_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_LogsHoldings_source_id_Datafiles"),
"Datafiles",
["source_id"],
["datafile_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_LogsHoldings_unit_type_id_UnitTypes"),
"UnitTypes",
["unit_type_id"],
["unit_type_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_LogsHoldings_commodity_id_CommodityTypes"),
"CommodityTypes",
["commodity_id"],
["commodity_type_id"],
)
with op.batch_alter_table("Media", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Media_source_id_Datafiles"), "Datafiles", ["source_id"], ["datafile_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Media_sensor_id_Sensors"), "Sensors", ["sensor_id"], ["sensor_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Media_privacy_id_Privacies"), "Privacies", ["privacy_id"], ["privacy_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Media_media_type_id_MediaTypes"),
"MediaTypes",
["media_type_id"],
["media_type_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Media_subject_id_Platforms"),
"Platforms",
["subject_id"],
["platform_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Media_platform_id_Platforms"),
"Platforms",
["platform_id"],
["platform_id"],
)
with op.batch_alter_table("Participants", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Participants_platform_id_Platforms"),
"Platforms",
["platform_id"],
["platform_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Participants_task_id_Tasks"), "Tasks", ["task_id"], ["task_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Participants_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
with op.batch_alter_table("Platforms", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Platforms_nationality_id_Nationalities"),
"Nationalities",
["nationality_id"],
["nationality_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Platforms_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Platforms_platform_type_id_PlatformTypes"),
"PlatformTypes",
["platform_type_id"],
["platform_type_id"],
)
with op.batch_alter_table("Sensors", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Sensors_sensor_type_id_SensorTypes"),
"SensorTypes",
["sensor_type_id"],
["sensor_type_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Sensors_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_Sensors_host_Platforms"), "Platforms", ["host"], ["platform_id"]
)
with op.batch_alter_table("States", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_States_sensor_id_Sensors"), "Sensors", ["sensor_id"], ["sensor_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_States_privacy_id_Privacies"),
"Privacies",
["privacy_id"],
["privacy_id"],
)
batch_op.create_foreign_key(
batch_op.f("fk_States_source_id_Datafiles"), "Datafiles", ["source_id"], ["datafile_id"]
)
with op.batch_alter_table("TaggedItems", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_TaggedItems_tag_id_Tags"), "Tags", ["tag_id"], ["tag_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_TaggedItems_tagged_by_id_Users"), "Users", ["tagged_by_id"], ["user_id"]
)
with op.batch_alter_table("Tasks", schema=None) as batch_op:
batch_op.create_foreign_key(
batch_op.f("fk_Tasks_privacy_id_Privacies"), "Privacies", ["privacy_id"], ["privacy_id"]
)
batch_op.create_foreign_key(
batch_op.f("fk_Tasks_parent_id_Tasks"), "Tasks", ["parent_id"], ["task_id"]
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("Tasks", schema=None, copy_from=Task.__table__) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_Tasks_parent_id_Tasks"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Tasks_privacy_id_Privacies"), type_="foreignkey")
with op.batch_alter_table(
"TaggedItems", schema=None, copy_from=TaggedItem.__table__
) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_TaggedItems_tagged_by_id_Users"), type_="foreignkey"
)
batch_op.drop_constraint(batch_op.f("fk_TaggedItems_tag_id_Tags"), type_="foreignkey")
with op.batch_alter_table("States", schema=None, copy_from=State.__table__) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_States_source_id_Datafiles"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_States_privacy_id_Privacies"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_States_sensor_id_Sensors"), type_="foreignkey")
with op.batch_alter_table("Sensors", schema=None, copy_from=Sensor.__table__) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_Sensors_host_Platforms"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Sensors_privacy_id_Privacies"), type_="foreignkey")
batch_op.drop_constraint(
batch_op.f("fk_Sensors_sensor_type_id_SensorTypes"), type_="foreignkey"
)
with op.batch_alter_table("Platforms", schema=None, copy_from=Platform.__table__) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_Platforms_platform_type_id_PlatformTypes"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_Platforms_privacy_id_Privacies"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_Platforms_nationality_id_Nationalities"), type_="foreignkey"
)
with op.batch_alter_table(
"Participants", schema=None, copy_from=Participant.__table__
) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_Participants_privacy_id_Privacies"), type_="foreignkey"
)
batch_op.drop_constraint(batch_op.f("fk_Participants_task_id_Tasks"), type_="foreignkey")
batch_op.drop_constraint(
batch_op.f("fk_Participants_platform_id_Platforms"), type_="foreignkey"
)
with op.batch_alter_table("Media", schema=None, copy_from=Media.__table__) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_Media_platform_id_Platforms"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Media_subject_id_Platforms"), type_="foreignkey")
batch_op.drop_constraint(
batch_op.f("fk_Media_media_type_id_MediaTypes"), type_="foreignkey"
)
batch_op.drop_constraint(batch_op.f("fk_Media_privacy_id_Privacies"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Media_sensor_id_Sensors"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Media_source_id_Datafiles"), type_="foreignkey")
with op.batch_alter_table(
"LogsHoldings", schema=None, copy_from=LogsHolding.__table__
) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_LogsHoldings_commodity_id_CommodityTypes"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_LogsHoldings_unit_type_id_UnitTypes"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_LogsHoldings_source_id_Datafiles"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_LogsHoldings_platform_id_Platforms"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_LogsHoldings_privacy_id_Privacies"), type_="foreignkey"
)
with op.batch_alter_table("Logs", schema=None, copy_from=Log.__table__) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_Logs_change_id_Changes"), type_="foreignkey")
with op.batch_alter_table("HostedBy", schema=None, copy_from=HostedBy.__table__) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_HostedBy_host_id_Platforms"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_HostedBy_privacy_id_Privacies"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_HostedBy_subject_id_Platforms"), type_="foreignkey")
with op.batch_alter_table("Geometries", schema=None, copy_from=Geometry1.__table__) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_Geometries_sensor_platform_id_Platforms"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_Geometries_geo_type_id_GeometryTypes"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_Geometries_source_id_Datafiles"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_Geometries_geo_sub_type_id_GeometrySubTypes"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_Geometries_subject_platform_id_Platforms"), type_="foreignkey"
)
batch_op.drop_constraint(batch_op.f("fk_Geometries_task_id_Tasks"), type_="foreignkey")
batch_op.drop_constraint(
batch_op.f("fk_Geometries_privacy_id_Privacies"), type_="foreignkey"
)
with op.batch_alter_table("Datafiles", schema=None, copy_from=Datafile.__table__) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_Datafiles_datafile_type_id_DatafileTypes"), type_="foreignkey"
)
batch_op.drop_constraint(
batch_op.f("fk_Datafiles_privacy_id_Privacies"), type_="foreignkey"
)
with op.batch_alter_table("Contacts", schema=None, copy_from=Contact.__table__) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_Contacts_subject_id_Platforms"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Contacts_privacy_id_Privacies"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Contacts_source_id_Datafiles"), type_="foreignkey")
batch_op.drop_constraint(batch_op.f("fk_Contacts_sensor_id_Sensors"), type_="foreignkey")
with op.batch_alter_table("Comments", schema=None, copy_from=Comment.__table__) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_Comments_comment_type_id_CommentTypes"), type_="foreignkey"
)
batch_op.drop_constraint(batch_op.f("fk_Comments_source_id_Datafiles"), type_="foreignkey")
batch_op.drop_constraint(
batch_op.f("fk_Comments_platform_id_Platforms"), type_="foreignkey"
)
batch_op.drop_constraint(batch_op.f("fk_Comments_privacy_id_Privacies"), type_="foreignkey")
with op.batch_alter_table(
"Activations", schema=None, copy_from=Activation.__table__
) as batch_op:
batch_op.drop_constraint(
batch_op.f("fk_Activations_privacy_id_Privacies"), type_="foreignkey"
)
batch_op.drop_constraint(batch_op.f("fk_Activations_sensor_id_Sensors"), type_="foreignkey")
batch_op.drop_constraint(
batch_op.f("fk_Activations_source_id_Datafiles"), type_="foreignkey"
)
# ### end Alembic commands ###
| 1.789063
| 2
|
predictatops/checkdata_runner.py
|
bluetyson/predictatops
| 0
|
12785116
|
# -*- coding: utf-8 -*-
##### import from other modules
from checkdata import *
from configurationplusfiles_runner import input_data_inst, config, output_data_inst
##### running functions called from elsewhere #####
tops = TopsAvailable(input_data_inst, config)
print("finding unique tops:")
tops.find_unique_tops_list()
#### Take out wells with no tops. This assumes some data structures that might not exist in your data, so check the code!
tops.take_out_wells_with_no_tops()
tops_counts = tops.get_df_of_top_counts_in_picks_df()
print("tops_counts = ", tops_counts)
print("number of wells with any tops:")
tops.get_number_wells_with_any_top()
#### Will print: "returning list of well names that have the required tops. The length of list is: ..." If this number is too small, consider changing the required tops in the configuration object.
test = tops.findWellsWithAllTopsGive()
##### Example use pattern if you just want to instantiate the class and run all the functions using variables defined in the configuration object
#### This just creates a class instance and then calls run_all()
new_tops2 = TopsAvailable(input_data_inst, config)
wells_with_required_tops = new_tops2.run_all()
print("first well that meets requirements:", wells_with_required_tops[0])
print("number of wells that meet requirements so far:", len(wells_with_required_tops))
print("configuration variables so far, gotten by printing vars(config):", vars(config))
#### Find & understand available curves
curvesInst2 = CurvesAvailable(input_data_inst, config)
curves_results = curvesInst2.run_all()
print("curves_results.keys():", curves_results.keys())
print(
"curves_results['wellsWithWantedCurves'][0:5]",
curves_results["wellsWithWantedCurves"][0:5],
)
print("len(curves_results['wellsWithWantedCurves'])")
len(curves_results["wellsWithWantedCurves"])
print("vars(curvesInst2).keys()", vars(curvesInst2).keys())
curvesInst2.config.threshold_returnCurvesThatArePresentInThisManyWells = 1916
onlyPlentifulCurvesArray = curvesInst2.getCurvesInMinNumberOfWells()
print("onlyPlentifulCurvesArray", onlyPlentifulCurvesArray)
wells_with_tops_and_curves = list(
set(wells_with_required_tops).intersection(curves_results["wellsWithWantedCurves"])
)
print("len(wells_with_tops_and_curves)", len(wells_with_tops_and_curves))
objectOfCurves = curves_results["objectOfCurves"]
wellsWithNeededCurvesList = findWellsWithCertainCurves(
objectOfCurves, onlyPlentifulCurvesArray
)
print("number of wells with all the required curves is", len(wellsWithNeededCurvesList))
#### NOTE! When we import the wells for real, we should add in the wells that have DEPTH instead of DEPT and rename the curve to DEPT!
print(onlyPlentifulCurvesArray)
newCurveList = getCurvesListWithDifferentCurveName(
onlyPlentifulCurvesArray, "DEPT", "DEPTH"
)
print("newCurveList", newCurveList)
wellsWithNeededCurvesListButDEPTHinsteadDEPT = findWellsWithCertainCurves(
objectOfCurves, newCurveList
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
#### Hmmm, zero? Let's see if we can get those 7 wells that we know have DEPTH instead of DEPT to appear if we reduce the other curve names?
wellsWithNeededCurvesListButDEPTHinsteadDEPT0 = findWellsWithCertainCurves(
objectOfCurves, ["GR", "DEPTH"]
)
print(
    "number of wells with curves ['GR', 'DEPTH'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT0),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT1 = findWellsWithCertainCurves(
objectOfCurves, ["GR", "DEPT"]
)
print(
    "number of wells with curves ['GR', 'DEPT'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT1),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT2 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "NPHI", "GR", "DEPT"]
)
print(
    "number of wells with curves ['ILD', 'NPHI', 'GR', 'DEPT'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT2),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT3 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "GR", "DPHI", "DEPT"]
)
print(
    "number of wells with curves ['ILD', 'GR', 'DPHI', 'DEPT'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT3),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT4 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "GR", "DEPT"]
)
print(
    "number of wells with curves ['ILD', 'GR', 'DEPT'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT4),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT5 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "GR", "DEPTH"]
)
print(
    "number of wells with curves ['ILD', 'GR', 'DEPTH'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT5),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT6 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "NPHI", "GR", "DPHI", "DEPT"]
)
print(
    "number of wells with curves ['ILD', 'NPHI', 'GR', 'DPHI', 'DEPT'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT6),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT7 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "NPHI", "GR", "DEPT"]
)
print(
    "number of wells with curves ['ILD', 'NPHI', 'GR', 'DEPT'] is",
    len(wellsWithNeededCurvesListButDEPTHinsteadDEPT7),
)
#### final try
print("final version:")
wellsWithNeededCurvesList_real = findWellsWithCertainCurves(
objectOfCurves, config.must_have_curves_list
)
print(
    "number of wells with all the required curves (config.must_have_curves_list) is",
    len(wellsWithNeededCurvesList_real),
)
print(
"wellsWithNeededCurvesList_real, first 3 wells:",
wellsWithNeededCurvesList_real[0:3],
)
#### Make list of wells that includes both the minimum required curves & minimum required tops
#### These two lists are different: one uses SITEID, the other the LAS file name. We'll convert them in the function below, find the ones in common, and return that as a new list of wells.
# WellsWithGivenTopsCurves = findWellsWithGivenTopsCurves(input_data_inst.wells_df,wells_with_required_tops,wellsWithNeededCurvesList_real)
# print("len(WellsWithGivenTopsCurves)",len(WellsWithGivenTopsCurves))
wells_with_required_tops_and_curves_list = list(
set(wells_with_required_tops).intersection(wellsWithNeededCurvesList_real)
)
print("length wells_test", len(wells_with_required_tops_and_curves_list))
print("wells_test = ", wells_with_required_tops_and_curves_list)
# print("wells with needed curves list real",wellsWithNeededCurvesList_real)
# print("wells wells_with_required_tops",wells_with_required_tops)
#### NOW LETS SAVE RESULTS
print(
"type of wells_with_required_tops_and_curves_list",
type(wells_with_required_tops_and_curves_list),
)
wells_with_required_tops_and_curves_list_df = pd.DataFrame(
np.array(wells_with_required_tops_and_curves_list), columns=["wells"]
)
print("type", type(wells_with_required_tops_and_curves_list_df))
checkdata_path_results = (
output_data_inst.base_path_for_all_results
+ "/"
+ output_data_inst.path_checkData
+ "/"
+ "wellnames_with_required_tops_and_curves_list.h5"
)
print("will now save results in hdf5 file in:", checkdata_path_results)
key_for_file_path_for_results = "wellsWTopsCurves"
print("key for hdf file is", key_for_file_path_for_results)
wells_with_required_tops_and_curves_list_df.to_hdf(
checkdata_path_results, key=key_for_file_path_for_results, mode="w"
)
| 3.03125
| 3
|
remote/python/src/main.py
|
magnusoy/Sparkie
| 15
|
12785117
|
<filename>remote/python/src/main.py
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module ...
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Sparkie Quadruped Robot"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__version__ = "1.0.0"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
"""
# Import packages
import cv2
import os
import time
# Importing model and utility
from object_detection.model import ObjectDetector
from communication.server import Server
# Change working directory to get the inference graph and labelmap
os.chdir('C:\\Users\\Petter\\Documents\\Sparkie\\resources')
# Get file paths to frozen inference graph and labelmap
CWD_PATH = os.getcwd()
PATH_TO_CKPT = os.path.join(CWD_PATH, 'model', 'frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(CWD_PATH, 'model', 'labelmap.pbtxt')
# Change working directory back to source
os.chdir('C:\\Users\\Petter\\Documents\\Sparkie\\remote\\python\\src')
# Initialize object detector model
object_detector = ObjectDetector(PATH_TO_CKPT, PATH_TO_LABELS)
object_detector.initialize()
# Creates a TCP Server
server = Server(host="0.0.0.0", port=8089)
client_connected = True
# Collects frames received from client on server
# Computes the Object detection
# and stores them in file.
if __name__ == "__main__":
while True:
while not client_connected:
client_connected = server.wait_for_connection()
frame = server.get_frame()
client_connected = False
object_detector.run(frame, debug=False)
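# Note on the loop above: client_connected starts as True, so the first iteration skips
# the wait loop and immediately requests a frame; after each detection pass it is reset
# to False, so the server waits for (or re-validates) the client connection before the
# next frame is processed.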
| 2.09375
| 2
|
AnEnsembleForPointEstimate/PredictWindSpd.py
|
AmateurZhang/EnjoyWithDataOnPowerSystems
| 0
|
12785118
|
<reponame>AmateurZhang/EnjoyWithDataOnPowerSystems
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 2 22:39:11 2017
@author: thuzhang
"""
#Predict WindSpeed(WINDSPD)
#packages
import numpy as np
import pandas as pd
import sklearn.svm as svm
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
def PredictWindSpd(NumberOfDays=2):
Data=[]
OriginData=pd.read_table(r'FormerDataModel.csv',sep=",",header=None)
    Data=OriginData.to_numpy().astype(np.float32)  # as_matrix() was removed in newer pandas
#Get the properties and the target
Y=Data[:,1].astype(np.float32)
X=Data[:,2:2+3*NumberOfDays]
#Machine Learning model builds up
#X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
Length=len(Y)-1
method=GradientBoostingRegressor()
method.fit(X[0:Length,:],Y[0:Length].ravel())
DataForecast=[]
OriginDataFC=pd.read_table(r'FormerDataForecast.csv',sep=",",header=None)
    DataForecast=OriginDataFC.to_numpy().astype(np.float32)  # as_matrix() was removed in newer pandas
X_Pre=DataForecast[(len(DataForecast)-25):(len(DataForecast)-1),2:2+3*NumberOfDays]
Y_Pre=method.predict(X_Pre)
print(Y_Pre)
return Y_Pre
#y_pre=method.predict(X[Length])
#y_all=Y[Length]
#print(y_pre[0])
#print(y_all)
#Result=[]
#Result.append(y_pre)
#Result.append(y_all)
#Score=metrics.accuracy_score(y_pre,Y[Length:])
#print(Score)
#plt.figure()
#set the size of subplots
#left,width=0.1,2.5
#bottom,height=0.11,1
#bottom_h=bottom+height+0.04
#rect_line1=[left,bottom,width,height]
#axs=plt.axes(rect_line1)
#plot1=axs.plot(y_pre[0:LengthOfTime],'-ob')
#plot2=axs.plot(y_all[0:LengthOfTime],'-og',ms=1)
#plt.show()
#Output
#Save=pd.DataFrame(y_all.astype(float))
#Save.to_csv(r'PredictWindSpdData.csv',header=None,index=None)
#PredictWindSpd(1)
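# Example usage (illustrative sketch; assumes FormerDataModel.csv and
# FormerDataForecast.csv are present in the working directory):
#   y_next = PredictWindSpd(NumberOfDays=2)  # 24 wind-speed estimates for the forecast window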
| 3.140625
| 3
|
wca/components.py
|
Rajpratik71/workload-collocation-agent
| 40
|
12785119
|
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
try:
import pkg_resources
except ImportError:
# When running from pex use vendored library from pex.
from pex.vendor._vendored.setuptools import pkg_resources
from wca.runners import detection
from wca.runners import allocation
from wca.runners import measurement
from wca.extra import static_allocator
from wca import config
from wca import detectors
from wca import allocators
from wca import mesos
from wca import kubernetes
from wca import storage
from wca.extra import static_node
from wca.extra import numa_allocator
from wca import security
REGISTERED_COMPONENTS = [
measurement.MeasurementRunner,
allocation.AllocationRunner,
detection.DetectionRunner,
mesos.MesosNode,
kubernetes.KubernetesNode,
storage.LogStorage,
storage.KafkaStorage,
storage.FilterStorage,
detectors.NOPAnomalyDetector,
allocators.NOPAllocator,
allocators.AllocationConfiguration,
kubernetes.CgroupDriverType,
static_node.StaticNode,
numa_allocator.NUMAAllocator,
static_allocator.StaticAllocator,
security.SSL,
measurement.TaskLabelRegexGenerator,
]
def register_components(extra_components: List[str]):
for component in REGISTERED_COMPONENTS:
config.register(component)
for component in extra_components:
# Load external class ignored its requirements.
ep = pkg_resources.EntryPoint.parse('external_cls=%s' % component)
cls = ep.resolve()
config.register(cls)
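# Usage illustration (the dotted path below is hypothetical, not a shipped component):
#   register_components(["my_package.my_module:MyCustomAllocator"])
# EntryPoint.parse("external_cls=<module>:<attr>") resolves the string to a class object,
# which is then registered with the config loader alongside REGISTERED_COMPONENTS.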
| 1.703125
| 2
|
tensorflow_graphics/datasets/features/camera_feature_test.py
|
Liang813/graphics
| 2,759
|
12785120
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_graphics.datasets.features.camera_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.features import camera_feature
class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase):
"""Test Cases for Camera FeatureConnector."""
def __get_camera_params(self):
pose = {'R': np.eye(3).astype(np.float32),
't': np.zeros(3).astype(np.float32)}
f = 35.
optical_center = (640 / 2, 480 / 2)
return pose, f, optical_center
def test_simple_camera(self):
"""Tests camera parameters with fixed focal length, no skew and no aspect ratio."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]],
[0, expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f, 'optical_center': expected_center,
'pose': expected_pose}
lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'look_at': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'position': np.array([0, 0, 0], dtype=np.float32)
}
}
raising_pose_entry = {
'f': expected_f,
'optical_center': expected_center,
'pose': np.eye(4)
}
raising_pose_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {'rot': np.eye(3), 'trans': np.zeros(3)}
}
raising_lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'l': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'C': np.array([0, 0, 0], dtype=np.float32)
}
}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=lookat_inputs,
expected=expected_camera
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_lookat_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_entry,
raise_cls=ValueError,
raise_msg='Pose needs to be a dictionary'
),
],
)
def test_camera_with_aspect_ratio_and_skew(self):
"""Tests camera parameters with fixed focal length, aspect_ratio and skew."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_aspect_ratio = expected_center[0] / expected_center[1]
expected_skew = 0.6
expected_intrinsics = np.asarray(
[[expected_f, expected_skew, expected_center[0]],
[0, expected_aspect_ratio * expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_center,
'skew': expected_skew,
'aspect_ratio': expected_aspect_ratio,
'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
],
)
def test_full_camera_calibration_matrix(self):
"""Tests camera parameters with different focal length per camera axis and skew."""
expected_pose, _, expected_optical_center = self.__get_camera_params()
expected_skew = 0.6
expected_f = (35., 40.)
expected_intrinsics = np.array(
[[expected_f[0], expected_skew, expected_optical_center[0]],
[0, expected_f[1], expected_optical_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
raising_inputs = {'f': expected_f,
'aspect_ratio': 1.5,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=raising_inputs,
raise_cls=ValueError,
raise_msg='If aspect ratio is provided, f needs to '
'be a single float',
),
],
)
if __name__ == '__main__':
tfds.testing.test_main()
| 2.3125
| 2
|
MFCN_SMC/code_MFCN/stage3_line_reconnection.py
|
kskim-phd/MFCN
| 0
|
12785121
|
<reponame>kskim-phd/MFCN
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 2020
@author: <NAME> (<EMAIL>)
"""
import header
# common
import torch, torchvision
import numpy as np
# dataset
import mydataset
from torch.utils.data import DataLoader
from PIL import Image
# model
import torch.nn as nn
# post processing
import cv2
import correlation_code
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
def main():
print("\nThird stage inference.py")
##############################################################################################################################
# Semantic segmentation (inference)
# Flag
flag_eval_JI = True # True #False # calculate JI
    flag_save_PNG = True  # save the pre-processed image and predicted mask to disk
connected_patch = True
# GPU
if torch.cuda.is_available():
device = torch.device("cuda")
num_worker = header.num_worker
else:
device = torch.device("cpu")
num_worker = 0
# Model initialization
net = header.net
print(str(header.test_third_network_name), "inference")
# network to GPU
if torch.cuda.device_count() > 1:
print("GPU COUNT = ", str(torch.cuda.device_count()))
if torch.cuda.device_count() > 1:
net = nn.DataParallel(net)
#FCDenseNet U_Net R2U_Net AttU_Net R2AttU_Net
# Load model
model_dir = header.dir_checkpoint + 'FCDenseNetmodel__1024_noise_RANZCR_100_1024v9.1.pth'
print(model_dir)
if os.path.isfile(model_dir):
print('\n>> Load model - %s' % (model_dir))
checkpoint = torch.load(model_dir)
net.load_state_dict(checkpoint['model_state_dict'])
test_sampler = checkpoint['test_sampler']
print(" >>> Epoch : %d" % (checkpoint['epoch']))
# print(" >>> JI Best : %.3f" % (checkpoint['ji_best']))
else:
        print('[Err] Model does not exist in %s' % model_dir)
exit()
# network to GPU
net.to(device)
dir_third_test_path = header.dir_secon_data_root + 'output_inference_segmentation_endtoend' + test_network_name + header.Whole_Catheter + '/second_output_90/' + header.test_secon_network_name + '/data/input_Catheter_' + header.Whole_Catheter
print('\n>> Load dataset -', dir_third_test_path)
testset = mydataset.MyTestDataset(dir_third_test_path, test_sampler)
testloader = DataLoader(testset, batch_size=header.num_batch_test, shuffle=False, num_workers=num_worker,
pin_memory=True)
print(" >>> Total # of test sampler : %d" % (len(testset)))
# inference
print('\n\n>> Evaluate Network')
with torch.no_grad():
# initialize
net.eval()
for i, data in enumerate(testloader, 0):
# forward
outputs = net(data['input'].to(device))
outputs = torch.argmax(outputs.detach(), dim=1)
# one hot
outputs_max = torch.stack(
[mydataset.one_hot(outputs[k], header.num_masks) for k in range(len(data['input']))])
# each case
for k in range(len(data['input'])):
# get size and case id
original_size, dir_case_id, dir_results = mydataset.get_size_id(k, data['im_size'], data['ids'],
header.net_label[1:])
# '''
# post processing
post_output = [post_processing(outputs_max[k][j].numpy(), original_size) for j in
range(1, header.num_masks)] # exclude background
# original image processings
save_dir = header.dir_save
mydataset.create_folder(save_dir)
image_original = testset.get_original(i * header.num_batch_test + k)
# save mask/pre-processed image
if flag_save_PNG:
save_dir = save_dir + '/output_inference_segmentation_endtoend' + str(
test_network_name) + header.Whole_Catheter + "/"+header.test_secon_network_name + 'third_output/' + str(
header.test_third_network_name) + '/Whole'
dir_case_id = dir_case_id.replace('/PICC', '')
dir_case_id = dir_case_id.replace('/Normal', '')
mydataset.create_folder(save_dir)
# '''
Image.fromarray(post_output[0] * 255).convert('L').save(save_dir + dir_case_id + '_mask.jpg')
Image.fromarray(image_original.astype('uint8')).convert('L').save(
save_dir + dir_case_id + '_image.jpg')
if connected_patch:
oup = save_dir.replace('/Whole',
'/First_connected_component')
mydataset.create_folder(oup)
connected_component(save_dir, oup)
print("connected component process complete")
make_correlation_check(oup)
def connected_component(inp, oup):
import os.path
import cv2
import glob
import numpy as np
CAPTCHA_IMAGE_FOLDER = inp
OUTPUT_FOLDER = oup
# Get a list of all the captcha images we need to process
captcha_image_files = glob.glob(os.path.join(CAPTCHA_IMAGE_FOLDER, "*_mask.jpg"))
# loop over the image paths
for (i, captcha_image_file) in enumerate(captcha_image_files):
image = cv2.imread(captcha_image_file, cv2.IMREAD_UNCHANGED)
image_zero = np.zeros(image.shape, dtype=image.dtype)
image_zero[20:-20, 20:-20] = image[20:-20, 20:-20]
image = image_zero
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
im = np.zeros((image.shape), np.uint8)
check_value_list = []
# getting mask with connectComponents
ret, labels = cv2.connectedComponents(binary)
for label in range(1, ret):
checkvalue = np.array(labels, dtype=np.uint8)
checkvalue[labels == label] = 255
checkvalue = checkvalue.sum()
check_value_list.append(checkvalue)
check_value_list.sort()
for label in range(1, ret):
checkvalue = np.array(labels, dtype=np.uint8)
checkvalue[labels == label] = 255
checkvalue = checkvalue.sum()
if check_value_list[-1] == checkvalue:
mask = np.array(labels, dtype=np.uint8)
mask[labels == label] = 255
mask[labels != label] = 0
im = im + mask
filename = os.path.basename(captcha_image_file)
p = os.path.join(OUTPUT_FOLDER, filename.replace('_mask', ''))
cv2.imwrite(p, im)
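# Note on connected_component above: the two label loops first collect per-label pixel
# sums of the thresholded mask, then keep only the label with the largest sum, i.e. the
# single largest connected component, which is written out as the reconnected line mask.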
def post_processing(raw_image, original_size, flag_pseudo=0):
net_input_size = raw_image.shape
raw_image = raw_image.astype('uint8')
# resize
if (flag_pseudo):
raw_image = cv2.resize(raw_image, original_size, interpolation=cv2.INTER_NEAREST)
else:
raw_image = cv2.resize(raw_image, original_size, interpolation=cv2.INTER_NEAREST)
if (flag_pseudo):
raw_image = cv2.resize(raw_image, net_input_size, interpolation=cv2.INTER_NEAREST)
return raw_image
def make_correlation_check(inp):
import pandas as pd
oup = inp.replace('/First_connected_component', '')
for x in header.dir_mask_path:
mask_path = header.dir_First_test_path.replace('/input_Catheter' + header.Data_path, x)
image_name, dice, subin, point_rmse = correlation_code.correlation_Images(inp, mask_path, oup)
df_image = pd.DataFrame([x for x in zip(image_name, dice, subin, point_rmse)],
columns=['image_name', 'dice', 'subin', 'point_rmse'])
df_image.to_excel(oup + "third_image_rmse_jpg.xlsx", sheet_name='Sheet1')
if __name__ == '__main__':
# test_network_name_list = ["FCDenseNet", "U_Net", "AttU_Net"]
test_network_name_list = ["FCDenseNet"]
for test_network_name in test_network_name_list:
main()
| 2.34375
| 2
|
python/sqlflow_submitter/xgboost/feature_column.py
|
brightcoder01/sqlflow
| 0
|
12785122
|
<reponame>brightcoder01/sqlflow<gh_stars>0
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import six
__all__ = [
'numeric_column',
'bucketized_column',
'categorical_column_with_identity',
'categorical_column_with_vocabulary_list',
'categorical_column_with_hash_bucket',
]
# TODO(sneaxiy): implement faster and proper hash algorithm
def hashing(x):
return hash(x) # use builtin hash function
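# A possible direction for the TODO above (illustrative sketch only, not part of the
# original module): a digest-based hash is stable across processes and runs, unlike
# the builtin hash() for strings under hash randomization.
def _stable_hashing_sketch(x):
    import hashlib  # local import keeps the sketch self-contained
    return int(hashlib.md5(str(x).encode("utf-8")).hexdigest(), 16)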
def apply_transform_on_value(feature, transform_fn):
if len(feature) == 1: # Dense input is like (value, )
return transform_fn(feature[0]),
else: # Sparse input is like (indices, values, dense_shape)
return feature[0], transform_fn(feature[1]), feature[2]
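# Usage illustration (sketch): a dense feature arrives as a 1-tuple and a sparse feature
# as (indices, values, dense_shape); only the value part is transformed in either case.
#   apply_transform_on_value((np.array([1.0, 2.0]),), lambda v: v * 2)  -> (array([2., 4.]),)
#   apply_transform_on_value((idx, vals, shape), lambda v: v * 2)       -> (idx, vals * 2, shape)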
class BaseColumnTransformer(object):
def _set_field_names(self, field_names):
self.field_names = field_names
def get_column_names(self):
raise NotImplementedError()
def __call__(self, inputs):
raise NotImplementedError()
class CategoricalColumnTransformer(BaseColumnTransformer):
pass
class NumericColumnTransformer(BaseColumnTransformer):
def __init__(self, key, shape):
self.key = key
self.shape = shape
def _set_field_names(self, field_names):
BaseColumnTransformer._set_field_names(self, field_names)
self.column_idx = self.field_names.index(self.key)
def __call__(self, inputs):
return inputs[self.column_idx]
def get_column_names(self):
return [self.key]
def numeric_column(key, shape):
return NumericColumnTransformer(key, shape)
class BucketizedColumnTransformer(CategoricalColumnTransformer):
def __init__(self, source_column, boundaries):
assert boundaries == sorted(
boundaries), "Boundaries must be sorted in ascending order"
self.source_column = source_column
self.boundaries = boundaries
def _set_field_names(self, field_names):
CategoricalColumnTransformer._set_field_names(self, field_names)
self.source_column._set_field_names(field_names)
def get_column_names(self):
return self.source_column.get_column_names()
def __call__(self, inputs):
return apply_transform_on_value(
self.source_column(inputs),
lambda x: np.searchsorted(self.boundaries, x))
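# Worked example (illustration): with boundaries [0., 10., 20.],
# np.searchsorted gives 0 for -5., 2 for 15. and 3 for 25., so raw values are mapped
# to bucket ids in the range 0..len(boundaries).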
def bucketized_column(source_column, boundaries):
return BucketizedColumnTransformer(source_column, boundaries)
class CategoricalColumnWithIdentityTransformer(CategoricalColumnTransformer):
def __init__(self, key, num_buckets, default_value=None):
self.key = key
self.num_buckets = num_buckets
self.default_value = default_value
def _set_field_names(self, field_names):
CategoricalColumnTransformer._set_field_names(self, field_names)
self.column_idx = self.field_names.index(self.key)
def get_column_names(self):
return [self.key]
def __call__(self, inputs):
def transform_fn(slot_value):
            invalid_index = np.logical_or(slot_value < 0, slot_value >= self.num_buckets)
            if np.any(invalid_index):
if self.default_value is not None:
slot_value[invalid_index] = self.default_value
else:
raise ValueError(
'The categorical value of column {} out of range [0, {})'
.format(self.field_names[self.column_idx],
self.num_buckets))
return slot_value
return apply_transform_on_value(inputs[self.column_idx], transform_fn)
def categorical_column_with_identity(key, num_buckets, default_value=None):
return CategoricalColumnWithIdentityTransformer(key, num_buckets,
default_value)
class CategoricalColumnWithVocabularyList(CategoricalColumnTransformer):
def __init__(self, key, vocabulary_list):
self.key = key
self.vocabulary_list = vocabulary_list
def _set_field_names(self, field_names):
CategoricalColumnTransformer._set_field_names(self, field_names)
self.column_idx = self.field_names.index(self.key)
def get_column_names(self):
return [self.key]
def __call__(self, inputs):
def transform_fn(slot_value):
if isinstance(slot_value, np.ndarray):
output = np.ndarray(slot_value.shape)
for i in six.moves.range(slot_value.size):
output[i] = self.vocabulary_list.index(slot_value[i])
else:
output = self.vocabulary_list.index(slot_value)
return output
return apply_transform_on_value(inputs[self.column_idx], transform_fn)
def categorical_column_with_vocabulary_list(key, vocabulary_list):
return CategoricalColumnWithVocabularyList(key, vocabulary_list)
class CategoricalColumnWithHashBucketTransformer(CategoricalColumnTransformer):
def __init__(self, key, hash_bucket_size, dtype='string'):
self.key = key
self.hash_bucket_size = hash_bucket_size
self.dtype = dtype
def _set_field_names(self, field_names):
CategoricalColumnTransformer._set_field_names(self, field_names)
self.column_idx = self.field_names.index(self.key)
def get_column_names(self):
return [self.key]
def __call__(self, inputs):
def transform_fn(slot_value):
if isinstance(slot_value, np.ndarray):
output = np.ndarray(slot_value.shape)
for i in six.moves.range(slot_value.size):
output[i] = hashing(slot_value[i])
else:
output = hashing(slot_value)
output %= self.hash_bucket_size
return output
return apply_transform_on_value(inputs[self.column_idx], transform_fn)
def categorical_column_with_hash_bucket(key, hash_bucket_size, dtype='string'):
return CategoricalColumnWithHashBucketTransformer(key, hash_bucket_size,
dtype)
class ComposedColumnTransformer(BaseColumnTransformer):
def __init__(self, feature_column_names, *columns):
for column in columns:
assert isinstance(column, BaseColumnTransformer)
assert len(columns) != 0, "No feature column found"
self.columns = columns
self._set_field_names(feature_column_names)
def get_column_names(self):
return ['/'.join(column.get_column_names()) for column in self.columns]
def _set_field_names(self, field_names):
BaseColumnTransformer._set_field_names(self, field_names)
for column in self.columns:
column._set_field_names(field_names)
def __call__(self, inputs):
return tuple([column(inputs) for column in self.columns])
| 2.453125
| 2
|
extract_bert_model.py
|
to-aoki/my-pytorch-bert
| 21
|
12785123
|
<reponame>to-aoki/my-pytorch-bert
# Author <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract Bert Model."""
from mptb.models.bert import Config
from mptb.models.pretrain_tasks import BertPretrainingTasks, OnlyMaskedLMTasks
from mptb.models.embed_projection_albert import ProjectionOnlyMaskedLMTasks, ProjectionAlbertPretrainingTasks
from mptb.models.albert import AlbertOnlyMaskedLMTasks, AlbertPretrainingTasks
from mptb.utils import load, save
def extract_model(
config_path='config/bert_base.json',
model_path="pretrain/pretran_on_the_way.pt",
output_path="pretrain/bert_only_model.pt",
load_strict=True,
only_bert=False,
mlm=False,
parallel=False,
model_name='bert',
pad_idx=0
):
config = Config.from_json(config_path)
if mlm and model_name == 'proj':
model = ProjectionOnlyMaskedLMTasks(config, pad_idx=pad_idx)
elif model_name == 'proj':
model = ProjectionAlbertPretrainingTasks(config, pad_idx=pad_idx)
elif mlm and model_name == 'albert':
model = AlbertOnlyMaskedLMTasks(config, pad_idx=pad_idx)
elif model_name == 'albert':
model = AlbertPretrainingTasks(config, pad_idx=pad_idx)
elif mlm:
model = OnlyMaskedLMTasks(config, pad_idx=pad_idx)
else:
model = BertPretrainingTasks(config, pad_idx=pad_idx)
load(model, model_path, 'cpu', strict=load_strict)
if parallel:
model = model.module
if only_bert:
model = model.bert
save(model, output_path)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Extract my-pytorch-bert model.', usage='%(prog)s [options]')
parser.add_argument('--config_path', help='JSON file path for defines networks.', nargs='?',
type=str, default='config/bert_base.json')
parser.add_argument('--model_path', help='my-pytorch-bert model path (include optimizer).', required=True,
type=str)
parser.add_argument('--loose', action='store_true',
help='model load param checking loose')
parser.add_argument('--mlm', action='store_true',
help='load mlm only model.')
parser.add_argument('--parallel', action='store_true',
help='load parallel wrapper model.')
parser.add_argument('--only_bert', action='store_true',
help='Use bert only output.')
parser.add_argument('--output_path', help='Output model path.', required=True,
type=str)
parser.add_argument('--model_name', nargs='?', type=str, default='bert',
help=
'Select the model type from the following names: bert, proj, albert'
)
parser.add_argument('--pad_idx', help='[PAD] vocab index', nargs='?',
type=int, default=0)
args = parser.parse_args()
extract_model(config_path=args.config_path, model_path=args.model_path,
load_strict=not args.loose,
output_path=args.output_path, only_bert=args.only_bert,
parallel=args.parallel, mlm=args.mlm, model_name=args.model_name, pad_idx=args.pad_idx)
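# Example invocation (an added sketch; the file names below are illustrative and
# not taken from the original repository's documentation):
#   python extract_bert_model.py \
#       --config_path config/bert_base.json \
#       --model_path pretrain/checkpoint.pt \
#       --output_path pretrain/bert_only_model.pt \
#       --only_bert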
| 2.203125
| 2
|
gallery/admin.py
|
lukacu/django-gallery
| 2
|
12785124
|
<reponame>lukacu/django-gallery
#!/usr/bin/python
# -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from django.conf import settings
from django.contrib.admin.util import unquote
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.contrib import admin
from django.forms import ModelForm
from mptt.admin import MPTTModelAdmin
from gallery.models import Album, Image
from imagekit.admin import AdminThumbnail
from mptt.forms import TreeNodeChoiceField
class AlbumAdminForm(ModelForm):
class Meta:
model = Album
def __init__(self, *args, **kwargs):
super(AlbumAdminForm, self).__init__(*args, **kwargs)
q = self.instance.get_descendants(include_self=True).filter(is_public=True).values("id")
self.fields['cover'].queryset = Image.objects.filter(album__in=q, is_public=True).order_by("-date_added")
class AlbumAdmin(MPTTModelAdmin):
list_display = ('title', 'album_cover', 'is_public', 'order', 'move_up_down_links')
list_filter = ['is_public']
mptt_level_indent = 40
form = AlbumAdminForm
def get_urls(self):
from django.conf.urls.defaults import patterns, url
info = self.model._meta.app_label, self.model._meta.module_name
return patterns('',
url(r'^(.+)/move-(up)/$', self.admin_site.admin_view(self.move_album), name='%s_%s_move_up' % info),
url(r'^(.+)/move-(down)/$', self.admin_site.admin_view(self.move_album), name='%s_%s_move_down' % info),
) + super(AlbumAdmin, self).get_urls()
def move_album(self, request, object_id, direction):
obj = get_object_or_404(self.model, pk=unquote(object_id))
if direction == 'up':
relative = obj.get_previous_sibling()
if relative:
obj.move_to(relative, 'left')
else:
relative = obj.get_next_sibling()
if relative:
obj.move_to(relative, 'right')
return HttpResponseRedirect('../../')
def move_up_down_links(self, obj):
var = {'app_label': self.model._meta.app_label, 'module_name': self.model._meta.module_name, 'object_id': obj.id, 'ADMIN_MEDIA_PREFIX': settings.ADMIN_MEDIA_PREFIX }
if obj.get_previous_sibling():
up = '<a href="../../%(app_label)s/%(module_name)s/%(object_id)s/move-up/"><img src="%(ADMIN_MEDIA_PREFIX)simg/admin/arrow-up.gif" alt="Move up" /></a>' % var
else:
up = ''
if obj.get_next_sibling():
down = '<a href="../../%(app_label)s/%(module_name)s/%(object_id)s/move-down/"><img src="%(ADMIN_MEDIA_PREFIX)simg/admin/arrow-down.gif" alt="Move down" /></a>' % var
else:
down = ''
return "%s %s" % (up, down)
move_up_down_links.allow_tags = True
move_up_down_links.short_description = 'Move'
def album_cover(self, obj):
cover = obj.cover_image()
if not cover:
return "<em>Not defined</em>"
return '<img src="%s" alt="%s" style="width: 42px;" />' % (cover.cover_image.url, cover.title)
album_cover.allow_tags = True
album_cover.short_description = 'Cover'
class ImageAdmin(admin.ModelAdmin):
list_display = ('admin_thumbnail', 'title', 'album', 'date_added', 'is_public')
list_display_links = ['title']
list_filter = ['date_added', 'album', 'is_public']
search_fields = ['title', 'title_slug', 'text']
list_per_page = 20
admin_thumbnail = AdminThumbnail(image_field='thumbnail_image', template="gallery/admin/thumbnail.html")
admin.site.register(Album, AlbumAdmin)
admin.site.register(Image, ImageAdmin)
| 2.0625
| 2
|
ilexconf/config.py
|
vduseev/holly-config
| 9
|
12785125
|
from mapz import Mapz
from typing import (
Any,
Hashable,
Mapping,
)
class Config(Mapz):
def __init__(self, *mappings: Mapping[Hashable, Any], **kwargs: Any):
super().__init__(*mappings)
for k, v in kwargs.items():
self.set(
k,
v,
key_prefix="",
key_sep="__",
)
def __repr__(self) -> str:
return f"Config{dict.__repr__(self)}"
| 2.90625
| 3
|
rcnn_dff/symbol/rcnn_iou_loss.py
|
tonysy/mx-rcnn-flow
| 2
|
12785126
|
<filename>rcnn_dff/symbol/rcnn_iou_loss.py
import mxnet as mx
import numpy as np
def check_equal(lst, errstr='check_equal'):
assert len(set(lst)) <= 1, '%s:%s' % (errstr, lst)
class RCNNIoULossOperator(mx.operator.CustomOp):
def __init__(self, num_classes, grad_scale):
super(RCNNIoULossOperator, self).__init__()
self._num_classes = num_classes
self._grad_scale = grad_scale
self._data = None
self._info = None
def forward(self, is_train, req, in_data, out_data, aux):
# data (batch_rois, num_classes * 4): pred_boxes - rois
data = in_data[0].asnumpy()
# bbox_target (batch_rois, num_classes * 4): gt_boxes
label = in_data[1].asnumpy()
# bbox_weight (batch_rois, num_classes * 4): mark to learn regression
bbox_weight = in_data[2].asnumpy()
# rois (batch_rois, 5)
rois = in_data[3].asnumpy()
rois = rois[:, 1:]
IoU = np.zeros(data.shape)
IoULoss = np.zeros(data.shape)
info = np.zeros(data.shape)
for i in range(self._num_classes):
data[:, 4 * i:4 * (i + 1)] += rois
# predicted boxes
w = np.maximum(0, data[:, 4 * i + 2] - data[:, 4 * i + 0])
h = np.maximum(0, data[:, 4 * i + 3] - data[:, 4 * i + 1])
# print max(np.maximum(w, h))
# np.clip(w, -10000, 10000, out=w)
# np.clip(h, -10000, 10000, out=h)
X = w * h
# gt boxes
w_gt = label[:, 4 * i + 2] - label[:, 4 * i + 0]
h_gt = label[:, 4 * i + 3] - label[:, 4 * i + 1]
X_gt = w_gt * h_gt
# calculate iou between pred_boxes and gt_boxes
I_w = np.maximum(0, np.minimum(data[:, 4 * i + 2], label[:, 4 * i + 2]) - np.maximum(data[:, 4 * i + 0], label[:, 4 * i + 0]))
I_h = np.maximum(0, np.minimum(data[:, 4 * i + 3], label[:, 4 * i + 3]) - np.maximum(data[:, 4 * i + 1], label[:, 4 * i + 1]))
I = I_w * I_h
U = X + X_gt - I
IoU[:, 4 * i] = I / (U + 1e-14)
# store info
info[:, 4 * i + 0] = I
info[:, 4 * i + 1] = U
info[:, 4 * i + 2] = I_w
info[:, 4 * i + 3] = I_h
# IoU loss is -ln(IoU)
keep_inds = np.where((bbox_weight > 0) & (IoU > 0))
IoULoss[keep_inds] = -np.log(IoU[keep_inds])
self._data = data
self._info = info
self.assign(out_data[0], req[0], IoULoss)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
data = self._data
info = self._info
label = in_data[1].asnumpy()
bbox_weight = in_data[2].asnumpy()
g_data = np.zeros(in_data[0].shape)
o_grad = out_grad[0].asnumpy()
for i in range(data.shape[0]):
for j in range(self._num_classes):
if bbox_weight[i, 4 * j] > 0:
I = info[i, 4 * j + 0]
U = info[i, 4 * j + 1]
I_w = info[i, 4 * j + 2]
I_h = info[i, 4 * j + 3]
box = data[i, 4 * j:4 * (j + 1)]
gt_box = label[i, 4 * j:4 * (j + 1)]
dXdx = np.zeros(4)
dIdx = np.zeros(4)
if I > 0:
# X = (box[2] - box[0]) * (box[3] - box[1])
dXdx[0] = box[1] - box[3]
dXdx[1] = box[0] - box[2]
dXdx[2] = box[3] - box[1]
dXdx[3] = box[2] - box[0]
# I = (xx2 - xx1) * (yy2 - yy1)
# x1 > x1_gt:
if box[0] > gt_box[0]:
dIdx[0] = -I_h
# y1 > y1_gt
if box[1] > gt_box[1]:
dIdx[1] = -I_w
# x2 < x2_gt
if box[2] < gt_box[2]:
dIdx[2] = I_h
# y2 < y2_gt
if box[3] < gt_box[3]:
dIdx[3] = I_w
# grad = (1 / U) * dXdx - (U + I) / (U * I) * dIdx
grad = (1 / U) * dXdx - (U + I) / (U * I) * dIdx
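# Added derivation note: with L = -ln(I/U) = ln(U) - ln(I) and U = X + X_gt - I
# (X_gt is constant w.r.t. the predicted box), dL/dx = (1/U) dU/dx - (1/I) dI/dx
# = (1/U) dX/dx - (1/U + 1/I) dI/dx, i.e. (1/U) dXdx - (U + I)/(U*I) dIdx,
# which matches the expression computed above.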
g_data[i, 4 * j:4 * (j + 1)] = grad
else:
for k in range(0, 4):
g_data[i, 4 * j + k] = 0.03 * np.sign(box[k] - gt_box[k])
# pos = np.where(g_data > 0)
# print np.sum(np.abs(g_data[pos])) / np.prod(g_data[pos].shape)
g_data *= o_grad
g_data *= self._grad_scale
self.assign(in_grad[0], req[0], g_data)
@mx.operator.register("rcnn_iou_loss")
class RCNNIoULossProp(mx.operator.CustomOpProp):
def __init__(self, num_classes='21', grad_scale='1.0'):
super(RCNNIoULossProp, self).__init__(need_top_grad=True)
self._num_classes = int(num_classes)
self._grad_scale = float(grad_scale)
def list_arguments(self):
return ['data', 'bbox_target', 'bbox_weight', 'rois']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
bbox_target_shape = in_shape[1]
bbox_weight_shape = in_shape[2]
rois_shape = in_shape[3]
# share batch size
batch_sizes = [data_shape[0], bbox_target_shape[0],
bbox_weight_shape[0], rois_shape[0]]
check_equal(batch_sizes, 'inconsistent batch size')
out_shape = [data_shape]
return in_shape, out_shape
def create_operator(self, ctx, shapes, dtypes):
return RCNNIoULossOperator(self._num_classes, self._grad_scale)
| 2.125
| 2
|
Python/Ex069.py
|
EspagueteTV/Meus-Estudos-CursoEmVideo
| 0
|
12785127
|
cont_pes = homens = mulheres_20 = 0
while True:
print('-' * 20)
print('REGISTER A PERSON')
print('-' * 20)
idade = int(input('Age: '))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('Sex: [M/F]')).strip().upper()[0]
print('-' * 20)
if idade > 18:
cont_pes += 1
if sexo == 'M':
homens += 1
if sexo == 'F' and idade < 20:
mulheres_20 += 1
op = ' '
while op not in 'YyNn':
op = str(input('Do you want to continue? [Y/N] ')).strip().upper()[0]
if op == 'N':
break
print('\n======= END OF PROGRAM =======')
print(f'There are {cont_pes} people over 18 years old')
print(f'{homens} men were registered')
print(f'Only {mulheres_20} women are under 20 years old')
| 3.6875
| 4
|
tests/test_secret_sources.py
|
jroslaniec/top-secret
| 3
|
12785128
|
import os
import shutil
import pytest
from top_secret import FileSecretSource, DirectorySecretSource
from top_secret import SecretMissingError
SECRET_BASE_PATH = os.path.join("/tmp", ".top_secret_test")
@pytest.fixture(scope="module", autouse=True)
def setup_teardown_module():
# Setup
os.makedirs(SECRET_BASE_PATH, exist_ok=True)
yield
# Tear Down
if os.path.exists(SECRET_BASE_PATH):
shutil.rmtree(SECRET_BASE_PATH)
@pytest.fixture(autouse=True)
def setup_function():
for file in os.listdir(SECRET_BASE_PATH):
path = os.path.join(SECRET_BASE_PATH, file)
if os.path.isfile(path):
os.unlink(path)
def test_file_ss_raise_if_file_does_not_exist():
ss = DirectorySecretSource(SECRET_BASE_PATH)
with pytest.raises(SecretMissingError):
ss.get("missing.txt")
def test_file_ss_exists():
ss = DirectorySecretSource(SECRET_BASE_PATH)
with open(os.path.join(SECRET_BASE_PATH, "my_secret.txt"), "w") as fd:
fd.write("secret")
secret = ss.get("my_secret.txt")
assert secret == "secret"
def test_file_ss_stripes_whitespaces():
ss = DirectorySecretSource(SECRET_BASE_PATH)
with open(os.path.join(SECRET_BASE_PATH, "my_secret.txt"), "w") as fd:
fd.write("\t\n secret\t \n\n")
secret = ss.get("my_secret.txt")
assert secret == "secret"
def test_file_ss_with_whitespace_striping():
ss = DirectorySecretSource(SECRET_BASE_PATH, stripe_whitespaces=False)
secret_in_file = "\t\n secret\t \n\n"
with open(os.path.join(SECRET_BASE_PATH, "my_secret.txt"), "w") as fd:
fd.write(secret_in_file)
secret = ss.get("my_secret.txt")
assert secret == secret_in_file
def test_file_ss_postfix():
ss = DirectorySecretSource(SECRET_BASE_PATH, postfix=".txt")
with open(os.path.join(SECRET_BASE_PATH, "my_secret.txt"), "w") as fd:
fd.write("secret")
secret = ss.get("my_secret")
assert secret == "secret"
def test_file_ss_get_secret_by_asb_path():
ss = DirectorySecretSource(SECRET_BASE_PATH)
path = os.path.join(SECRET_BASE_PATH, "my_secret.txt")
secret_in_file = "secret"
with open(path, "w") as fd:
fd.write(secret_in_file)
secret = ss.get(path)
assert secret == secret_in_file
| 2.078125
| 2
|
api/migrations/0078_lightningtalk.py
|
pythonkr/pyconkr-api
| 25
|
12785129
|
<reponame>pythonkr/pyconkr-api<filename>api/migrations/0078_lightningtalk.py
# Generated by Django 2.2 on 2019-08-04 07:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0077_auto_20190729_2144'),
]
operations = [
migrations.CreateModel(
name='LightningTalk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, null=True)),
('submitted_at', models.DateTimeField(blank=True, null=True)),
('accepted', models.BooleanField(default=True)),
('material', models.FileField(blank=True, default='', upload_to='lightningtalk')),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.554688
| 2
|
dolosse/data_formats/ldf/header.py
|
Tobias2023/dolosse
| 9
|
12785130
|
<reponame>Tobias2023/dolosse
"""
file: header.py
brief: Defines how to handle an LDF Header buffer
author: <NAME>
date: February 04, 2019
"""
import struct
import dolosse.constants.data as data
class LdfHeader:
"""
Defines a structure that reads the header information from an ORNL HEAD buffer
"""
def __init__(self):
self.run_number = self.max_spill_size = self.run_time = 0
self.format = self.facility = self.start_date = self.end_date = self.title = ''
def read_header(self, stream):
"""
Reads the header out of the provided stream
:param stream: the stream that contains the header to read
:return: a dictionary containing the header information
"""
return {
'buffer_size': struct.unpack('I', stream.read(data.WORD))[0],
'facility': stream.read(data.WORD * 2).decode(),
'format': stream.read(data.WORD * 2).decode(),
'type': stream.read(data.WORD * 4).decode(),
'date': stream.read(data.WORD * 4).decode(),
'title': stream.read(data.WORD * 20).decode(),
'run_number': struct.unpack('I', stream.read(data.WORD))[0],
}
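# Usage sketch (an added note): read_header() consumes a binary stream with the
# layout above. Assuming data.WORD is 4 bytes (implied by the 'I' struct format),
# an in-memory example could look like this; the payload is fabricated purely
# for illustration.
# import io
# payload = (struct.pack('I', 8194) + b'ORNL'.ljust(8) + b'LDF '.ljust(8)
#            + b'HEAD'.ljust(16) + b'02/04/2019'.ljust(16)
#            + b'example run title'.ljust(80) + struct.pack('I', 42))
# print(LdfHeader().read_header(io.BytesIO(payload)))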
| 2.6875
| 3
|
Views/Roles.py
|
GhostyCatt/TheCloud
| 2
|
12785131
|
# Library Imports
from os import execl
import nextcord, json
from nextcord.ui import button, View, Select
# Custom Imports
from Functions.Embed import *
# Options from Json
with open('Config/Options.json') as RawOptions:
Options = json.load(RawOptions)
# Note: The roles are fetched from IDs. These id's are stored as the values of the options in the dropdowns. To change this, modify the "options" variable in each subclass of Select
# Age roles dropdown
class AgeMenu(Select):
def __init__(self, bot:commands.Bot):
self.bot = bot
options = [
nextcord.SelectOption(label = "- 13", description = "Click to get/remove this role", value = "886537316418609172"),
nextcord.SelectOption(label = "+ 13", description = "Click to get/remove this role", value = "886537379450589215"),
nextcord.SelectOption(label = "+ 16", description = "Click to get/remove this role", value = "886537464452366376"),
nextcord.SelectOption(label = "+ 18", description = "Click to get/remove this role", value = "886537714206392320"),
]
super().__init__(placeholder = 'Age Roles...', min_values = 1, max_values = 1, options = options, custom_id = "AgeRoleMenu2000", row = 3)
async def callback(self, interaction: nextcord.Interaction):
try:
Guild = self.bot.get_guild(Options['Guild']['ID'])
Role = Guild.get_role(int(self.values[0]))
if Role in interaction.user.roles:
await interaction.user.remove_roles(Role)
else:
await interaction.user.add_roles(Role)
await interaction.response.edit_message(embed = interaction.message.embeds[0])
except: pass
# Gender roles dropdown
class SexMenu(Select):
def __init__(self, bot:commands.Bot):
self.bot = bot
options = [
nextcord.SelectOption(label = "Male", description = "Click to get/remove this role", value = "886537847258112071"),
nextcord.SelectOption(label = "Female", description = "Click to get/remove this role", value = "886537907412815912"),
]
super().__init__(placeholder = 'Gender Roles...', min_values = 1, max_values = 1, options = options, custom_id = "SexRoleMenu2000", row = 2)
async def callback(self, interaction: nextcord.Interaction):
try:
Guild = self.bot.get_guild(Options['Guild']['ID'])
Role = Guild.get_role(int(self.values[0]))
if Role in interaction.user.roles:
await interaction.user.remove_roles(Role)
else:
await interaction.user.add_roles(Role)
await interaction.response.edit_message(embed = interaction.message.embeds[0])
except: pass
# Hobby roles dropdown
class InterestMenu(Select):
def __init__(self, bot:commands.Bot):
self.bot = bot
options = [
nextcord.SelectOption(label = "Arts", description = "Click to get/remove this role", value = "886538932018348032"),
nextcord.SelectOption(label = "Sports", description = "Click to get/remove this role", value = "886538985852248094"),
nextcord.SelectOption(label = "Music", description = "Click to get/remove this role", value = "886539050062864404"),
nextcord.SelectOption(label = "Reading", description = "Click to get/remove this role", value = "886539142740209714"),
nextcord.SelectOption(label = "Cooking", description = "Click to get/remove this role", value = "886539267998896128"),
nextcord.SelectOption(label = "Singing", description = "Click to get/remove this role", value = "886539873631211520"),
]
super().__init__(placeholder = 'Interest Roles...', min_values = 1, max_values = 1, options = options, custom_id = "InterestRoleMenu2000", row = 1)
async def callback(self, interaction: nextcord.Interaction):
try:
Guild = self.bot.get_guild(Options['Guild']['ID'])
Role = Guild.get_role(int(self.values[0]))
if Role in interaction.user.roles:
await interaction.user.remove_roles(Role)
else:
await interaction.user.add_roles(Role)
await interaction.response.edit_message(embed = interaction.message.embeds[0])
except: pass
# Mention roles dropdown
class PingMenu(Select):
def __init__(self, bot:commands.Bot):
self.bot = bot
options = [
nextcord.SelectOption(label = "Announcement", description = "Click to get/remove this role", value = "886540581004795904"),
nextcord.SelectOption(label = "Event", description = "Click to get/remove this role", value = "886540636168282132"),
nextcord.SelectOption(label = "Partner", description = "Click to get/remove this role", value = "886540681663873065"),
nextcord.SelectOption(label = "Chat Revive", description = "Click to get/remove this role", value = "886540760583901185")
]
super().__init__(placeholder = 'Mention Roles...', min_values = 1, max_values = 1, options = options, custom_id = "PingRoleMenu2000", row = 0)
async def callback(self, interaction: nextcord.Interaction):
try:
Guild = self.bot.get_guild(Options['Guild']['ID'])
Role = Guild.get_role(int(self.values[0]))
if Role in interaction.user.roles:
await interaction.user.remove_roles(Role)
else:
await interaction.user.add_roles(Role)
await interaction.response.edit_message(embed = interaction.message.embeds[0])
except: pass
# Button array
class RoleView(View):
def __init__(self, bot:commands.Bot):
super().__init__(timeout = None)
self.response = None
self.bot = bot
# Add all the views
self.add_item(AgeMenu(bot))
self.add_item(SexMenu(bot))
self.add_item(InterestMenu(bot))
self.add_item(PingMenu(bot))
| 2.53125
| 3
|
Server_free_login_script/main.py
|
Ahrli/fast_tools
| 1
|
12785132
|
<reponame>Ahrli/fast_tools
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module '
import time
__author__ = '<NAME>'
import paramiko
# username = "ahrli"
# password = "<PASSWORD>"
'''List of servers that need passwordless login'''
################## Edit here; do not change anything else ##########################
# user ip password
hostname_list=[
('root','192.168.0.108','123456'),
]
############################################
def ssh_pub(hostname,username,password):
'''Generate id_rsa.pub'''
# Create the SSH client
ssh = paramiko.SSHClient()
# Add the target machine to the known_hosts file
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connect to the server
ssh.connect(hostname=hostname, port=22, username=username, password=password)
cmd = 'ssh-keygen -t rsa'
# Equivalent to opening an interactive terminal; every command must be terminated with '\n' manually
chan = ssh.invoke_shell(cmd)
chan.send(cmd + '\n')
time.sleep(0.3)
chan.send('\n')
time.sleep(0.3)
chan.send('\n')
time.sleep(0.3)
chan.send('\n')
time.sleep(0.3)
chan.send('\n')
time.sleep(0.3)
chan.send('\n')
time.sleep(0.3)
# print(chan.recv(1024))
chan.close()
ssh.close()
def ssh_red(hostname,username,password):
'''Read back the generated id_rsa.pub'''
# Create the SSH client
ssh = paramiko.SSHClient()
# Add the target machine to the known_hosts file
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connect to the server
ssh.connect(hostname=hostname, port=22, username=username, password=password)
cmd2 = 'cd ~/.ssh/;cat id_rsa.pub '
# cmd = 'ls -l;ifconfig'  # multiple commands are separated by ';'
# exec_command returns stdin, stdout and stderr: success output on stdout, error messages on stderr
stdin, stdout, stderr = ssh.exec_command(cmd2)
result = stdout.read()
if not result:
result = stderr.read()
ssh.close()
print(result.decode())
return result.decode()
def sftp_file(hostname,username,password):
# Create the SFTP transport
t = paramiko.Transport((hostname, 22))
t.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put('authorized_keys', '/root/.ssh/authorized_keys')
t.close()
def main():
"""记下秘钥 每台机器发送秘钥"""
for i in hostname_list:
username, hostname, password = i
ssh_pub(hostname, username, password)
a = ssh_red(hostname, username, password)
with open('authorized_keys', 'a+') as f:
f.write(a)
for i in hostname_list:
username, hostname, password = i
if username != 'ahrli':  # skip the 'ahrli' account
sftp_file(hostname, username, password)
if __name__ == '__main__':
main()
| 1.945313
| 2
|
mysite/article/views.py
|
wuhaoqiu/engr597-stable
| 0
|
12785133
|
from django.shortcuts import render, get_object_or_404, get_list_or_404
from .models import Article,Comment
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from .forms import ShareEmailForm,CommentForm,SearchForm,ArticleForm
# from django.core.mail import send_mail
from django.db.models import Count
from django.contrib.auth.decorators import login_required
from taggit.models import Tag
# for more aggregation function, read /topics/db/aggregation/
# Create your views here.
# tag_slug comes with the URL of this request
@login_required
def article_list(request, tag_slug=None):
all_articles=Article.objects.all()
tag=None
if tag_slug:
tag=get_object_or_404(Tag,slug=tag_slug)
all_articles=all_articles.filter(tags__in=[tag])
# each page only display 6 posts
paginator=Paginator(all_articles,3)
page=request.GET.get('page')
try:
one_page_articles=paginator.page(page)
except PageNotAnInteger:
one_page_articles=paginator.page(1)
except EmptyPage:
#retrieve the last page content if page number beyond range
one_page_articles=paginator.page(paginator.num_pages)
new_article = None
if request.method == 'POST':
article_form = ArticleForm(data=request.POST)
if article_form.is_valid():
# article_form.save() creates an Article object but does not save it to the database immediately
new_article = article_form.save(commit=False)
new_article.author = request.user
cd = article_form.cleaned_data
from django.utils import timezone
from django.contrib import messages
if not Article.objects.filter(publish_time=timezone.now()).filter(label_in_url=cd.get('label_in_url')).exists():
new_article.save()
for each_tag in cd.get('tags'):
new_article.tags.add(each_tag)
messages.success(request, 'article created successfully')
from django.http.response import HttpResponseRedirect
from django.urls import reverse
return HttpResponseRedirect(reverse('article:article_list'))
else:
messages.error(request, 'update failed, possibly because of a duplicate slug today')
# if this view is called by GET method, then render a brand new form
else:
article_form = ArticleForm()
return render(request,
'article/articles/article_list.html',
{'articles':one_page_articles,
'tag':tag,
'article_form':article_form})
@login_required
def article_detail(request,year,month,day,label_in_url):
# query the Article table using filter as below
article=get_list_or_404(Article,label_in_url=label_in_url,
publish_time__year=year,
publish_time__month=month,
publish_time__day=day,
)[0]
# list active comments
comments=article.article_comments.all()
# each page only display 6 posts
paginator = Paginator(comments, 6)
page = request.GET.get('page')
try:
one_page_comments = paginator.page(page)
except PageNotAnInteger:
one_page_comments = paginator.page(1)
except EmptyPage:
# retrieve the last page content if page number beyond range
one_page_comments = paginator.page(paginator.num_pages)
new_comment=None
if request.method=='POST':
comment_form=CommentForm(data=request.POST)
if comment_form.is_valid():
# comment_form.save() creates a Comment object but does not save it to the database immediately
new_comment=comment_form.save(commit=False)
new_comment.article=article
new_comment.user=request.user
new_comment.save()
# prevent submitting same forms again when refresh page
from django.http.response import HttpResponseRedirect
from django.urls import reverse
return HttpResponseRedirect(reverse('article:article_detail',
args=[
article.publish_time.year,
article.publish_time.month,
article.publish_time.day,
article.label_in_url
]))
# if this view is called by GET method, then render a brand new form
else:
comment_form=CommentForm()
# flat=True, let tuple returned by values_list() to a python list
article_tags_list=article.tags.values_list('id',flat=True)
similar_articles=Article.published_set.filter(tags__in=article_tags_list).exclude(id=article.id)
# use Count() to generate a new filed to those retrieved articles, named same_tags, then
# order those articles by this new attribute - same_tags
similar_articles=similar_articles.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish_time')[:3]
# use the object returned by above filter to render detail.html
return render(request,'article/articles/article_detail.html',
{'article':article,
'comments':one_page_comments,
'new_comment':new_comment,
'comment_form':comment_form,
'similar_articles':similar_articles,})
# @login_required
# def share_article(request,article_id):
# # retrieve artivle by its id
# article=get_object_or_404(Article,id=article_id)
# sent=False
# error=''
# sender_address='<EMAIL>'
#
# if request.method=='POST':
# # submitted data by user is stored in request.Post
# form=ShareEmailForm(request.POST)
# if form.is_valid():
# try:
# # .cleaned_data returns a dict containing only
# # valid form field data
# data_from_form=form.cleaned_data
# # use .build_absolute_uri to build a complete URL including
# # HTTP shcema and hostname with post url
# article_url=request.build_absolute_uri(
# article.get_absolute_url()
# )
# subject="user {} whose email is {} recommends this article {}".format(data_from_form['name'],data_from_form['email'],article.title)
# message="read {} at {} \n\n {}'s email_content:{}".format(article.title,article_url,data_from_form['name'],data_from_form['email_content'])
# # here I must CC myself a copy, otherwise sending will fail
# send_mail(subject,message,sender_address,[sender_address,data_from_form['to']])
# sent=True
# except Exception:
# form=ShareEmailForm()
# error='something went wrong, failed to send the email, sorry'
# else:
# form=ShareEmailForm()
#
# return render(request,'article/articles/share.html',
# {'article':article,
# 'form':form,
# 'sent':sent,
# 'error':error})
| 2.25
| 2
|
snippet/social_oauth/backends.py
|
duoduo369/social_auth_demo
| 45
|
12785134
|
from django.contrib.auth.backends import ModelBackend
from your_app.models import UserProfile  # placeholder: replace 'your_app' with your own app
from social_auth.models import UserSocialAuth
class OAuth2Backend(ModelBackend):
'''
oauth backend
'''
def authenticate(self, provider=None, uid=None):
try:
user_social = UserSocialAuth.objects.get(provider=provider, uid=uid)
return user_social.user
except UserSocialAuth.DoesNotExist:
return None
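# Usage sketch (an added note; the dotted path below simply mirrors this file's
# location and the provider name is illustrative):
# AUTHENTICATION_BACKENDS = (
#     'social_oauth.backends.OAuth2Backend',
#     'django.contrib.auth.backends.ModelBackend',
# )
# user = django.contrib.auth.authenticate(provider='weibo', uid='12345')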
| 2.21875
| 2
|
torch_glow/tests/nodes/floor_test.py
|
dreiss/glow
| 1
|
12785135
|
<gh_stars>1-10
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests.utils import jitVsGlow
class TestFloor(unittest.TestCase):
def test_floor(self):
"""Basic test of the PyTorch floor Node on Glow."""
def test_f(a, b):
c = a + b
d = torch.floor(c)
return d
x = torch.randn(3, 4, 5)
y = torch.randn(3, 4, 5)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::floor"})
| 2.421875
| 2
|
konfuzio_sdk/regex.py
|
konfuzio-ai/konfuzio-sdk
| 0
|
12785136
|
"""Generic way to build regex from examples."""
import logging
import regex as re
from typing import List, Dict
import pandas
from tabulate import tabulate
logger = logging.getLogger(__name__)
def merge_regex(regex_tokens: List[str]):
"""Merge a list of regex to one group."""
tokens = r'|'.join(sorted(regex_tokens, key=len, reverse=True))
return f'(?:{tokens})'
def harmonize_whitespaces(text):
"""Convert multiple whitespaces to one."""
single_whitespace_replaced = re.sub(r'(?<! ) (?! )', r'[ ]+', text)
suggestion = re.sub(r' {2,}', r'[ ]{2,}', single_whitespace_replaced)
return suggestion
def escape(string: str):
"""Escape a string, so that it can still be used to create a regex."""
escaped_original = (
string.replace('\\', "\\\\")
.replace('[', r'\[')
.replace(']', r'\]')
.replace('+', r'[\+]')
.replace('*', r'\*')
.replace('|', r'\|')
.replace('\n', '\n')
.replace('-', '[-]')
.replace('.', r'\.')
.replace('$', r'\$')
.replace('(', r'\(')
.replace(')', r'\)')
.replace('@', r'\@')
.replace('?', r'\?')
.replace('!', r'\!')
.replace(',', r'\,')
.replace('#', r'\#')
.replace('{', r'\{')
.replace('}', r'\}')
)
return escaped_original
def plausible_regex(suggestion, string):
"""
Test regex for plausibility.
We keep those tests in production to collect edge cases and always return true.
"""
try:
re.compile(suggestion)
plausibility_run = re.findall(suggestion, string)
if not plausibility_run:
logger.error(
f'Using "{repr(string)}" we found the regex {repr(suggestion)}, which does not match the input.'
)
logger.error(
'We are not able to convert your string to a valid regex. Please help to make it happen.'
)
result = ''
else:
result = suggestion
except re.error as e:
logger.exception(f'The proposed regex >>{repr(suggestion)}<< is not a valid regex of string: >>{string}<<')
logger.error('We are not able to convert your string to a valid regex. Please help to make it happen.')
logger.error(e)
result = ''
return result
def suggest_regex_for_string(string: str, replace_characters: bool = False, replace_numbers: bool = True):
"""Suggest regex for a given string."""
escaped_original = escape(string)
if replace_characters:
# strict replace capital letters
strict_escaped_capital_letters = re.sub(r'[A-Z\Ä\Ö\Ü]', r'[A-ZÄÖÜ]', escaped_original)
# combine multiple capital letters in sequence
combined_capital_letters = re.sub(r'(\[A-Z\Ä\Ö\Ü\]){2,}', r'[A-ZÄÖÜ]+', strict_escaped_capital_letters)
# escape all lower case letters
escaped_small_letters = re.sub(r'[a-zäöüß]', r'[a-zäöüß]', combined_capital_letters)
# combine multiple lower case letters in sequence
escaped_original = re.sub(r'(\[a-zäöüß\]){2,}', '[a-zäöüß]+', escaped_small_letters)
if replace_numbers:
escaped_original = re.sub('\\d', r'\\d', escaped_original)
# replace multiple whitespaces with r' +'
suggestion = harmonize_whitespaces(escaped_original)
suggestion = plausible_regex(suggestion, string)
return suggestion
def get_best_regex(evaluations: List, log_stats: bool = True, allow_zero_f1score=False) -> List:
"""Optimize selection of one regex in scenarios were we are unsure if all correct Annotations are Labeled."""
df = pandas.DataFrame(evaluations)
if df.empty:
logger.error('We cannot find any regex!')
return []
if not allow_zero_f1score:
df = df.loc[df['f1_score'] > 0]
df = df.sort_values(
[
'total_correct_findings',
'f1_score',
'regex_quality',
'annotation_precision',
'runtime', # take the fastest regex
],
ascending=[0, 0, 0, 0, 1],
).reset_index(drop=True)
df['correct_findings_id'] = df['correct_findings'].apply(lambda x: set(y.id_ for y in x))
df['all_matches_id'] = [set.union(*df.loc[0:i, 'correct_findings_id']) for i in range(len(df.index))]
df['new_matches_id'] = df.all_matches_id - df.all_matches_id.shift(1)
null_mask = df['new_matches_id'].isnull()
df.loc[null_mask, 'new_matches_id'] = df.loc[null_mask]['correct_findings_id']
df.insert(0, 'new_matches_count', df['new_matches_id'].str.len())
df = df.drop(['correct_findings_id', 'correct_findings', 'all_matches_id', 'new_matches_id'], axis=1)
# iterate over sorted df, mark any row if it adds no matching value compared to regex above, we used max windowsize
# matched_document = df.filter(regex=r'document_\d+').rolling(min_periods=1, window=100000000).max()
# any regex witch matches more Documents that the regex before, is a good regex
# relevant_regex = matched_document.sum(axis=1).diff()
# df['matched_annotations_total'] = matched_document.sum(axis=1)
# df['matched_annotations_additional'] = relevant_regex
# get the index of all good regex
index_of_regex = df[df['new_matches_count'] > 0].index
if log_stats:
stats = df.loc[index_of_regex][
['regex', 'runtime', 'annotation_recall', 'annotation_precision', 'f1_score', 'new_matches_count']
]
logger.info(f'\n\n{tabulate(stats, floatfmt=".4f", headers="keys", tablefmt="pipe")}\n')
# best_regex = df.loc[index_of_regex, 'regex'].to_list()
best_regex = df.loc[df['new_matches_count'] > 0, 'regex'].to_list()
return best_regex
def regex_matches(
doctext: str, regex: str, start_chr: int = 0, flags=0, overlapped=False, keep_full_match=True, filtered_group=None
) -> List[Dict]:
"""
Convert a text into text offsets with the help of one regex.
A result is a full regex match; matches or (named) groups are separated by keys within this result. The
function regexinfo in konfuzio.wrapper standardizes the information we keep per match.
:param filtered_group: Name of the regex group you want to return as results
:param keep_full_match: Keep the information about the full regex even if the regex contains groups
:param overlapped: Allow regex to overlap, e.g. ' ([^ ]*) ' creates an overlap on ' my name '
:param flags: Regex flag to compile regex
:param doctext: The text you want to apply the regex to
:param regex: The regex, either with groups, named groups or just a regex
:param start_chr: The start character of the annotation_set, in case the text is an annotation_set within a larger text
"""
results = []
# compile regex pattern
# will throw an error if the name of the group, ?P<GROUP_NAME>, is not a valid Python variable name,
# e.g. GROUP_NAME starts with a numeric character.
# we catch this error and then add a leading underscore to the group name, making it a valid Python variable name
try:
pattern = re.compile(regex, flags=flags)
except re.error:
# throws error if group name is an invalid Python variable
match = re.search(r'\?P<.*?>', regex) # match the invalid group name
group_name = match.group(0) # get the string representation
group_name = group_name.replace('?P<', '?P<_') # add a leading underscore
regex = re.sub(r'\?P<.*?>', group_name, regex) # replace invalid group name with new one
pattern = re.compile(regex, flags=flags) # try the compile again
for match in pattern.finditer(doctext, overlapped=overlapped):
# hold results per match
_results = []
if match.groups():
# parse named groups, if available
for group_name, group_index in match.re.groupindex.items():
if match[group_index] is not None:
# if one regex group ( a annotation's token) does not match, it returns none
# https://stackoverflow.com/a/59120080
_results.append(
{
'regex_used': repr(regex),
'regex_group': group_name,
'value': match[group_index],
'start_offset': match.regs[group_index][0],
'end_offset': match.regs[group_index][1],
'start_text': start_chr,
}
)
# find unnamed groups if available
unnamed_groups = [x for x in range(1, match.re.groups + 1) if x not in match.re.groupindex.values()]
for group_index in unnamed_groups:
_results.append(
{
'regex_used': repr(regex),
'regex_group': str(group_index),
'value': match[group_index],
'start_offset': match.regs[group_index][0],
'end_offset': match.regs[group_index][1],
'start_text': start_chr,
}
)
if match.groups() and keep_full_match or not match.groups():
_results.append(
{
'regex_used': repr(regex),
'regex_group': '0',
'value': match.group(),
'start_offset': match.span()[0],
'end_offset': match.span()[1],
'start_text': start_chr,
}
)
# if bbox:
# # update each element in _results with bbox
# for res in _results:
# res['bounding_box'] = get_bbox(
# bbox, res['start_offset'] + res['start_text'], res['end_offset'] + res['start_text']
# )
# add results per match to all results
results.extend(_results)
if filtered_group:
# allow to use similar group names, you can use "Ort_" if the group name is "Ort_255_259"
return [result for result in results if filtered_group in result['regex_group']]
else:
return results
def generic_candidate_function(regex, flags=0, overlapped=False, filtered_group=None):
"""Regex approach tob build a candidate function by one regex.
:param filtered_group: If a regex contains multiple named groups, you can filter the respective group by name
:param overlapped: Indicate if regex matches can overlap.
:param regex: Regex to create a candidate_function.
:param flags: Regex flag which should be considered.
:return: An initialized candidate function.
"""
# function to build candidates
def candidate_function(doctext):
"""
Split the text in candidates and other text chunks.
:param doctext: Text of the candidate
:return: Tuple of list of candidates and other text chunks
"""
annotations = regex_matches(
doctext=doctext,
regex=regex,
flags=flags,
overlapped=overlapped,
keep_full_match=False,
filtered_group=filtered_group,
)
# reduce the available information to value, start_offset and end_offset:
# Due to historical aim of the candidate function to only find regex matches
matches_tuples = [(d['value'], (d['start_offset'], d['end_offset'])) for d in annotations]
candidates = [x for x, y in matches_tuples]
candidates_spans = [y for x, y in matches_tuples]
# Calculate other text bases on spans.
other_text = []
previous = 0
for span in candidates_spans:
other_text.append(doctext[previous : span[0]])
previous = span[1]
other_text.append(doctext[previous:])
return candidates, other_text, candidates_spans
candidate_function.__name__ = f"regex_{regex}"
return candidate_function
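# Usage sketch (an added note; the strings are invented and the suggested regex
# shown is only approximate):
# suggestion = suggest_regex_for_string('Invoice 2021', replace_characters=True)
# # -> something like '[A-ZÄÖÜ][a-zäöüß]+[ ]+\d\d\d\d'
# spans = regex_matches('Invoice 2021 and Invoice 1999', suggestion)
# candidates, other_text, offsets = generic_candidate_function(suggestion)('Invoice 2021')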
| 3.640625
| 4
|
viewers/auger_sync_raster_scan.py
|
ScopeFoundry/FoundryDataBrowser
| 6
|
12785137
|
<gh_stars>1-10
from ScopeFoundry.data_browser import DataBrowserView
import pyqtgraph as pg
import numpy as np
from qtpy import QtWidgets
import h5py
class AugerSyncRasterScanH5(DataBrowserView):
name = 'auger_sync_raster_scan'
def setup(self):
self.settings.New('frame', dtype=int)
#self.settings.New('sub_frame', dtype=int)
self.settings.New('source', dtype=str, choices=('SEM', 'Auger'))
self.settings.New('SEM_chan', dtype=int, vmin=0, vmax=1)
self.settings.New('Auger_chan', dtype=int, vmin=0, vmax=7)
self.settings.New('auto_level', dtype=bool, initial=True)
for name in ['frame', 'source','SEM_chan', 'Auger_chan', 'auto_level']:
self.settings.get_lq(name).add_listener(self.update_display)
self.ui = QtWidgets.QWidget()
self.ui.setLayout(QtWidgets.QVBoxLayout())
self.ui.layout().addWidget(self.settings.New_UI(), stretch=0)
self.info_label = QtWidgets.QLabel()
self.ui.layout().addWidget(self.info_label, stretch=0)
self.imview = pg.ImageView()
self.ui.layout().addWidget(self.imview, stretch=1)
#self.graph_layout = pg.GraphicsLayoutWidget()
#self.graph_layout.addPlot()
def on_change_data_filename(self, fname):
try:
self.dat = h5py.File(fname)
M = self.measurement = self.dat['measurement/auger_sync_raster_scan']
nframe, nsubframe, ny, nx, nadc_chan = M['adc_map'].shape
self.settings.frame.change_min_max(0, nframe-1)
#self.settings.sub_frame.change_min_max(0, nsubframe-1)  # the 'sub_frame' setting is commented out in setup()
self.update_display()
except Exception as err:
self.imview.setImage(np.zeros((10,10)))
self.databrowser.ui.statusbar.showMessage("failed to load %s:\n%s" %(fname, err))
raise(err)
def is_file_supported(self, fname):
return "auger_sync_raster_scan.h5" in fname
def update_display(self):
M = self.measurement
ke = M['ke']
ii = self.settings['frame']
jj = 0 #self.settings['sub_frame']
source = self.settings['source']
if source=='SEM':
kk = self.settings['SEM_chan']
im = M['adc_map'][ii, jj, :,:, kk]
ke_info = " ke {:.1f} eV".format(ke[0,ii])
else:
kk = self.settings['Auger_chan']
im = M['auger_chan_map'][ii, jj, :,:, kk]
ke_info = " ke {:.1f} eV".format(ke[kk,ii])
self.imview.setImage(im.T, autoLevels=self.settings['auto_level'], )
info = "Frame {} {} chan {} ".format(ii,source, kk)
self.info_label.setText(info+ke_info)
| 2.171875
| 2
|
src/deleteNode.py
|
JL1829/LeetCode
| 0
|
12785138
|
<gh_stars>0
"""
Write a function that deletes a given (non-tail) node from a linked list.
The **only argument** passed to the function is **the node to be deleted**.
Given the linked list:
[4, 5, 1, 9]
represented as: 4 -> 5 -> 1 -> 9
Deleting node 5 returns [4, 1, 9], because 5 is removed and the list becomes: 4 -> 1 -> 9
**Note**
You only know the node to be deleted; you do not know its previous node.
**test case**
>>> head = ListNode(4)
>>> head.next = ListNode(5)
>>> head.next.next = ListNode(1)
>>> head.next.next.next = ListNode(9)
>>> print(printLinkedList(node=head))
[4, 5, 1, 9]
>>> deleteNode(node=head.next)
>>> print(printLinkedList(node=head))
[4, 1, 9]
"""
class ListNode:
"""docString Placeholder"""
def __init__(self, value):
self.value = value
self.next = None
def __repr__(self):
return f"List Node with value: {self.value}"
__str__ = __repr__
def deleteNode(node):
node.value = node.next.value
node.next = node.next.next
def printLinkedList(node):
value = []
if not node:
return []
while node:
value.append(node.value)
node = node.next
return value
if __name__ == '__main__':
head1 = ListNode(4)
head1.next = ListNode(5)
head1.next.next = ListNode(1)
head1.next.next.next = ListNode(9)
LinkedList = printLinkedList(node=head1)
print(LinkedList)
| 4.125
| 4
|
sqlite_dissect/carving/__init__.py
|
Defense-Cyber-Crime-Center/sqlite-dissect
| 12
|
12785139
|
"""
__init__.py
This init script will initialize any needed logic for this package.
This package will control signature generation and carving of SQLite files.
"""
| 1.632813
| 2
|
tests/test_cubicbez.py
|
simoncozens/kurbopy
| 0
|
12785140
|
from kurbopy import Point, CubicBez
import math
def test_cubicbez_deriv():
c = CubicBez(
Point(0.0, 0.0),
Point(1.0 / 3.0, 0.0),
Point(2.0 / 3.0, 1.0 / 3.0),
Point(1.0, 1.0),
)
deriv = c.deriv()
n = 10
for i in range(1, n):
t = 1/(i*n)
delta = 1e-6
p = c.eval(t)
p1 = c.eval(t + delta)
d_approx = (p1.to_vec2() - p.to_vec2()) * (1/delta)
d = deriv.eval(t).to_vec2()
assert (d - d_approx).hypot() < delta * 2.0
def test_cubicbez_arclen():
# y = x^2
c = CubicBez(
Point(0.0, 0.0),
Point(1.0 / 3.0, 0.0),
Point(2.0 / 3.0, 1.0 / 3.0),
Point(1.0, 1.0),
)
true_arclen = 0.5 * math.sqrt(5.0) + 0.25 * math.log(2.0 + math.sqrt(5.0))
for i in range(0, 12):
accuracy = 0.1 ** i
error = c.arclen(accuracy) - true_arclen
assert abs(error) < accuracy
# def test_cubicbez_inv_arclen():
# // y = x^2 / 100
# c = CubicBez(
# Point(0.0, 0.0),
# Point(100.0 / 3.0, 0.0),
# Point(200.0 / 3.0, 100.0 / 3.0),
# Point(100.0, 100.0),
# );
# true_arclen = 100.0 * (0.5 * 5.0f64.sqrt() + 0.25 * (2.0 + 5.0f64.sqrt()).ln());
# for i in 0..12 {
# accuracy = 0.1f64.powi(i);
# n = 10;
# for j in 0..=n {
# arc = (j as f64) * ((n as f64).recip() * true_arclen);
# t = c.inv_arclen(arc, accuracy * 0.5);
# actual_arc = c.subsegment(0.0..t).arclen(accuracy * 0.5);
# assert!(
# (arc - actual_arc).abs() < accuracy,
# "at accuracy {:e, wanted { got {",
# accuracy,
# actual_arc,
# arc
# );
# // corner case: user passes accuracy larger than total arc length
# accuracy = true_arclen * 1.1;
# arc = true_arclen * 0.5;
# t = c.inv_arclen(arc, accuracy);
# actual_arc = c.subsegment(0.0..t).arclen(accuracy);
# assert!(
# (arc - actual_arc).abs() < 2.0 * accuracy,
# "at accuracy {:e, want { got {",
# accuracy,
# actual_arc,
# arc
# );
# def test_cubicbez_signed_area_linear():
# #
# c = CubicBez::new(
# (1.0, 0.0),
# (2.0 / 3.0, 1.0 / 3.0),
# (1.0 / 3.0, 2.0 / 3.0),
# (0.0, 1.0),
# );
# epsilon = 1e-12;
# assert_eq!((Affine::rotate(0.5) * c).signed_area(), 0.5);
# assert!(((Affine::rotate(0.5) * c).signed_area() - 0.5).abs() < epsilon);
# assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.0).abs() < epsilon);
# assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.0).abs() < epsilon);
# def test_cubicbez_signed_area():
# // y = 1 - x^3
# c = CubicBez::new((1.0, 0.0), (2.0 / 3.0, 1.0), (1.0 / 3.0, 1.0), (0.0, 1.0));
# epsilon = 1e-12;
# assert!((c.signed_area() - 0.75).abs() < epsilon);
# assert!(((Affine::rotate(0.5) * c).signed_area() - 0.75).abs() < epsilon);
# assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.25).abs() < epsilon);
# assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.25).abs() < epsilon);
# def test_cubicbez_nearest():
# fn verify(result: Nearest, expected: f64) {
# assert!(
# (result.t - expected).abs() < 1e-6,
# "got {:? expected {",
# result,
# expected
# );
# // y = x^3
# c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
# verify(c.nearest((0.1, 0.001).into(), 1e-6), 0.1);
# verify(c.nearest((0.2, 0.008).into(), 1e-6), 0.2);
# verify(c.nearest((0.3, 0.027).into(), 1e-6), 0.3);
# verify(c.nearest((0.4, 0.064).into(), 1e-6), 0.4);
# verify(c.nearest((0.5, 0.125).into(), 1e-6), 0.5);
# verify(c.nearest((0.6, 0.216).into(), 1e-6), 0.6);
# verify(c.nearest((0.7, 0.343).into(), 1e-6), 0.7);
# verify(c.nearest((0.8, 0.512).into(), 1e-6), 0.8);
# verify(c.nearest((0.9, 0.729).into(), 1e-6), 0.9);
# verify(c.nearest((1.0, 1.0).into(), 1e-6), 1.0);
# verify(c.nearest((1.1, 1.1).into(), 1e-6), 1.0);
# verify(c.nearest((-0.1, 0.0).into(), 1e-6), 0.0);
# a = Affine::rotate(0.5);
# verify((a * c).nearest(a * Point::new(0.1, 0.001), 1e-6), 0.1);
# // ensure to_quads returns something given colinear points
# def test_degenerate_to_quads():
# c = CubicBez::new((0., 9.), (6., 6.), (12., 3.0), (18., 0.0));
# quads = c.to_quads(1e-6).collect::<Vec<_>>();
# assert_eq!(quads.len(), 1, "{:?", &quads);
def test_cubicbez_extrema():
q = CubicBez(Point(0.0, 0.0), Point(0.0, 1.0), Point(1.0, 1.0), Point(1.0, 0.0))
extrema = q.extrema()
assert len(extrema) == 1
assert abs(extrema[0] - 0.5) < 1e-6
q = CubicBez(Point(0.4, 0.5), Point(0.0, 1.0), Point(1.0, 0.0), Point(0.5, 0.4))
extrema = q.extrema()
assert len(extrema) == 4
# def test_cubicbez_toquads():
# // y = x^3
# c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
# for i in 0..10 {
# accuracy = 0.1f64.powi(i);
# mut worst: f64 = 0.0;
# for (_count, (t0, t1, q)) in c.to_quads(accuracy).enumerate() {
# epsilon = 1e-12;
# assert!((q.start() - c.eval(t0)).hypot() < epsilon);
# assert!((q.end() - c.eval(t1)).hypot() < epsilon);
# n = 4;
# for j in 0..=n {
# t = (j as f64) * (n as f64).recip();
# p = q.eval(t);
# err = (p.y - p.x.powi(3)).abs();
# worst = worst.max(err);
# assert!(err < accuracy, "got { wanted {", err, accuracy);
| 2.65625
| 3
|
xcp_abcd/utils/qcmetrics.py
|
krmurtha/xcp_abcd
| 5
|
12785141
|
<reponame>krmurtha/xcp_abcd
import nibabel as nb
import numpy as np
def regisQ(bold2t1w_mask,t1w_mask,bold2template_mask,template_mask):
reg_qc ={'coregDice': [dc(bold2t1w_mask,t1w_mask)], 'coregJaccard': [jc(bold2t1w_mask,t1w_mask)],
'coregCrossCorr': [crosscorr(bold2t1w_mask,t1w_mask)],'coregCoverag': [coverage(bold2t1w_mask,t1w_mask)],
'normDice': [dc(bold2template_mask,template_mask)],'normJaccard': [jc(bold2template_mask,template_mask)],
'normCrossCorr': [crosscorr(bold2template_mask,template_mask)], 'normCoverage': [coverage(bold2template_mask,template_mask)],
}
return reg_qc
def dc(input1, input2):
r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
input1 : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
input2 : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```input1``` and the
object(s) in ```input2```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric.
"""
input1 = nb.load(input1).get_fdata()
input2 = nb.load(input2).get_fdata()
input1 = np.atleast_1d(input1.astype(np.bool))
input2 = np.atleast_1d(input2.astype(np.bool))
intersection = np.count_nonzero(input1 & input2)
size_i1 = np.count_nonzero(input1)
size_i2 = np.count_nonzero(input2)
try:
dc = 2. * intersection / float(size_i1 + size_i2)
except ZeroDivisionError:
dc = 0.0
return dc
def jc(input1, input2):
r"""
Jaccard coefficient
Computes the Jaccard coefficient between the binary objects in two images.
Parameters
----------
input1: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
input2: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
jc: float
The Jaccard coefficient between the object(s) in `input1` and the
object(s) in `input2`. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric.
"""
input1 = nb.load(input1).get_fdata()
input2 = nb.load(input2).get_fdata()
input1 = np.atleast_1d(input1.astype(np.bool))
input2 = np.atleast_1d(input2.astype(np.bool))
intersection = np.count_nonzero(input1 & input2)
union = np.count_nonzero(input1 | input2)
jc = float(intersection) / float(union)
return jc
def crosscorr(input1, input2):
r"""
cross correlation
Compute the cross correlation between the input masks.
"""
input1 = nb.load(input1).get_fdata()
input2 = nb.load(input2).get_fdata()
input1 = np.atleast_1d(input1.astype(np.bool)).flatten()
input2 = np.atleast_1d(input2.astype(np.bool)).flatten()
cc = np.corrcoef(input1, input2)[0][1]
return cc
def coverage(input1, input2):
"""
Estimate the coverage between two masks.
"""
input1 = nb.load(input1).get_fdata()
input2 = nb.load(input2).get_fdata()
input1 = np.atleast_1d(input1.astype(np.bool))
input2 = np.atleast_1d(input2.astype(np.bool))
intsec = np.count_nonzero(input1 & input2)
if np.sum(input1) > np.sum(input2):
smallv = np.sum(input2)
else:
smallv = np.sum(input1)
cov = float(intsec)/float(smallv)
return cov
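# Worked example of the formulas above (an added note, independent of the NIfTI
# I/O): two masks with 100 and 80 nonzero voxels that overlap in 50 voxels give
# DC = 2*50 / (100+80) ~= 0.556, JC = 50 / (100+80-50) ~= 0.385,
# and coverage = 50 / min(100, 80) = 0.625.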
| 2.34375
| 2
|
application.py
|
tg2648/cu-rooms-app
| 0
|
12785142
|
"""
Main entrance to the application
"""
# Local application imports
from app import create_app
application = create_app()
if __name__ == '__main__':
application.run()
| 1.726563
| 2
|
adminlte_base/filters.py
|
kyzima-spb/adminlte-base
| 0
|
12785143
|
"""
Provides ready-made implementations for filters used in templates.
"""
from string import Template
import arrow
from dateutil import tz
from .constants import ThemeColor
__all__ = ('humanize', 'if_true', 'navbar_skin', 'sidebar_skin', 'replace_with_flag')
def humanize(dt, locale='en_us', time_zone=None):
"""The filter converts the date to human readable."""
dt = arrow.get(dt, tz.gettz(time_zone))
return dt.humanize(locale=locale, only_distance=True)
def if_true(value, replace_with=None):
"""Replaces the value with the passed if the value is true."""
if not value:
return ''
if replace_with is None:
return value
return Template(replace_with).safe_substitute(value=value)
def replace_with_flag(locale):
"""The filter replaces the locale with the CSS flag classes of the flag-icon-css library."""
locale = locale.replace('-', '_').lower().rsplit('_', maxsplit=1)
if len(locale) == 2:
return f'flag-icon flag-icon-{locale[-1]}'
return ''
def navbar_skin(color):
"""Returns a collection of classes to style the navigation bar."""
if color:
light = {ThemeColor.LIGHT, ThemeColor.WARNING, ThemeColor.WHITE, ThemeColor.ORANGE}
style = 'light' if color in light else f'dark'
return f'navbar-{style} navbar-{color}'
return ''
def sidebar_skin(color, light=False):
"""Returns a collection of classes to style the main sidebar bar."""
if color:
style = 'light' if light else f'dark'
return f'sidebar-{style}-{color}'
return ''
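# Usage sketch (an added note; ThemeColor member values are assumed to be their
# lower-case names, e.g. ThemeColor.PRIMARY == 'primary'):
# replace_with_flag('en-US')        # -> 'flag-icon flag-icon-us'
# if_true('admin', 'Hi, $value')    # -> 'Hi, admin'
# sidebar_skin(ThemeColor.PRIMARY)  # -> 'sidebar-dark-primary'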
| 3.109375
| 3
|
redbrick/version_check.py
|
dereklukacs/redbrick-sdk
| 1
|
12785144
|
"""Management of versions to help users update."""
import os
from configparser import ConfigParser
from datetime import datetime
from distutils.version import StrictVersion
import requests
from .utils.logging import print_warning # pylint: disable=cyclic-import
def get_version() -> str:
"""Get current installed version of the SDK."""
with open(
os.path.join(os.path.dirname(__file__), "VERSION"),
"r",
encoding="utf-8",
) as file_:
return file_.read().strip()
def version_check() -> None:
"""Check if current installed version of the SDK is up to date with latest pypi release."""
# Getting latest version on pypi
cache_file = os.path.join(os.path.expanduser("~"), ".redbrickai", "version")
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
cache_config = ConfigParser()
cache_config.read(cache_file)
update_cache = False
if (
"version" not in cache_config
or "current_version" not in cache_config["version"]
or cache_config["version"]["current_version"] != __version__
):
cache_config["version"] = {"current_version": __version__}
update_cache = True
current_timestamp = int(datetime.now().timestamp())
if (
"latest_version" not in cache_config["version"]
or "last_checked" not in cache_config["version"]
or current_timestamp - int(cache_config["version"]["last_checked"]) > 86400
):
url = "https://pypi.org/pypi/redbrick-sdk/json"
data = requests.get(url).json()
versions = list(data["releases"].keys())
versions.sort(key=StrictVersion)
latest_version = versions[-1]
# Comparing with current installed version
if __version__ != latest_version:
warn = (
"You are using version '{}' of the SDK. However, version '{}' is available!\n"
+ "Please update as soon as possible to get the latest features and bug fixes.\n"
+ "You can use 'python -m pip install --upgrade redbrick-sdk'"
+ " to get the latest version."
)
print_warning(warn.format(__version__, latest_version))
cache_config["version"]["latest_version"] = latest_version
cache_config["version"]["last_checked"] = str(current_timestamp)
update_cache = True
if update_cache:
with open(cache_file, "w", encoding="utf-8") as file_:
cache_config.write(file_)
__version__ = get_version()
version_check()
| 2.390625
| 2
|
blitz_api/tests/tests_view_Users.py
|
Jerome-Celle/Blitz-API
| 0
|
12785145
|
<reponame>Jerome-Celle/Blitz-API
import json
from datetime import timedelta
from unittest import mock
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.contrib.auth import get_user_model
from django.core import mail
from django.urls import reverse
from django.test.utils import override_settings
from ..factories import UserFactory, AdminFactory
from ..models import (ActionToken, Organization, Domain,
AcademicField, AcademicLevel)
from ..services import remove_translation_fields
from store.models import Membership
User = get_user_model()
class UsersTests(APITestCase):
@classmethod
def setUpClass(cls):
super(UsersTests, cls).setUpClass()
org = Organization.objects.create(name="random_university")
Domain.objects.create(
name="mailinator.com",
organization_id=org.id
)
AcademicField.objects.create(name="random_field")
cls.academic_level = AcademicLevel.objects.create(name="random_level")
cls.membership = Membership.objects.create(
name="basic_membership",
details="1-Year student membership",
available=True,
price=50,
duration=timedelta(days=365),
)
cls.membership.academic_levels.set([cls.academic_level])
def setUp(self):
self.client = APIClient()
self.user = UserFactory()
self.user.set_password('<PASSWORD>!')
self.user.membership = self.membership
self.user.save()
self.admin = AdminFactory()
self.admin.set_password('<PASSWORD>!')
self.admin.save()
def test_create_new_student_user(self):
"""
        Ensure we can create a new student user if we have the permission.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>!',
'phone': '1234567890',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
'name': "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(json.loads(response.content)['phone'], '1234567890')
user = User.objects.get(email="<EMAIL>")
activation_token = ActionToken.objects.filter(
user=user,
type='account_activation',
)
self.assertEqual(1, len(activation_token))
def test_create_new_user(self):
"""
Ensure we can create a new user if we have the permission.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>!',
'phone': '1234567890',
'first_name': 'Chuck',
'last_name': 'Norris',
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(json.loads(response.content)['phone'], '1234567890')
user = User.objects.get(email="<EMAIL>")
activation_token = ActionToken.objects.filter(
user=user,
type='account_activation',
)
self.assertEqual(1, len(activation_token))
def test_create_new_student_user_missing_field(self):
"""
Ensure we can't create a student user without academic_* fields.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>!',
'phone': '1234567890',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
'name': "random_university"
},
'academic_field': {'name': "random_field"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {
'academic_level': ['This field is required.']
}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_blank_fields(self):
"""
Ensure we can't create a new user with blank fields
"""
data = {
'email': '',
'password': '',
'phone': '',
'first_name': '',
'last_name': '',
'university': {
'name': ""
},
'academic_field': {'name': ""},
'academic_level': {'name': ""},
'gender': "",
'birthdate': "",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {
'academic_field': {'name': ['This field may not be blank.']},
'academic_level': {'name': ['This field may not be blank.']},
'birthdate': [
'Date has wrong format. Use one of these formats instead: '
'YYYY-MM-DD.'
],
'first_name': ['This field may not be blank.'],
'gender': ['"" is not a valid choice.'],
'last_name': ['This field may not be blank.'],
'email': ['This field may not be blank.'],
'password': ['<PASSWORD> field may not be blank.'],
'phone': ['Invalid format.'],
'university': {'name': ['This field may not be blank.']}
}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_missing_fields(self):
"""
Ensure we can't create a new user without required fields
"""
data = {}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {
'birthdate': ['This field is required.'],
'email': ['This field is required.'],
'first_name': ['This field is required.'],
'gender': ['This field is required.'],
'last_name': ['This field is required.'],
'password': ['<PASSWORD>.']
}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_weak_password(self):
"""
Ensure we can't create a new user with a weak password
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {"password": ['<PASSWORD>.']}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_invalid_domain(self):
"""
Ensure we can't create a new user with an invalid domain.
An invalid domain can be defined as:
- Non-existent
- Not matching with selected university
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {'email': ['Invalid domain name.']}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_invalid_university(self):
"""
Ensure we can't create a new user with an invalid university.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "invalid_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {'university': ['This university does not exist.']}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_invalid_fields(self):
"""
Ensure we can't create a new user with invalid fields.
        Emails are validated at creation time, which is why no email
        validation messages are sent in this case.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "invalid_university"
},
'academic_field': {'name': "invalid_field"},
'academic_level': {'name': "invalid_level"},
'gender': "invalid_gender",
'birthdate': "invalid_date",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {
'academic_field': ['This academic field does not exist.'],
'academic_level': ['This academic level does not exist.'],
'birthdate': [
'Date has wrong format. Use one of these formats instead: '
'YYYY-MM-DD.'
],
'gender': ['"invalid_gender" is not a valid choice.'],
'university': ['This university does not exist.']
}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_invalid_phone(self):
"""
Ensure we can't create a new user with an invalid phone number
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>#$%',
'phone': '12345',
'other_phone': '23445dfg',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {
"phone": ['Invalid format.'],
"other_phone": ['Invalid format.']
}
self.assertEqual(json.loads(response.content), content)
def test_create_new_user_duplicate_email(self):
"""
Ensure we can't create a new user with an already existing email
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>!',
'phone': '1234567890',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
user = UserFactory()
user.email = '<EMAIL>'
user.save()
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
content = {
'email': [
"An account for the specified email address already exists."
]
}
self.assertEqual(json.loads(response.content), content)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"AUTO_ACTIVATE_USER": False,
"FRONTEND_INTEGRATION": {
"ACTIVATION_URL": "fake_url",
}
}
)
def test_create_user_activation_email(self):
"""
Ensure that the activation email is sent when user signs up.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>!',
'phone': '1234567890',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(json.loads(response.content)['phone'], '1234567890')
user = User.objects.get(email="<EMAIL>")
activation_token = ActionToken.objects.filter(
user=user,
type='account_activation',
)
self.assertFalse(user.is_active)
self.assertEqual(1, len(activation_token))
# Test that one message was sent:
self.assertEqual(len(mail.outbox), 1)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"AUTO_ACTIVATE_USER": False,
"FRONTEND_INTEGRATION": {
"ACTIVATION_URL": "fake_url",
}
}
)
@mock.patch('blitz_api.services.EmailMessage.send', return_value=0)
def test_create_user_activation_email_failure(self, send):
"""
Ensure that the user is notified that no email was sent.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>!',
'phone': '1234567890',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
content = {
'detail': "The account was created but no email was "
"sent. If your account is not activated, "
"contact the administration.",
}
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(json.loads(response.content), content)
user = User.objects.get(email="<EMAIL>")
activation_token = ActionToken.objects.filter(
user=user,
type='account_activation',
)
self.assertFalse(user.is_active)
self.assertEqual(1, len(activation_token))
# Test that no email was sent:
self.assertEqual(len(mail.outbox), 0)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"AUTO_ACTIVATE_USER": True,
"FRONTEND_INTEGRATION": {
"ACTIVATION_URL": "fake_url",
}
}
)
@mock.patch('blitz_api.services.EmailMessage.send', return_value=0)
def test_create_user_auto_activate(self, services):
"""
Ensure that the user is automatically activated.
"""
data = {
'username': 'John',
'email': '<EMAIL>',
'password': '<PASSWORD>!',
'phone': '1234567890',
'first_name': 'Chuck',
'last_name': 'Norris',
'university': {
"name": "random_university"
},
'academic_field': {'name': "random_field"},
'academic_level': {'name': "random_level"},
'gender': "M",
'birthdate': "1999-11-11",
}
response = self.client.post(
reverse('user-list'),
data,
format='json',
)
content = {
'detail': "The account was created but no email was "
"sent. If your account is not activated, "
"contact the administration.",
}
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(json.loads(response.content), content)
user = User.objects.get(email="<EMAIL>")
activation_token = ActionToken.objects.filter(
user=user,
type='account_activation',
)
self.assertTrue(user.is_active)
self.assertEqual(1, len(activation_token))
# Test that no email was sent:
self.assertEqual(len(mail.outbox), 0)
def test_list_users(self):
"""
Ensure we can list all users.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('user-list'))
self.assertEqual(json.loads(response.content)['count'], 2)
# Users are ordered alphabetically by email
first_user = json.loads(response.content)['results'][0]
second_user = json.loads(response.content)['results'][1]
self.assertEqual(first_user['email'], self.admin.email)
membership = {
'url': 'http://testserver/memberships/' + str(self.membership.id),
'id': self.membership.id,
'order_lines': [],
'name': 'basic_membership',
'available': True,
'price': '50.00',
'details': '1-Year student membership',
'duration': '365 00:00:00',
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)]
}
self.assertEqual(
remove_translation_fields(second_user['membership']),
membership
)
# Check the system doesn't return attributes not expected
attributes = [
'id',
'url',
'email',
'first_name',
'last_name',
'is_active',
'phone',
'other_phone',
'is_superuser',
'is_staff',
'university',
'last_login',
'date_joined',
'academic_level',
'academic_field',
'gender',
'birthdate',
'groups',
'user_permissions',
'tickets',
'membership',
'membership_end',
'city',
'personnal_restrictions',
'academic_program_code',
'faculty',
'student_number',
'volunteer_for_workplace',
]
for key in first_user.keys():
self.assertTrue(
key in attributes,
'Attribute "{0}" is not expected but is '
'returned by the system.'.format(key)
)
attributes.remove(key)
# Ensure the system returns all expected attributes
self.assertTrue(
len(attributes) == 0,
'The system failed to return some '
'attributes : {0}'.format(attributes)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_users_with_search(self):
"""
        Ensure we can search through the list of users.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('user-list') + '?search=chuck')
self.assertEqual(json.loads(response.content)['count'], 1)
# Users are ordered alphabetically by email
first_user = json.loads(response.content)['results'][0]
self.assertEqual(first_user['email'], self.admin.email)
# Check the system doesn't return attributes not expected
attributes = [
'id',
'url',
'email',
'first_name',
'last_name',
'is_active',
'phone',
'other_phone',
'is_superuser',
'is_staff',
'university',
'last_login',
'date_joined',
'academic_level',
'academic_field',
'gender',
'birthdate',
'groups',
'user_permissions',
'tickets',
'membership',
'membership_end',
'city',
'personnal_restrictions',
'academic_program_code',
'faculty',
'student_number',
'volunteer_for_workplace',
]
for key in first_user.keys():
self.assertTrue(
key in attributes,
'Attribute "{0}" is not expected but is '
'returned by the system.'.format(key)
)
attributes.remove(key)
# Ensure the system returns all expected attributes
self.assertTrue(
len(attributes) == 0,
'The system failed to return some '
'attributes : {0}'.format(attributes)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_users_without_authenticate(self):
"""
Ensure we can't list users without authentication.
"""
response = self.client.get(reverse('user-list'))
content = {"detail": "Authentication credentials were not provided."}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_list_users_without_permissions(self):
"""
Ensure we can't list users without permissions.
"""
self.client.force_authenticate(user=self.user)
response = self.client.get(reverse('user-list'))
content = {
'detail': 'You do not have permission to perform this action.'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 2.046875
| 2
|
ray_examples/serving/modelserving/model_server_deployments.py
|
mlkimmins/scalingpythonml
| 13
|
12785146
|
import ray
from ray import serve
import requests
import os
import pickle
import numpy as np
import asyncio
# Model locations
RANDOM_FOREST_MODEL_PATH = os.path.join("wine-quality_random_forest.pkl")
XGBOOST_MODEL_PATH = os.path.join("wine-quality_xgboost.pkl")
GRBOOST_MODEL_PATH = os.path.join("wine-quality_grboost.pkl")
# Start Ray
ray.init()
# Start Serve
serve.start()
# Define deployments
@serve.deployment(route_prefix="/randomforest")
class RandomForestModel:
def __init__(self, path):
with open(path, "rb") as f:
self.model = pickle.load(f)
async def __call__(self, request):
payload = await request.json()
return self.serve(payload)
def serve(self, request):
input_vector = [
request["fixed acidity"],
request["volatile acidity"],
request["citric acid"],
request["residual sugar"],
request["chlorides"],
request["free sulfur dioxide"],
request["total sulfur dioxide"],
request["density"],
request["pH"],
request["sulphates"],
request["alcohol"],
]
prediction = self.model.predict([input_vector])[0]
return {"result": str(prediction)}
@serve.deployment(route_prefix="/grboost")
class GRBoostModel:
def __init__(self, path):
with open(path, "rb") as f:
self.model = pickle.load(f)
async def __call__(self, request):
payload = await request.json()
return self.serve(payload)
def serve(self, request):
input_vector = np.array([
request["fixed acidity"],
request["volatile acidity"],
request["citric acid"],
request["residual sugar"],
request["chlorides"],
request["free sulfur dioxide"],
request["total sulfur dioxide"],
request["density"],
request["pH"],
request["sulphates"],
request["alcohol"],
])
prediction = self.model.predict(input_vector.reshape(1,11))[0]
return {"result": str(prediction)}
@serve.deployment(route_prefix="/xgboost")
class XGBoostModel:
def __init__(self, path):
with open(path, "rb") as f:
self.model = pickle.load(f)
async def __call__(self, request):
payload = await request.json()
return self.serve(payload)
def serve(self, request):
input_vector = np.array([
request["fixed acidity"],
request["volatile acidity"],
request["citric acid"],
request["residual sugar"],
request["chlorides"],
request["free sulfur dioxide"],
request["total sulfur dioxide"],
request["density"],
request["pH"],
request["sulphates"],
request["alcohol"],
])
prediction = self.model.predict(input_vector.reshape(1,11))[0]
return {"result": str(prediction)}
RandomForestModel.deploy(RANDOM_FOREST_MODEL_PATH)
XGBoostModel.deploy(XGBOOST_MODEL_PATH)
GRBoostModel.deploy(GRBOOST_MODEL_PATH)
@serve.deployment(route_prefix="/speculative")
class Speculative:
def __init__(self):
self.rfhandle = RandomForestModel.get_handle(sync=False)
self.xgboosthandle = XGBoostModel.get_handle(sync=False)
self.grboosthandle = GRBoostModel.get_handle(sync=False)
async def __call__(self, request):
payload = await request.json()
f1, f2, f3 = await asyncio.gather(self.rfhandle.serve.remote(payload),
self.xgboosthandle.serve.remote(payload), self.grboosthandle.serve.remote(payload))
        rfresult = ray.get(f1)['result']
        xgresult = ray.get(f2)['result']
grresult = ray.get(f3)['result']
ones = []
zeros = []
        if rfresult == "1":
ones.append("Random forest")
else:
zeros.append("Random forest")
        if xgresult == "1":
ones.append("XGBoost")
else:
zeros.append("XGBoost")
if grresult == "1":
ones.append("Gradient boost")
else:
zeros.append("Gradient boost")
if len(ones) >= 2:
return {"result": "1", "methods": ones}
else:
return {"result": "0", "methods": zeros}
Speculative.deploy()
sample_request_input = {
"fixed acidity": -0.70071875,
"volatile acidity": 0.34736425,
"citric acid": -1.34012182,
"residual sugar": -0.16942723,
"chlorides": -0.1586918,
"free sulfur dioxide": 1.06389977,
"total sulfur dioxide": -0.10545198,
"density": -0.66075704,
"pH": 0.70550789,
"sulphates": -0.46118037,
"alcohol": 0.26002813,
}
print(requests.get("http://localhost:8000/randomforest", json=sample_request_input).text)
print(requests.get("http://localhost:8000/grboost", json=sample_request_input).text)
print(requests.get("http://localhost:8000/xgboost", json=sample_request_input).text)
print(requests.get("http://localhost:8000/speculative", json=sample_request_input).text)
| 2.453125
| 2
|
tests/benchmarks.py
|
Bahus/easy_cache
| 34
|
12785147
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import math
from contextlib import contextmanager
from timeit import default_timer
from redis import StrictRedis
import six
from django.conf import settings
# noinspection PyUnresolvedReferences
from six.moves import xrange
from easy_cache import caches
from easy_cache.contrib.redis_cache import RedisCacheInstance
from easy_cache.decorators import ecached
from tests.conf import REDIS_HOST, MEMCACHED_HOST
settings.configure(
DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'locmem',
'KEY_PREFIX': 'custom_prefix',
},
'memcached': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': MEMCACHED_HOST,
'KEY_PREFIX': 'memcached',
},
'redis': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://{}/1'.format(REDIS_HOST),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
}
}
},
ROOT_URLCONF='',
INSTALLED_APPS=()
)
# adds custom redis client
redis_host, redis_port = REDIS_HOST.split(':')
caches['redis_client'] = RedisCacheInstance(
StrictRedis(host=redis_host, port=redis_port),
prefix='bench'
)
def ratio(a, b):
if a > b:
return a / b, 1
elif a < b:
return 1, b / a
else:
return 1, 1
class Stopwatch(object):
def __init__(self, name):
self.name = name
self.t0 = default_timer()
self.laps = []
def __unicode__(self):
m = self.mean()
d = self.stddev()
a = self.median()
fmt = u'%-37s: mean=%0.5f, median=%0.5f, stddev=%0.5f, n=%3d, snr=%8.5f:%8.5f'
return fmt % ((self.name, m, a, d, len(self.laps)) + ratio(m, d))
def __str__(self):
if six.PY2:
return six.binary_type(self.__unicode__())
else:
return self.__unicode__()
def mean(self):
return sum(self.laps) / len(self.laps)
def median(self):
return sorted(self.laps)[int(len(self.laps) / 2)]
def stddev(self):
mean = self.mean()
return math.sqrt(sum((lap - mean) ** 2 for lap in self.laps) / len(self.laps))
def total(self):
return default_timer() - self.t0
def reset(self):
self.t0 = default_timer()
self.laps = []
@contextmanager
def timing(self):
t0 = default_timer()
try:
yield
finally:
te = default_timer()
self.laps.append(te - t0)
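# Illustrative sketch (not used by the benchmarks below): Stopwatch is driven by
# collecting laps with the timing() context manager, then printing the summary line
# (mean/median/stddev/snr) via __str__.
def _stopwatch_demo(n_laps=3):
    sw = Stopwatch('demo')
    for _ in xrange(n_laps):
        with sw.timing():
            sum(xrange(100000))
    print(sw)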
c = 0
def time_consuming_operation():
global c
c += 1
a = sum(xrange(1000000))
return str(a)
def test_no_cache():
return time_consuming_operation()
@ecached(cache_alias='default')
def test_locmem_cache():
return time_consuming_operation()
@ecached(cache_alias='memcached')
def test_memcached_cache():
return time_consuming_operation()
@ecached(cache_alias='redis')
def test_redis_cache():
return time_consuming_operation()
@ecached(cache_alias='redis_client')
def test_redis_client_cache():
return time_consuming_operation()
@ecached(cache_alias='default', tags=['tag1', 'tag2'])
def test_locmem_cache_tags():
return time_consuming_operation()
@ecached(cache_alias='memcached', tags=['tag1', 'tag2'])
def test_memcached_cache_tags():
return time_consuming_operation()
@ecached(cache_alias='redis', tags=['tag1', 'tag2'])
def test_redis_cache_tags():
return time_consuming_operation()
@ecached(cache_alias='redis_client', tags=['tag1', 'tag2'])
def test_redis_client_cache_tags():
return time_consuming_operation()
def main():
from django import get_version
import sys
print('=======', 'Python:', sys.version.replace('\n', ''), 'Django:', get_version(), '=======')
global c
n = 100
benchmarks = (
(test_no_cache, n),
(test_locmem_cache, 1),
(test_locmem_cache_tags, 1),
(test_memcached_cache, 1),
(test_memcached_cache_tags, 1),
(test_redis_cache, 1),
(test_redis_cache_tags, 1),
(test_redis_client_cache, 1),
(test_redis_client_cache_tags, 1),
)
def cleanup(function):
if hasattr(function, 'invalidate_cache_by_key'):
function.invalidate_cache_by_key()
if hasattr(function, 'invalidate_cache_by_tags'):
function.invalidate_cache_by_tags()
for method, count in benchmarks:
sw1 = Stopwatch('[cleanup] ' + method.__name__)
cleanup(method)
c = 0
for _ in xrange(n):
with sw1.timing():
method()
cleanup(method)
assert c == n, c
print(sw1)
sw2 = Stopwatch('[ normal] ' + method.__name__)
cleanup(method)
c = 0
for _ in xrange(n):
# skip first time
if _ == 0:
method()
continue
with sw2.timing():
method()
assert c == count, c
print(sw2)
print('mean diff: {:.3} %, median diff: {:.3} %'.format(
float(sw2.mean()) / sw1.mean() * 100,
float(sw2.median()) / sw1.median() * 100,
))
if __name__ == '__main__':
main()
| 1.96875
| 2
|
clothing/migrations/0003_test_data_clothCategories.py
|
AmitAharoni/iWear2021
| 0
|
12785148
|
<gh_stars>0
from django.db import migrations, transaction
from clothing.models import ClothCategory
from iWear.resources.clothCategories import CLOTH_CATEGORIES_LIST
class Migration(migrations.Migration):
dependencies = [
('clothing', '0002_clothingitem_owner'),
]
def generate_clothcategories_data(apps, schema_editor):
with transaction.atomic():
for category in CLOTH_CATEGORIES_LIST:
ClothCategory(category_name=category).save()
operations = [
migrations.RunPython(generate_clothcategories_data),
]
| 2.109375
| 2
|
tf_agents/keras_layers/transformer_encoder_layer.py
|
woerns/agents
| 0
|
12785149
|
<filename>tf_agents/keras_layers/transformer_encoder_layer.py
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-head attention layer as used in the Transformer model"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.utils import common
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import tensor_shape # TF internal
from tensorflow.python.keras import layers # TF internal
# pylint:enable=g-direct-tensorflow-import
__all__ = ["MultiHeadAttention", "TransformerEncoderLayer"]
def _scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
    mask: Float tensor with shape broadcastable
          to (..., seq_len_q, seq_len_k), or None to disable masking.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
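# Illustrative sketch (not part of the original TF-Agents module): exercising the helper
# above on toy tensors, with shapes as documented in its docstring.
def _attention_smoke_test():
  q = tf.random.uniform((2, 4, 8))   # (batch, seq_len_q, depth)
  k = tf.random.uniform((2, 6, 8))   # (batch, seq_len_k, depth)
  v = tf.random.uniform((2, 6, 16))  # (batch, seq_len_v, depth_v)
  output, weights = _scaled_dot_product_attention(q, k, v, mask=None)
  # output: (2, 4, 16); weights: (2, 4, 6), each row summing to 1 over seq_len_k.
  return output, weights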
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dtype=tf.float32):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model, dtype=dtype)
self.wk = tf.keras.layers.Dense(d_model, dtype=dtype)
self.wv = tf.keras.layers.Dense(d_model, dtype=dtype)
self.dense = tf.keras.layers.Dense(d_model, dtype=dtype)
def _split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self._split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self._split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self._split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = _scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
class TransformerEncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1, dtype=tf.float32):
super(TransformerEncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads, dtype=dtype)
self.ffn = tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu', dtype=dtype), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model, dtype=dtype) # (batch_size, seq_len, d_model)
])
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6, dtype=dtype)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6, dtype=dtype)
self.dropout1 = tf.keras.layers.Dropout(rate, dtype=dtype)
self.dropout2 = tf.keras.layers.Dropout(rate, dtype=dtype)
def call(self, x, training, mask):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
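# Illustrative usage sketch (assumed hyperparameters, not part of the original module):
# build one encoder layer and run a dummy (batch, seq_len, d_model) tensor through it.
def _encoder_layer_smoke_test():
  layer = TransformerEncoderLayer(d_model=64, num_heads=4, dff=128, rate=0.1)
  x = tf.random.uniform((2, 10, 64))         # (batch, input_seq_len, d_model)
  out = layer(x, training=False, mask=None)  # same shape: (2, 10, 64)
  return out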
| 1.789063
| 2
|
hydroffice/ssp_manager/sspmanager_ui.py
|
hydroffice/hyo_sspmanager
| 0
|
12785150
|
from __future__ import absolute_import, division, print_function, unicode_literals
import wx
import logging
log = logging.getLogger(__name__)
MENU_FILE = wx.NewId()
MENU_VIEW = wx.NewId()
MENU_INTERACT = wx.NewId()
MENU_PROC = wx.NewId()
MENU_DB = wx.NewId()
MENU_SERVER = wx.NewId()
MENU_TOOLS = wx.NewId()
MENU_HELP = wx.NewId()
MENU_FILE_IMP = wx.NewId()
MENU_FILE_IMP_DIGI_S = wx.NewId()
MENU_FILE_IMP_UNB = wx.NewId()
MENU_FILE_IMP_SIPPICAN = wx.NewId()
MENU_FILE_IMP_SEABIRD = wx.NewId()
MENU_FILE_IMP_VALEPORT = wx.NewId()
MENU_FILE_IMP_VALE_MIDAS = wx.NewId()
MENU_FILE_IMP_VALE_MON = wx.NewId()
MENU_FILE_IMP_VALE_MINIS = wx.NewId()
MENU_FILE_IMP_TURO = wx.NewId()
MENU_FILE_IMP_DIGIBAR = wx.NewId()
MENU_FILE_IMP_DIGI_PRO = wx.NewId()
MENU_FILE_IMP_CASTAWAY = wx.NewId()
MENU_FILE_IMP_IDRONAUT = wx.NewId()
MENU_FILE_IMP_SAIV = wx.NewId()
MENU_FILE_QUERY = wx.NewId()
MENU_FILE_QUERY_WOA = wx.NewId()
MENU_FILE_QUERY_RTOFS = wx.NewId()
MENU_FILE_QUERY_SIS = wx.NewId()
MENU_FILE_EXPORT = wx.NewId()
MENU_FILE_EXPORT_ASVP = wx.NewId()
MENU_FILE_EXPORT_VEL = wx.NewId()
MENU_FILE_EXPORT_HIPS = wx.NewId()
MENU_FILE_EXPORT_PRO = wx.NewId()
MENU_FILE_EXPORT_IXBLUE = wx.NewId()
MENU_FILE_EXPORT_UNB = wx.NewId()
MENU_FILE_EXPORT_ELAC = wx.NewId()
MENU_FILE_EXPORT_CSV = wx.NewId()
MENU_FILE_EXPORT_CAST = wx.NewId()
MENU_FILE_CLEAR = wx.NewId()
MENU_FILE_EXIT = wx.NewId()
MENU_PROC_INS_ZOOM = wx.NewId()
MENU_PROC_INS_FLAG = wx.NewId()
MENU_PROC_INS_UNFLAG = wx.NewId()
MENU_PROC_INS_INSERT = wx.NewId()
MENU_VIEW_RESET = wx.NewId()
MENU_VIEW_HIDE_WOA = wx.NewId()
MENU_VIEW_HIDE_FLAGGED = wx.NewId()
MENU_VIEW_HIDE_DEPTH = wx.NewId()
MENU_PROC_LOAD_SAL = wx.NewId()
MENU_PROC_LOAD_TEMP_SAL = wx.NewId()
MENU_PROC_LOAD_SURFSP = wx.NewId()
MENU_PROC_EXTEND_CAST = wx.NewId()
MENU_PROC_INSPECTION = wx.NewId()
MENU_PROC_PREVIEW_THINNING = wx.NewId()
MENU_PROC_SEND_PROFILE = wx.NewId()
MENU_PROC_STORE_SSP = wx.NewId()
MENU_PROC_REDO_SSP = wx.NewId()
MENU_PROC_LOG_METADATA = wx.NewId()
# MENU_PROC_EXPRESS = wx.NewId()
MENU_DB_QUERY = wx.NewId()
MENU_DB_QUERY_INTERNAL_DB = wx.NewId()
MENU_DB_QUERY_EXTERNAL_DB = wx.NewId()
MENU_DB_DELETE = wx.NewId()
MENU_DB_DELETE_INTERNAL_DB = wx.NewId()
MENU_DB_DELETE_EXTERNAL_DB = wx.NewId()
MENU_DB_EXPORT = wx.NewId()
MENU_DB_EXPORT_SHP = wx.NewId()
MENU_DB_EXPORT_KML = wx.NewId()
MENU_DB_EXPORT_CSV = wx.NewId()
MENU_DB_PLOT = wx.NewId()
MENU_DB_PLOT_MAP_SSP = wx.NewId()
MENU_DB_PLOT_DAILY_SSP = wx.NewId()
MENU_DB_SAVE_DAILY_SSP = wx.NewId()
MENU_TOOLS_SERVER = wx.NewId()
MENU_TOOLS_SET_REFERENCE_CAST = wx.NewId()
MENU_TOOLS_CLEAR_REFERENCE_CAST = wx.NewId()
MENU_TOOLS_EDIT_REFERENCE_CAST = wx.NewId()
MENU_TOOLS_REFERENCE = wx.NewId()
MENU_TOOLS_MODIFY_SETTINGS = wx.NewId()
MENU_TOOLS_VIEW_SETTINGS = wx.NewId()
MENU_TOOLS_RELOAD_SETTINGS = wx.NewId()
MENU_TOOLS_USER_INPUTS = wx.NewId()
MENU_TOOLS_REF_MON = wx.NewId()
MENU_TOOLS_GEO_MONITOR = wx.NewId()
MENU_SERVER_START = wx.NewId()
MENU_SERVER_SEND = wx.NewId()
MENU_SERVER_STOP = wx.NewId()
MENU_SERVER_LOG_METADATA = wx.NewId()
MENU_HELP_MANUAL = wx.NewId()
MENU_HELP_ABOUT = wx.NewId()
MENUS_ALL = (MENU_FILE_IMP, MENU_FILE_IMP_CASTAWAY, MENU_FILE_IMP_DIGIBAR, MENU_FILE_IMP_DIGI_PRO, MENU_FILE_IMP_DIGI_S,
MENU_FILE_IMP_IDRONAUT, MENU_FILE_IMP_SAIV, MENU_FILE_IMP_SEABIRD, MENU_FILE_IMP_SIPPICAN,
MENU_FILE_IMP_TURO, MENU_FILE_IMP_UNB, MENU_FILE_IMP_VALEPORT,
MENU_FILE_IMP_VALE_MIDAS, MENU_FILE_IMP_VALE_MON, MENU_FILE_IMP_VALE_MINIS,
MENU_FILE_QUERY,
MENU_FILE_EXPORT, MENU_FILE_EXPORT_CAST,
MENU_FILE_EXPORT_ASVP, MENU_FILE_EXPORT_PRO, MENU_FILE_EXPORT_HIPS, MENU_FILE_EXPORT_IXBLUE,
MENU_FILE_EXPORT_VEL, MENU_FILE_EXPORT_UNB, MENU_FILE_EXPORT_ELAC, MENU_FILE_EXPORT_CSV,
MENU_FILE_CLEAR,
MENU_VIEW_RESET, MENU_VIEW_HIDE_WOA, MENU_VIEW_HIDE_FLAGGED, MENU_VIEW_HIDE_DEPTH,
MENU_PROC_LOAD_SAL, MENU_PROC_LOAD_TEMP_SAL, MENU_PROC_LOAD_SURFSP, MENU_PROC_EXTEND_CAST,
MENU_PROC_INSPECTION, MENU_PROC_INS_ZOOM, MENU_PROC_INS_FLAG, MENU_PROC_INS_UNFLAG, MENU_PROC_INS_INSERT,
# MENU_PROC_EXPRESS,
MENU_PROC_PREVIEW_THINNING, MENU_PROC_SEND_PROFILE,
MENU_PROC_STORE_SSP, MENU_PROC_REDO_SSP, MENU_PROC_LOG_METADATA,
MENU_DB_QUERY,
MENU_DB_DELETE,
MENU_DB_EXPORT,
MENU_DB_PLOT,
MENU_SERVER_START, MENU_SERVER_SEND, MENU_SERVER_STOP, MENU_SERVER_LOG_METADATA,
MENU_TOOLS_GEO_MONITOR, MENU_TOOLS_REF_MON,
MENU_TOOLS_SET_REFERENCE_CAST, MENU_TOOLS_EDIT_REFERENCE_CAST, MENU_TOOLS_CLEAR_REFERENCE_CAST,
MENU_TOOLS_MODIFY_SETTINGS, MENU_TOOLS_VIEW_SETTINGS, MENU_TOOLS_RELOAD_SETTINGS,
MENU_TOOLS_USER_INPUTS)
MENUS_DISABLED_ON_CLOSED = (
MENU_FILE_EXPORT_CAST, MENU_FILE_CLEAR,
MENU_VIEW_RESET, MENU_VIEW_HIDE_WOA, MENU_VIEW_HIDE_FLAGGED, MENU_VIEW_HIDE_DEPTH,
MENU_PROC_LOAD_SAL, MENU_PROC_LOAD_TEMP_SAL, MENU_PROC_LOAD_SURFSP,
MENU_PROC_EXTEND_CAST, MENU_PROC_INSPECTION,
MENU_PROC_INS_ZOOM, MENU_PROC_INS_FLAG, MENU_PROC_INS_INSERT, MENU_PROC_INS_UNFLAG,
MENU_PROC_PREVIEW_THINNING, MENU_PROC_SEND_PROFILE,
MENU_PROC_STORE_SSP, MENU_PROC_REDO_SSP,
# MENU_PROC_EXPRESS,
MENU_TOOLS_SET_REFERENCE_CAST,
MENU_SERVER_SEND, MENU_SERVER_STOP)
MENUS_DISABLED_ON_OPEN = (MENU_SERVER_SEND, MENU_SERVER_STOP)
MENUS_DISABLED_ON_SERVER = (
MENU_FILE_IMP, # all import
MENU_FILE_QUERY, # all query
MENU_FILE_EXPORT, # all export
MENU_FILE_CLEAR,
MENU_PROC_LOG_METADATA, MENU_TOOLS_SET_REFERENCE_CAST, MENU_TOOLS_EDIT_REFERENCE_CAST,
MENU_TOOLS_CLEAR_REFERENCE_CAST, MENU_FILE_IMP_DIGI_S, MENU_FILE_IMP_SEABIRD,
# MENU_PROC_EXPRESS,
MENU_PROC_LOAD_SAL, MENU_PROC_LOAD_TEMP_SAL, MENU_PROC_LOAD_SURFSP, MENU_PROC_EXTEND_CAST,
MENU_PROC_INSPECTION, MENU_PROC_PREVIEW_THINNING, MENU_PROC_SEND_PROFILE, MENU_PROC_REDO_SSP,
MENU_DB_QUERY,
MENU_DB_DELETE,
MENU_DB_EXPORT,
MENU_DB_PLOT,
MENU_SERVER_START)
class SSPManagerBase(wx.Frame):
def __init__(self, *args, **kwds):
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.SVPEditorFrame_menubar = wx.MenuBar()
# ### FILE ###
self.FileMenu = wx.Menu()
# File/Import
FileImp = wx.Menu()
self.FileImpCastaway = wx.MenuItem(FileImp, MENU_FILE_IMP_CASTAWAY, "Castaway (.csv)",
"Import a Castaway cast", wx.ITEM_NORMAL)
FileImp.AppendItem(self.FileImpCastaway)
FileImpDigi = wx.Menu()
self.FileImpDigibarPro = wx.MenuItem(FileImpDigi, MENU_FILE_IMP_DIGI_PRO, "Digibar Pro (.txt)",
"Import a Digibar Pro cast", wx.ITEM_NORMAL)
FileImpDigi.AppendItem(self.FileImpDigibarPro)
self.FileImpDigibarS = wx.MenuItem(FileImpDigi, MENU_FILE_IMP_DIGI_S, "Digibar S (.csv)",
"Import a Digibar S cast", wx.ITEM_NORMAL)
FileImpDigi.AppendItem(self.FileImpDigibarS)
FileImp.AppendMenu(MENU_FILE_IMP_DIGIBAR, "Digibar", FileImpDigi, "Import Digibar formats")
self.FileImpIdronaut = wx.MenuItem(FileImp, MENU_FILE_IMP_IDRONAUT, "Idronaut (*.txt)",
"Import an Idronaut cast", wx.ITEM_NORMAL)
FileImp.AppendItem(self.FileImpIdronaut)
self.FileImpSaiv = wx.MenuItem(FileImp, MENU_FILE_IMP_SAIV, "Saiv (*.txt)",
"Import a Saiv cast", wx.ITEM_NORMAL)
FileImp.AppendItem(self.FileImpSaiv)
self.FileImpSeabird = wx.MenuItem(FileImp, MENU_FILE_IMP_SEABIRD, "Seabird (.cnv)",
"Import a Seabird cast", wx.ITEM_NORMAL)
FileImp.AppendItem(self.FileImpSeabird)
self.FileImpSippican = wx.MenuItem(FileImp, MENU_FILE_IMP_SIPPICAN, "Sippican (.edf)",
"Import a Sippican cast", wx.ITEM_NORMAL)
FileImp.AppendItem(self.FileImpSippican)
self.FileImpTuro = wx.MenuItem(FileImp, MENU_FILE_IMP_TURO, "Turo (.nc)",
"Import a Turo cast", wx.ITEM_NORMAL)
FileImp.AppendItem(self.FileImpTuro)
self.FileImpUNB = wx.MenuItem(FileImp, MENU_FILE_IMP_UNB, "UNB (.unb)",
"Import a UNB cast", wx.ITEM_NORMAL)
FileImp.AppendItem(self.FileImpUNB)
FileImpVale = wx.Menu()
self.FileImpValeMidas = wx.MenuItem(FileImpVale, MENU_FILE_IMP_VALE_MIDAS, "Midas (.000)",
"Import a Valeport Midas cast", wx.ITEM_NORMAL)
FileImpVale.AppendItem(self.FileImpValeMidas)
self.FileImpValeMonitor = wx.MenuItem(FileImpVale, MENU_FILE_IMP_VALE_MON, "Monitor (.000)",
"Import a Valeport Monitor cast", wx.ITEM_NORMAL)
FileImpVale.AppendItem(self.FileImpValeMonitor)
self.FileImpValeMiniS = wx.MenuItem(FileImpVale, MENU_FILE_IMP_VALE_MINIS, "MiniSVP (.txt)",
"Import a Valeport MiniSVP cast", wx.ITEM_NORMAL)
FileImpVale.AppendItem(self.FileImpValeMiniS)
FileImp.AppendMenu(MENU_FILE_IMP_VALEPORT, "Valeport", FileImpVale, "Import Valeport formats")
self.FileMenu.AppendMenu(MENU_FILE_IMP, "Import cast", FileImp, "Import an SSP cast")
# File/Query
FileQuery = wx.Menu()
self.FileQuerySis = wx.MenuItem(FileQuery, MENU_FILE_QUERY_SIS, "Kongsberg SIS",
"Retrieve the SSP cast in use by SIS", wx.ITEM_NORMAL)
FileQuery.AppendItem(self.FileQuerySis)
self.FileQueryRtofs = wx.MenuItem(FileQuery, MENU_FILE_QUERY_RTOFS, "RTOFS atlas",
"Retrieve a predicted RTOFS-based SSP", wx.ITEM_NORMAL)
FileQuery.AppendItem(self.FileQueryRtofs)
self.FileQueryWoa = wx.MenuItem(FileQuery, MENU_FILE_QUERY_WOA, "WOA09 atlas",
"Retrieve statistical info about the SSP in the area", wx.ITEM_NORMAL)
FileQuery.AppendItem(self.FileQueryWoa)
self.FileMenu.AppendMenu(MENU_FILE_QUERY, "Query from", FileQuery,
"Retrieve SSP info from external sources")
# File / Export
FileExp = wx.Menu()
self.FileExpCast = wx.MenuItem(FileExp, MENU_FILE_EXPORT_CAST, "Export selected formats",
"Export the current SSP in the selected formats", wx.ITEM_NORMAL)
FileExp.AppendItem(self.FileExpCast)
FileExp.AppendSeparator()
self.FileExpHips = wx.MenuItem(FileExp, MENU_FILE_EXPORT_HIPS, "Caris HIPS (.svp)",
"Export the current SSP as Caris HIPS format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpHips)
self.FileExpCsv = wx.MenuItem(FileExp, MENU_FILE_EXPORT_CSV, "Comma-separated (.csv)",
"Export the current SSP as comma-separated format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpCsv)
self.FileExpElac = wx.MenuItem(FileExp, MENU_FILE_EXPORT_ELAC, "Elac (.sva)",
"Export the current SSP as Elac format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpElac)
self.FileExpVel = wx.MenuItem(FileExp, MENU_FILE_EXPORT_VEL, "Hypack (.vel)",
"Export the current SSP as Hypack format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpVel)
self.FileExpIxblue = wx.MenuItem(FileExp, MENU_FILE_EXPORT_IXBLUE, "IXBLUE (.txt)",
"Export the current SSP as IXBLUE format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpIxblue)
self.FileExpAsvp = wx.MenuItem(FileExp, MENU_FILE_EXPORT_ASVP, "Kongsberg (.asvp)",
"Export the current SSP as Kongsberg format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpAsvp)
self.FileExpPro = wx.MenuItem(FileExp, MENU_FILE_EXPORT_PRO, "Sonardyne (.pro)",
"Export the current SSP as Sonardyne format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpPro)
self.FileExpUnb = wx.MenuItem(FileExp, MENU_FILE_EXPORT_UNB, "UNB (.unb)",
"Export the current SSP as UNB format", wx.ITEM_CHECK)
FileExp.AppendItem(self.FileExpUnb)
self.FileMenu.AppendMenu(MENU_FILE_EXPORT, "Export SSP", FileExp,
"Export the current SSP")
self.FileClear = wx.MenuItem(self.FileMenu, MENU_FILE_CLEAR, "Clear",
"Clear the loaded cast", wx.ITEM_NORMAL)
self.FileMenu.AppendItem(self.FileClear)
self.FileMenu.AppendSeparator()
self.FileExit = wx.MenuItem(self.FileMenu, MENU_FILE_EXIT, "Exit",
"Quit SSP Manager", wx.ITEM_NORMAL)
self.FileMenu.AppendItem(self.FileExit)
self.SVPEditorFrame_menubar.Append(self.FileMenu, "File")
# ### VIEW ###
self.ViewMenu = wx.Menu()
self.ResetView = wx.MenuItem(self.ViewMenu, MENU_VIEW_RESET, "Reset plot view",
"Reset the plot view", wx.ITEM_NORMAL)
self.ViewMenu.AppendItem(self.ResetView)
self.ViewMenu.AppendSeparator()
self.ViewHideWOA = wx.MenuItem(self.ViewMenu, MENU_VIEW_HIDE_WOA, "Hide WOA info",
"Hide the visualization of WOA info", wx.ITEM_CHECK)
self.ViewMenu.AppendItem(self.ViewHideWOA)
self.HideFlagged = wx.MenuItem(self.ViewMenu, MENU_VIEW_HIDE_FLAGGED, "Hide flagged data",
"Hide all the flagged data", wx.ITEM_CHECK)
self.ViewMenu.AppendItem(self.HideFlagged)
self.HideDepth = wx.MenuItem(self.ViewMenu, MENU_VIEW_HIDE_DEPTH, "Hide depth",
"Hide the depth visualization on the plot", wx.ITEM_CHECK)
self.ViewMenu.AppendItem(self.HideDepth)
self.SVPEditorFrame_menubar.Append(self.ViewMenu, "View")
# ### Process ###
self.ProcessMenu = wx.Menu()
self.ProcessLoadSal = wx.MenuItem(self.ProcessMenu, MENU_PROC_LOAD_SAL, "Load salinity",
"Load salinity from reference cast [XBT only]", wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessLoadSal)
self.ProcessLoadTempSal = wx.MenuItem(self.ProcessMenu, MENU_PROC_LOAD_TEMP_SAL,
"Load temperature/salinity",
"Load temperature and salinity from reference cast [SVP and XBT only]",
wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessLoadTempSal)
self.ProcessLoadSurfSpeed = wx.MenuItem(self.ProcessMenu, MENU_PROC_LOAD_SURFSP, "Get surface sound speed",
"Get the surface sound speed value from SIS", wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessLoadSurfSpeed)
self.ProcessMenu.AppendSeparator()
self.ProcessExtend = wx.MenuItem(self.ProcessMenu, MENU_PROC_EXTEND_CAST, "Extend cast",
"Extend the cast using the reference cast", wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessExtend)
self.ProcessInspection = wx.Menu()
self.PlotZoom = wx.MenuItem(self.ProcessInspection, MENU_PROC_INS_ZOOM, "Zoom",
"Zoom on plot by mouse selection", wx.ITEM_RADIO)
self.ProcessInspection.AppendItem(self.PlotZoom)
self.PlotFlag = wx.MenuItem(self.ProcessInspection, MENU_PROC_INS_FLAG, "Flag",
"Flag samples on plot by mouse selection", wx.ITEM_RADIO)
self.ProcessInspection.AppendItem(self.PlotFlag)
self.PlotUnflag = wx.MenuItem(self.ProcessInspection, MENU_PROC_INS_UNFLAG, "Unflag",
"Unflag samples on plot by mouse selection", wx.ITEM_RADIO)
self.ProcessInspection.AppendItem(self.PlotUnflag)
self.PlotInsert = wx.MenuItem(self.ProcessInspection, MENU_PROC_INS_INSERT, "Insert",
"Insert a sample by mouse clicking", wx.ITEM_RADIO)
self.ProcessInspection.AppendItem(self.PlotInsert)
self.ProcessMenu.AppendMenu(MENU_PROC_INSPECTION, "Visual inspection", self.ProcessInspection,
"Visual inspection of the resulting profile")
self.ProcessMenu.AppendSeparator()
self.ProcessPreviewThinning = wx.MenuItem(self.ProcessMenu, MENU_PROC_PREVIEW_THINNING, "Preview thinning",
"Preview the thinning required by some client types", wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessPreviewThinning)
self.ProcessSendProfile = wx.MenuItem(self.ProcessMenu, MENU_PROC_SEND_PROFILE, "Send SSP",
"Send the current SSP to the clients", wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessSendProfile)
self.ProcessMenu.AppendSeparator()
self.ProcessStoreDb = wx.MenuItem(self.ProcessMenu, MENU_PROC_STORE_SSP, "Store SSP",
"Locally store the current SSP data", wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessStoreDb)
self.ProcessRedoSsp = wx.MenuItem(self.ProcessMenu, MENU_PROC_REDO_SSP, "Redo processing",
"Redo the processing by reloading the stored raw data", wx.ITEM_NORMAL)
self.ProcessMenu.AppendItem(self.ProcessRedoSsp)
self.ProcessLogMetadata = wx.MenuItem(self.ProcessMenu, MENU_PROC_LOG_METADATA, "Log processing metadata",
"Store the processing metadata in the log DB", wx.ITEM_CHECK)
self.ProcessMenu.AppendItem(self.ProcessLogMetadata)
# self.ProcessMenu.AppendSeparator()
# self.ProcessExpressMode = wx.MenuItem(self.ProcessMenu, MENU_PROC_EXPRESS, "Express mode",
# "Activate the express mode (be careful!)", wx.ITEM_NORMAL)
# self.ProcessMenu.AppendItem(self.ProcessExpressMode)
self.SVPEditorFrame_menubar.Append(self.ProcessMenu, "Process")
# ### DATABASE ###
self.DbMenu = wx.Menu()
# Query
DbQuery = wx.Menu()
self.DbQueryInternalDb = wx.MenuItem(DbQuery, MENU_DB_QUERY_INTERNAL_DB, "Internal DB",
"Retrieve the locally stored SSP", wx.ITEM_NORMAL)
DbQuery.AppendItem(self.DbQueryInternalDb)
self.DbQueryExternalDb = wx.MenuItem(DbQuery, MENU_DB_QUERY_EXTERNAL_DB, "External DB",
"Retrieve a SSP stored in the select DB", wx.ITEM_NORMAL)
DbQuery.AppendItem(self.DbQueryExternalDb)
self.DbMenu.AppendMenu(MENU_DB_QUERY, "Query from", DbQuery,
"Retrieve SSP info from databases")
# Db/Delete
DbDelete = wx.Menu()
self.DbDeleteInternalDb = wx.MenuItem(DbDelete, MENU_DB_DELETE_INTERNAL_DB, "Internal DB",
"Delete a locally stored SSP", wx.ITEM_NORMAL)
DbDelete.AppendItem(self.DbDeleteInternalDb)
self.DbDeleteExternalDb = wx.MenuItem(DbDelete, MENU_DB_DELETE_EXTERNAL_DB, "External DB",
"Delete a SSP stored in the select DB", wx.ITEM_NORMAL)
DbDelete.AppendItem(self.DbDeleteExternalDb)
self.DbMenu.AppendMenu(MENU_DB_DELETE, "Delete SSP", DbDelete, "")
# Db/Export
DbExport = wx.Menu()
self.DbExportShp = wx.MenuItem(DbExport, MENU_DB_EXPORT_SHP, "Shapefile",
"Export all the stored SSPs as a Shapefile", wx.ITEM_NORMAL)
DbExport.AppendItem(self.DbExportShp)
self.DbExportKml = wx.MenuItem(DbExport, MENU_DB_EXPORT_KML, "KML",
"Export all the stored SSPs as a KML file", wx.ITEM_NORMAL)
DbExport.AppendItem(self.DbExportKml)
self.DbExportCsv = wx.MenuItem(DbExport, MENU_DB_EXPORT_CSV, "CSV",
"Export all the stored SSPs as a Comma-Separated file", wx.ITEM_NORMAL)
DbExport.AppendItem(self.DbExportCsv)
self.DbMenu.AppendMenu(MENU_DB_EXPORT, "Export", DbExport, "")
# Db/Plot
DbPlot = wx.Menu()
self.DbPlotMapSsp = wx.MenuItem(DbPlot, MENU_DB_PLOT_MAP_SSP, "Map all SSPs",
"Create a map with all the stored SSPs", wx.ITEM_NORMAL)
DbPlot.AppendItem(self.DbPlotMapSsp)
self.DbPlotDailySsp = wx.MenuItem(DbPlot, MENU_DB_PLOT_DAILY_SSP, "Create daily plot",
"Create a SSP plot for each day", wx.ITEM_NORMAL)
DbPlot.AppendItem(self.DbPlotDailySsp)
self.DbSaveDailySsp = wx.MenuItem(DbPlot, MENU_DB_SAVE_DAILY_SSP, "Save daily plot",
"Save a SSP plot for each day", wx.ITEM_NORMAL)
DbPlot.AppendItem(self.DbSaveDailySsp)
self.DbMenu.AppendMenu(MENU_DB_PLOT, "Plot", DbPlot, "")
self.SVPEditorFrame_menubar.Append(self.DbMenu, "Database")
# ### Tools ###
self.ToolsMenu = wx.Menu()
ServerMenu = wx.Menu()
self.ToolsServerStart = wx.MenuItem(ServerMenu, MENU_SERVER_START, "Start server",
"Start SIS server mode", wx.ITEM_NORMAL)
ServerMenu.AppendItem(self.ToolsServerStart)
self.ToolsServerSend = wx.MenuItem(ServerMenu, MENU_SERVER_SEND, "Force send",
"Force to send a SSP", wx.ITEM_NORMAL)
ServerMenu.AppendItem(self.ToolsServerSend)
self.ToolsServerStop = wx.MenuItem(ServerMenu, MENU_SERVER_STOP, "Stop server",
"Stop SIS server mode", wx.ITEM_NORMAL)
ServerMenu.AppendItem(self.ToolsServerStop)
ServerMenu.AppendSeparator()
self.ServerLogMetadata = wx.MenuItem(ServerMenu, MENU_SERVER_LOG_METADATA, "Log server metadata",
"Store the server metadata in the log DB", wx.ITEM_CHECK)
ServerMenu.AppendItem(self.ServerLogMetadata)
self.ToolsMenu.AppendMenu(MENU_TOOLS_SERVER, "Server", ServerMenu, "")
self.ToolsMenu.AppendSeparator()
self.ToolsRefMon = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_REF_MON, "Refraction Monitor",
"Open the refraction monitor", wx.ITEM_NORMAL)
self.ToolsMenu.AppendItem(self.ToolsRefMon)
self.ToolsGeoMap = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_GEO_MONITOR, "Geo Monitor",
"Open the Geo Monitor", wx.ITEM_NORMAL)
self.ToolsMenu.AppendItem(self.ToolsGeoMap)
self.ToolsMenu.AppendSeparator()
ReferenceMenu = wx.Menu()
self.ToolsSetReferenceCast = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_SET_REFERENCE_CAST,
"Set as reference cast",
"Set the current SSP as reference cast", wx.ITEM_NORMAL)
ReferenceMenu.AppendItem(self.ToolsSetReferenceCast)
self.ToolsEditReferenceCast = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_EDIT_REFERENCE_CAST,
"Edit the reference cast",
"Edit the current reference cast", wx.ITEM_NORMAL)
ReferenceMenu.AppendItem(self.ToolsEditReferenceCast)
self.ToolsClearReferenceCast = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_CLEAR_REFERENCE_CAST,
"Clear the reference cast",
"Clear the current reference cast", wx.ITEM_NORMAL)
ReferenceMenu.AppendItem(self.ToolsClearReferenceCast)
self.ToolsMenu.AppendMenu(MENU_TOOLS_REFERENCE, "Reference cast", ReferenceMenu,
"Actions about a reference cast")
self.ToolsMenu.AppendSeparator()
self.ToolsUserInputs = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_USER_INPUTS, "Monitor user inputs",
"Provide information about user inputs", wx.ITEM_NORMAL)
self.ToolsMenu.AppendItem(self.ToolsUserInputs)
self.ToolsModifySettings = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_MODIFY_SETTINGS, "Modify SSP settings",
"Open tool to modify SSP settings", wx.ITEM_NORMAL)
self.ToolsMenu.AppendItem(self.ToolsModifySettings)
self.ToolsViewSettings = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_VIEW_SETTINGS, "View SSP settings",
"Show SSP settings information", wx.ITEM_NORMAL)
self.ToolsMenu.AppendItem(self.ToolsViewSettings)
self.ToolsReloadSettings = wx.MenuItem(self.ToolsMenu, MENU_TOOLS_RELOAD_SETTINGS, "Reload SSP settings",
"Reload SSP settings information", wx.ITEM_NORMAL)
self.ToolsMenu.AppendItem(self.ToolsReloadSettings)
self.SVPEditorFrame_menubar.Append(self.ToolsMenu, "Tools")
self.HelpMenu = wx.Menu()
self.HelpManual = wx.MenuItem(self.HelpMenu, MENU_HELP_MANUAL, "Manual",
"Open the manual", wx.ITEM_NORMAL)
self.HelpMenu.AppendItem(self.HelpManual)
self.HelpMenu.AppendSeparator()
self.HelpAbout = wx.MenuItem(self.HelpMenu, MENU_HELP_ABOUT, "About",
"Info about the application", wx.ITEM_NORMAL)
self.HelpMenu.AppendItem(self.HelpAbout)
self.SVPEditorFrame_menubar.Append(self.HelpMenu, "Help")
self.SetMenuBar(self.SVPEditorFrame_menubar)
self.frame_statusbar = self.CreateStatusBar(2)
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.on_file_import_castaway, self.FileImpCastaway)
self.Bind(wx.EVT_MENU, self.on_file_import_digibar_pro, self.FileImpDigibarPro)
self.Bind(wx.EVT_MENU, self.on_file_import_digibar_s, self.FileImpDigibarS)
self.Bind(wx.EVT_MENU, self.on_file_import_sippican, self.FileImpSippican)
self.Bind(wx.EVT_MENU, self.on_file_import_seabird, self.FileImpSeabird)
self.Bind(wx.EVT_MENU, self.on_file_import_turo, self.FileImpTuro)
self.Bind(wx.EVT_MENU, self.on_file_import_unb, self.FileImpUNB)
self.Bind(wx.EVT_MENU, self.on_file_import_valeport_midas, self.FileImpValeMidas)
self.Bind(wx.EVT_MENU, self.on_file_import_valeport_monitor, self.FileImpValeMonitor)
self.Bind(wx.EVT_MENU, self.on_file_import_valeport_minisvp, self.FileImpValeMiniS)
self.Bind(wx.EVT_MENU, self.on_file_import_idronaut, self.FileImpIdronaut)
self.Bind(wx.EVT_MENU, self.on_file_import_saiv, self.FileImpSaiv)
self.Bind(wx.EVT_MENU, self.on_file_query_woa09, self.FileQueryWoa)
self.Bind(wx.EVT_MENU, self.on_file_query_rtofs, self.FileQueryRtofs)
self.Bind(wx.EVT_MENU, self.on_file_query_sis, self.FileQuerySis)
self.Bind(wx.EVT_MENU, self.on_file_export_cast, self.FileExpCast)
self.Bind(wx.EVT_MENU, self.on_file_export_asvp, self.FileExpAsvp)
self.Bind(wx.EVT_MENU, self.on_file_export_pro, self.FileExpPro)
self.Bind(wx.EVT_MENU, self.on_file_export_vel, self.FileExpVel)
self.Bind(wx.EVT_MENU, self.on_file_export_ixblue, self.FileExpIxblue)
self.Bind(wx.EVT_MENU, self.on_file_export_hips, self.FileExpHips)
self.Bind(wx.EVT_MENU, self.on_file_export_unb, self.FileExpUnb)
self.Bind(wx.EVT_MENU, self.on_file_export_elac, self.FileExpElac)
self.Bind(wx.EVT_MENU, self.on_file_export_csv, self.FileExpCsv)
self.Bind(wx.EVT_MENU, self.on_file_clear, self.FileClear)
self.Bind(wx.EVT_MENU, self.on_file_exit, self.FileExit)
self.Bind(wx.EVT_MENU, self.on_plot_zoom, self.PlotZoom)
self.Bind(wx.EVT_MENU, self.on_plot_flag, self.PlotFlag)
self.Bind(wx.EVT_MENU, self.on_plot_unflag, self.PlotUnflag)
self.Bind(wx.EVT_MENU, self.on_plot_insert, self.PlotInsert)
self.Bind(wx.EVT_MENU, self.on_reset_view, self.ResetView)
self.Bind(wx.EVT_MENU, self.on_view_hide_woa, self.ViewHideWOA)
self.Bind(wx.EVT_MENU, self.on_view_hide_flagged, self.HideFlagged)
self.Bind(wx.EVT_MENU, self.on_view_hide_depth, self.HideDepth)
self.Bind(wx.EVT_MENU, self.on_process_load_salinity, self.ProcessLoadSal)
self.Bind(wx.EVT_MENU, self.on_process_load_temp_and_sal, self.ProcessLoadTempSal)
self.Bind(wx.EVT_MENU, self.on_process_load_surface_ssp, self.ProcessLoadSurfSpeed)
self.Bind(wx.EVT_MENU, self.on_process_extend, self.ProcessExtend)
self.Bind(wx.EVT_MENU, self.on_process_preview_thinning, self.ProcessPreviewThinning)
self.Bind(wx.EVT_MENU, self.on_process_send_profile, self.ProcessSendProfile)
self.Bind(wx.EVT_MENU, self.on_process_store_db, self.ProcessStoreDb)
self.Bind(wx.EVT_MENU, self.on_process_redo_processing, self.ProcessRedoSsp)
self.Bind(wx.EVT_MENU, self.on_process_log_metadata, self.ProcessLogMetadata)
# self.Bind(wx.EVT_MENU, self.on_process_express_mode, self.ProcessExpressMode)
self.Bind(wx.EVT_MENU, self.on_db_query_internal_db, self.DbQueryInternalDb)
self.Bind(wx.EVT_MENU, self.on_db_query_external_db, self.DbQueryExternalDb)
self.Bind(wx.EVT_MENU, self.on_db_delete_internal, self.DbDeleteInternalDb)
self.Bind(wx.EVT_MENU, self.on_db_delete_external, self.DbDeleteExternalDb)
self.Bind(wx.EVT_MENU, self.on_db_export_shp, self.DbExportShp)
self.Bind(wx.EVT_MENU, self.on_db_export_kml, self.DbExportKml)
self.Bind(wx.EVT_MENU, self.on_db_export_csv, self.DbExportCsv)
self.Bind(wx.EVT_MENU, self.on_db_plot_map_ssp, self.DbPlotMapSsp)
self.Bind(wx.EVT_MENU, self.on_db_plot_daily_ssp, self.DbPlotDailySsp)
self.Bind(wx.EVT_MENU, self.on_db_save_daily_ssp, self.DbSaveDailySsp)
self.Bind(wx.EVT_MENU, self.on_tools_refraction_monitor, self.ToolsRefMon)
self.Bind(wx.EVT_MENU, self.on_tools_geo_monitor, self.ToolsGeoMap)
self.Bind(wx.EVT_MENU, self.on_tools_server_start, self.ToolsServerStart)
self.Bind(wx.EVT_MENU, self.on_tools_server_send, self.ToolsServerSend)
self.Bind(wx.EVT_MENU, self.on_tools_server_stop, self.ToolsServerStop)
self.Bind(wx.EVT_MENU, self.on_tools_server_log_metadata, self.ServerLogMetadata)
self.Bind(wx.EVT_MENU, self.on_tools_set_reference_cast, self.ToolsSetReferenceCast)
self.Bind(wx.EVT_MENU, self.on_tools_edit_reference_cast, self.ToolsEditReferenceCast)
self.Bind(wx.EVT_MENU, self.on_tools_clear_reference_cast, self.ToolsClearReferenceCast)
self.Bind(wx.EVT_MENU, self.on_tools_user_inputs, self.ToolsUserInputs)
self.Bind(wx.EVT_MENU, self.on_tools_modify_settings, self.ToolsModifySettings)
self.Bind(wx.EVT_MENU, self.on_tools_view_settings, self.ToolsViewSettings)
self.Bind(wx.EVT_MENU, self.on_tools_reload_settings, self.ToolsReloadSettings)
self.Bind(wx.EVT_MENU, self.on_help_manual, self.HelpManual)
self.Bind(wx.EVT_MENU, self.on_help_about, self.HelpAbout)
def __set_properties(self):
self.SetTitle("SSP Manager")
# self.SetSize((1100, 700))
self.frame_statusbar.SetStatusWidths([-1, 400])
SSPManFrame_statusbar_fields = ["", ""]
for i in range(len(SSPManFrame_statusbar_fields)):
self.frame_statusbar.SetStatusText(SSPManFrame_statusbar_fields[i], i)
def __do_layout(self):
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer_1)
self.Layout()
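    # The event handlers below are wxGlade-style stubs: each one only logs
    # that it is not implemented and then calls event.Skip(). The concrete
    # behaviour is presumably supplied elsewhere (e.g. by a subclass that
    # overrides the relevant handler); a minimal, hedged example of such an
    # override is sketched at the end of this file.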
def on_file_query_woa09(self, event):
log.info("Event handler 'on_file_query_woa09' not implemented!")
event.Skip()
def on_file_query_rtofs(self, event):
log.info("Event handler 'on_file_query_rtofs' not implemented!")
event.Skip()
def on_file_query_sis(self, event):
log.info("Event handler 'on_file_query_sis' not implemented!")
event.Skip()
def on_process_store_db(self, event):
log.info("Event handler 'on_process_store_db' not implemented!")
event.Skip()
def on_process_log_metadata(self, event):
log.info("Event handler 'on_process_log_metadata' not implemented!")
event.Skip()
def on_file_import_castaway(self, event):
log.info("Event handler 'on_file_import_castaway' not implemented!")
event.Skip()
def on_file_import_digibar_pro(self, event):
log.info("Event handler 'on_file_import_digibar_pro' not implemented!")
event.Skip()
def on_file_import_digibar_s(self, event):
log.info("Event handler 'on_file_import_digibar_s' not implemented!")
event.Skip()
def on_file_import_sippican(self, event):
log.info("Event handler 'on_file_import_sippican' not implemented!")
event.Skip()
def on_file_import_seabird(self, event):
log.info("Event handler 'on_file_import_seabird' not implemented!")
event.Skip()
def on_file_import_turo(self, event):
log.info("Event handler 'on_file_import_turo' not implemented!")
event.Skip()
def on_file_import_unb(self, event):
log.info("Event handler 'on_file_import_unb' not implemented!")
event.Skip()
def on_file_import_valeport_midas(self, event):
log.info("Event handler 'on_file_import_valeport_midas' not implemented!")
event.Skip()
def on_file_import_valeport_monitor(self, event):
log.info("Event handler 'on_file_import_valeport_monitor' not implemented!")
event.Skip()
def on_file_import_valeport_minisvp(self, event):
log.info("Event handler 'on_file_import_valeport_minisvp' not implemented!")
event.Skip()
def on_file_import_idronaut(self, event):
log.info("Event handler 'on_file_import_idronaut' not implemented!")
event.Skip()
def on_file_import_saiv(self, event):
log.info("Event handler 'on_file_import_saiv' not implemented!")
event.Skip()
def on_file_export_cast(self, event):
log.info("Event handler 'on_file_export_cast' not implemented!")
event.Skip()
def on_file_export_asvp(self, event):
log.info("Event handler 'on_file_export_asvp' not implemented!")
event.Skip()
def on_file_export_pro(self, event):
log.info("Event handler 'on_file_export_pro' not implemented!")
event.Skip()
def on_file_export_vel(self, event):
log.info("Event handler 'on_file_export_vel' not implemented!")
event.Skip()
def on_file_export_ixblue(self, event):
log.info("Event handler 'on_file_export_ixblue' not implemented!")
event.Skip()
def on_file_export_hips(self, event):
log.info("Event handler 'on_file_export_hips' not implemented!")
event.Skip()
def on_file_export_unb(self, event):
log.info("Event handler 'on_file_export_unb' not implemented!")
event.Skip()
def on_file_export_elac(self, event):
log.info("Event handler 'on_file_export_elac' not implemented!")
event.Skip()
def on_file_export_csv(self, event):
log.info("Event handler 'on_file_export_csv' not implemented!")
event.Skip()
def on_file_clear(self, event):
log.info("Event handler 'on_file_clear' not implemented!")
event.Skip()
def on_file_exit(self, event):
log.info("Event handler 'on_file_exit' not implemented!")
event.Skip()
def on_plot_zoom(self, event):
log.info("Event handler 'on_plot_zoom' not implemented!")
event.Skip()
def on_plot_flag(self, event):
log.info("Event handler 'on_plot_flag' not implemented!")
event.Skip()
def on_plot_unflag(self, event):
log.info("Event handler 'on_plot_unflag' not implemented!")
event.Skip()
def on_plot_insert(self, event):
log.info("Event handler 'on_plot_insert' not implemented!")
event.Skip()
def on_reset_view(self, event):
log.info("Event handler 'on_reset_view' not implemented!")
event.Skip()
def on_view_hide_woa(self, event):
log.info("Event handler 'on_view_hide_woa' not implemented!")
event.Skip()
def on_view_hide_flagged(self, event):
log.info("Event handler 'on_view_hide_flagged' not implemented!")
event.Skip()
def on_view_hide_depth(self, event):
log.info("Event handler 'on_view_hide_depth' not implemented!")
event.Skip()
def on_tools_refraction_monitor(self, event):
log.info("Event handler 'on_tools_refraction_monitor' not implemented!")
event.Skip()
def on_tools_geo_monitor(self, event):
log.info("Event handler 'on_tools_geo_monitor' not implemented!")
event.Skip()
# def on_process_express_mode(self, event):
# log.info("Event handler `OnToolsExpress' not implemented!")
# event.Skip()
def on_process_load_salinity(self, event):
log.info("Event handler 'on_process_load_salinity' not implemented!")
event.Skip()
def on_process_load_temp_and_sal(self, event):
log.info("Event handler 'on_process_load_temp_and_sal' not implemented!")
event.Skip()
def on_process_load_surface_ssp(self, event):
log.info("Event handler 'on_process_load_surface_ssp' not implemented!")
event.Skip()
def on_process_extend(self, event):
log.info("Event handler 'on_process_extend' not implemented!")
event.Skip()
def on_process_preview_thinning(self, event):
log.info("Event handler 'on_process_preview_thinning' not implemented!")
event.Skip()
def on_process_send_profile(self, event):
log.info("Event handler 'on_process_send_profile' not implemented!")
event.Skip()
def on_process_redo_processing(self, event):
log.info("Event handler 'on_process_redo_processing' not implemented!")
event.Skip()
def on_db_query_internal_db(self, event):
log.info("Event handler 'on_db_query_internal' not implemented!")
event.Skip()
def on_db_query_external_db(self, event):
log.info("Event handler 'on_db_query_external' not implemented!")
event.Skip()
def on_db_delete_internal(self, event):
log.info("Event handler 'on_db_delete_internal' not implemented!")
event.Skip()
def on_db_delete_external(self, event):
log.info("Event handler 'on_db_delete_external' not implemented!")
event.Skip()
def on_db_export_shp(self, event):
log.info("Event handler 'on_db_export_shp' not implemented!")
event.Skip()
def on_db_export_kml(self, event):
log.info("Event handler 'on_db_export_kml' not implemented!")
event.Skip()
def on_db_export_csv(self, event):
log.info("Event handler 'on_db_export_csv' not implemented!")
event.Skip()
def on_db_plot_map_ssp(self, event):
log.info("Event handler 'on_db_plot_map_ssp' not implemented!")
event.Skip()
def on_db_plot_daily_ssp(self, event):
log.info("Event handler 'on_db_plot_daily_ssp' not implemented!")
event.Skip()
def on_db_save_daily_ssp(self, event):
log.info("Event handler 'on_db_save_daily_ssp' not implemented!")
event.Skip()
def on_tools_user_inputs(self, event):
log.info("Event handler 'on_tools_user_inputs' not implemented!")
event.Skip()
def on_tools_set_reference_cast(self, event):
log.info("Event handler 'on_tools_set_reference_cast' not implemented!")
event.Skip()
def on_tools_edit_reference_cast(self, event):
log.info("Event handler 'on_tools_edit_reference_cast' not implemented!")
event.Skip()
def on_tools_clear_reference_cast(self, event):
log.info("Event handler 'on_tools_clear_reference_cast' not implemented!")
event.Skip()
def on_tools_server_start(self, event):
log.info("Event handler 'on_tools_server_start' not implemented!")
event.Skip()
def on_tools_server_send(self, event):
log.info("Event handler 'on_tools_server_send' not implemented!")
event.Skip()
def on_tools_server_stop(self, event):
log.info("Event handler 'on_tools_server_stop' not implemented!")
event.Skip()
def on_tools_server_log_metadata(self, event):
log.info("Event handler 'on_tools_server_log_metadata' not implemented!")
event.Skip()
def on_tools_modify_settings(self, event):
log.info("Event handler 'on_tools_modify_settings' not implemented!")
event.Skip()
def on_tools_view_settings(self, event):
log.info("Event handler 'on_tools_view_settings' not implemented!")
event.Skip()
def on_tools_reload_settings(self, event):
log.info("Event handler 'on_tools_reload_settings' not implemented!")
event.Skip()
def on_help_manual(self, event):
log.info("Event handler 'on_help_manual' not implemented!")
event.Skip()
def on_help_about(self, event):
log.info("Event handler 'on_help_about' not implemented!")
event.Skip()
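

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module):
# the stub handlers above follow the usual wxPython pattern of binding menu
# events in __init__ and overriding the handlers with real behaviour.
# _DemoFrame and the __main__ guard below are hypothetical names used purely
# to show that pattern; they do not reflect the actual SSP Manager entry
# point or menu layout. The wx package is assumed to be imported at the top
# of this module (it already is, since the frame above uses wx.EVT_MENU).
class _DemoFrame(wx.Frame):
    def __init__(self, *args, **kwds):
        wx.Frame.__init__(self, *args, **kwds)
        # Build a one-item File menu and bind it, mirroring the Bind() calls
        # used by the frame above.
        menubar = wx.MenuBar()
        file_menu = wx.Menu()
        exit_item = file_menu.Append(wx.ID_EXIT, "E&xit", "Quit the demo")
        menubar.Append(file_menu, "&File")
        self.SetMenuBar(menubar)
        self.Bind(wx.EVT_MENU, self.on_file_exit, exit_item)

    def on_file_exit(self, event):
        # A concrete override: instead of logging "not implemented",
        # close the frame.
        self.Close()


if __name__ == "__main__":
    # Stand-alone run of the demo frame; the real application entry point
    # lives elsewhere in the package.
    app = wx.App(False)
    _DemoFrame(None, wx.ID_ANY, "Demo").Show()
    app.MainLoop()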