| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
src/demo.py | JKtubage/Neural-Network-deep-learning | 1 | 12767151 | import mnist_loader
import network
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# print("training data")
# print(type(training_data))
# print(len(training_data))
# print(training_data[0][0].shape)
# print(training_data[0][1].shape)
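# Feed-forward network: 784 inputs (28x28 MNIST pixels), one hidden layer of 30 neurons, 10 output classes.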
net = network.Network([784, 30, 10])
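# Train via network.SGD: 30 epochs, mini-batch size 10, learning rate 3.0 (per the usual signature of the accompanying network module), evaluating on test_data after each epoch.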
net.SGD(training_data, 30, 10, 3.0, test_data=test_data) | 2.703125 | 3 |
sdk/python/pulumi_nomad/acl_token.py | pulumi/pulumi-nomad | 3 | 12767152 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AclTokenArgs', 'AclToken']
@pulumi.input_type
class AclTokenArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
global_: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
        The set of arguments for constructing an AclToken resource.
:param pulumi.Input[str] type: `(string: <required>)` - The type of token this is. Use `client`
for tokens that will have policies associated with them. Use `management`
for tokens that can perform any action.
:param pulumi.Input[bool] global_: `(bool: false)` - Whether the token should be replicated to all
regions, or if it will only be used in the region it was created in.
:param pulumi.Input[str] name: `(string: "")` - A human-friendly name for this token.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: `(set: [])` - A set of policy names to associate with this
token. Must be set on `client`-type tokens, must not be set on
`management`-type tokens. Policies do not need to exist before being
used here.
"""
pulumi.set(__self__, "type", type)
if global_ is not None:
pulumi.set(__self__, "global_", global_)
if name is not None:
pulumi.set(__self__, "name", name)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - The type of token this is. Use `client`
for tokens that will have policies associated with them. Use `management`
for tokens that can perform any action.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="global")
def global_(self) -> Optional[pulumi.Input[bool]]:
"""
`(bool: false)` - Whether the token should be replicated to all
regions, or if it will only be used in the region it was created in.
"""
return pulumi.get(self, "global_")
@global_.setter
def global_(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "global_", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
`(string: "")` - A human-friendly name for this token.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
`(set: [])` - A set of policy names to associate with this
token. Must be set on `client`-type tokens, must not be set on
`management`-type tokens. Policies do not need to exist before being
used here.
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@pulumi.input_type
class _AclTokenState:
def __init__(__self__, *,
accessor_id: Optional[pulumi.Input[str]] = None,
create_time: Optional[pulumi.Input[str]] = None,
global_: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
secret_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AclToken resources.
:param pulumi.Input[str] accessor_id: `(string)` - A non-sensitive identifier for this token that
can be logged and shared safely without granting any access to the cluster.
:param pulumi.Input[str] create_time: `(string)` - The timestamp the token was created.
:param pulumi.Input[bool] global_: `(bool: false)` - Whether the token should be replicated to all
regions, or if it will only be used in the region it was created in.
:param pulumi.Input[str] name: `(string: "")` - A human-friendly name for this token.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: `(set: [])` - A set of policy names to associate with this
token. Must be set on `client`-type tokens, must not be set on
`management`-type tokens. Policies do not need to exist before being
used here.
:param pulumi.Input[str] secret_id: `(string)` - The token value itself, which is presented for
access to the cluster.
:param pulumi.Input[str] type: `(string: <required>)` - The type of token this is. Use `client`
for tokens that will have policies associated with them. Use `management`
for tokens that can perform any action.
"""
if accessor_id is not None:
pulumi.set(__self__, "accessor_id", accessor_id)
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if global_ is not None:
pulumi.set(__self__, "global_", global_)
if name is not None:
pulumi.set(__self__, "name", name)
if policies is not None:
pulumi.set(__self__, "policies", policies)
if secret_id is not None:
pulumi.set(__self__, "secret_id", secret_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="accessorId")
def accessor_id(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - A non-sensitive identifier for this token that
can be logged and shared safely without granting any access to the cluster.
"""
return pulumi.get(self, "accessor_id")
@accessor_id.setter
def accessor_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "accessor_id", value)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - The timestamp the token was created.
"""
return pulumi.get(self, "create_time")
@create_time.setter
def create_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_time", value)
@property
@pulumi.getter(name="global")
def global_(self) -> Optional[pulumi.Input[bool]]:
"""
`(bool: false)` - Whether the token should be replicated to all
regions, or if it will only be used in the region it was created in.
"""
return pulumi.get(self, "global_")
@global_.setter
def global_(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "global_", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
`(string: "")` - A human-friendly name for this token.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
`(set: [])` - A set of policy names to associate with this
token. Must be set on `client`-type tokens, must not be set on
`management`-type tokens. Policies do not need to exist before being
used here.
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - The token value itself, which is presented for
access to the cluster.
"""
return pulumi.get(self, "secret_id")
@secret_id.setter
def secret_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - The type of token this is. Use `client`
for tokens that will have policies associated with them. Use `management`
for tokens that can perform any action.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
class AclToken(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
global_: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Create an AclToken resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] global_: `(bool: false)` - Whether the token should be replicated to all
regions, or if it will only be used in the region it was created in.
:param pulumi.Input[str] name: `(string: "")` - A human-friendly name for this token.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: `(set: [])` - A set of policy names to associate with this
token. Must be set on `client`-type tokens, must not be set on
`management`-type tokens. Policies do not need to exist before being
used here.
:param pulumi.Input[str] type: `(string: <required>)` - The type of token this is. Use `client`
for tokens that will have policies associated with them. Use `management`
for tokens that can perform any action.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AclTokenArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Create an AclToken resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param AclTokenArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AclTokenArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
global_: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AclTokenArgs.__new__(AclTokenArgs)
__props__.__dict__["global_"] = global_
__props__.__dict__["name"] = name
__props__.__dict__["policies"] = policies
if type is None and not opts.urn:
raise TypeError("Missing required property 'type'")
__props__.__dict__["type"] = type
__props__.__dict__["accessor_id"] = None
__props__.__dict__["create_time"] = None
__props__.__dict__["secret_id"] = None
super(AclToken, __self__).__init__(
'nomad:index/aclToken:AclToken',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
accessor_id: Optional[pulumi.Input[str]] = None,
create_time: Optional[pulumi.Input[str]] = None,
global_: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
secret_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None) -> 'AclToken':
"""
Get an existing AclToken resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] accessor_id: `(string)` - A non-sensitive identifier for this token that
can be logged and shared safely without granting any access to the cluster.
:param pulumi.Input[str] create_time: `(string)` - The timestamp the token was created.
:param pulumi.Input[bool] global_: `(bool: false)` - Whether the token should be replicated to all
regions, or if it will only be used in the region it was created in.
:param pulumi.Input[str] name: `(string: "")` - A human-friendly name for this token.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: `(set: [])` - A set of policy names to associate with this
token. Must be set on `client`-type tokens, must not be set on
`management`-type tokens. Policies do not need to exist before being
used here.
:param pulumi.Input[str] secret_id: `(string)` - The token value itself, which is presented for
access to the cluster.
:param pulumi.Input[str] type: `(string: <required>)` - The type of token this is. Use `client`
for tokens that will have policies associated with them. Use `management`
for tokens that can perform any action.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AclTokenState.__new__(_AclTokenState)
__props__.__dict__["accessor_id"] = accessor_id
__props__.__dict__["create_time"] = create_time
__props__.__dict__["global_"] = global_
__props__.__dict__["name"] = name
__props__.__dict__["policies"] = policies
__props__.__dict__["secret_id"] = secret_id
__props__.__dict__["type"] = type
return AclToken(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessorId")
def accessor_id(self) -> pulumi.Output[str]:
"""
`(string)` - A non-sensitive identifier for this token that
can be logged and shared safely without granting any access to the cluster.
"""
return pulumi.get(self, "accessor_id")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
`(string)` - The timestamp the token was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="global")
def global_(self) -> pulumi.Output[Optional[bool]]:
"""
`(bool: false)` - Whether the token should be replicated to all
regions, or if it will only be used in the region it was created in.
"""
return pulumi.get(self, "global_")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
`(string: "")` - A human-friendly name for this token.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policies(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
`(set: [])` - A set of policy names to associate with this
token. Must be set on `client`-type tokens, must not be set on
`management`-type tokens. Policies do not need to exist before being
used here.
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> pulumi.Output[str]:
"""
`(string)` - The token value itself, which is presented for
access to the cluster.
"""
return pulumi.get(self, "secret_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
`(string: <required>)` - The type of token this is. Use `client`
for tokens that will have policies associated with them. Use `management`
for tokens that can perform any action.
"""
return pulumi.get(self, "type")
| 1.601563 | 2 |
cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py | MarioCarrilloA/x.stx-update | 0 | 12767153 |
#!/usr/bin/env python
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import sys
import web
import patch_id_allocator as pida
port = 8888
urls = (
'/get_patch_id', 'get_patch_id',
)
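# Handler for /get_patch_id: GET and POST both delegate to patch_id_allocator with optional sw_version and prefix (default "CGCS").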
class get_patch_id:
def GET(self):
data = web.input(sw_version=None, prefix="CGCS")
output = pida.get_patch_id(data.sw_version, data.prefix)
return output
def POST(self):
data = web.input(sw_version=None, prefix="CGCS")
output = pida.get_patch_id(data.sw_version, data.prefix)
return output
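# Subclass web.application so the server can listen on a configurable port instead of web.py's default 8080.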
class MyApplication(web.application):
def run(self, port=8080, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))
def main():
app = MyApplication(urls, globals())
app.run(port=port)
if __name__ == "__main__":
main()
| 2.296875 | 2 |
metagraph-plugin/metagraph_katana/plugins/__init__.py | chakpongchung/katana | 64 | 12767154 | ############################
# Libraries used as plugins
############################
# try:
# import katana as _
# has_katana = True
# except ImportError:
# has_katana = False
import metagraph
# Use this as the entry_point object
registry = metagraph.PluginRegistry("metagraph_katana")
def find_plugins():
# Ensure we import all items we want registered
from . import metagraph_katana
registry.register_from_modules(metagraph_katana)
return registry.plugins
| 1.71875 | 2 |
src/06_test/test_06.py | Erfun76/Erfun76 | 0 | 12767155 | import sys
sys.path.insert(0, '../')
import warnings
warnings.simplefilter('ignore')
from get_config import get_config
from utils import fix_seed, rle2mask, mask2rle
from models import build_model
from utils_inference import get_pred_mask, get_rle
from get_fold_idxs_list import get_fold_idxs_list
import numpy as np
import pandas as pd
import os
from os.path import join as opj
import gc
import cv2
import rasterio
from rasterio.windows import Window
import torch
if __name__ == '__main__':
# config
fix_seed(2021)
config = get_config()
INPUT_PATH = config['INPUT_PATH']
OUTPUT_PATH = config['OUTPUT_PATH']
os.makedirs(OUTPUT_PATH, exist_ok=True)
device = config['device']
print(device)
# import data
train_df = pd.read_csv(opj(INPUT_PATH, 'train.csv'))
info_df = pd.read_csv(opj(INPUT_PATH, 'HuBMAP-20-dataset_information.csv'))
sub_df = pd.read_csv(opj(INPUT_PATH, 'sample_submission.csv'))
print('train_df.shape = ', train_df.shape)
print('info_df.shape = ', info_df.shape)
print('sub_df.shape = ', sub_df.shape)
# inference
LOAD_LOCAL_WEIGHT_PATH_LIST = {}
for seed in config['split_seed_list']:
LOAD_LOCAL_WEIGHT_PATH_LIST[seed] = []
for fold in config['FOLD_LIST']:
LOAD_LOCAL_WEIGHT_PATH_LIST[seed].append(
opj(config['model_path'], f'model_seed{seed}_fold{fold}_bestscore.pth'))
model_list = {}
for seed in config['split_seed_list']:
model_list[seed] = []
for path in LOAD_LOCAL_WEIGHT_PATH_LIST[seed]:
print("Loading weights from %s" % path)
model = build_model(model_name=config['model_name'],
resolution=(None, None),
deepsupervision=config['deepsupervision'],
clfhead=config['clfhead'],
clf_threshold=config['clf_threshold'],
load_weights=False).to(device)
model.load_state_dict(torch.load(path))
model.eval()
model_list[seed].append(model)
# pseudo-label for test data
val_patient_numbers_list = [
[68250], # fold0
[65631], # fold1
[67177], # fold2
]
test_patient_numbers_list = [
[63921], # fold0
[63921], # fold1
[63921], # fold2
]
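    # Patient 63921 is the held-out test subject for every fold; the folds differ only in their validation patient.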
_, _, tst_idxs_list = get_fold_idxs_list(info_df, val_patient_numbers_list, test_patient_numbers_list)
print("test index list: {}".format(tst_idxs_list))
train_df['predicted'] = None
for idx in tst_idxs_list:
print('idx = ', idx)
pred_mask, h, w = get_pred_mask(idx, train_df, info_df, model_list, mode='train')
rle = get_rle(pred_mask, h, w)
train_df.loc[idx, 'predicted'] = rle
train_df.to_csv(opj(OUTPUT_PATH, 'test_mask_predicted.csv'), index=False)
# pseudo-label for test data
# for idx in range(len(sub_df)):
# print('idx = ', idx)
# pred_mask,h,w = get_pred_mask(idx, sub_df, model_list, mode='test')
# rle = get_rle(pred_mask,h,w)
# sub_df.loc[idx,'predicted'] = rle
# sub_df.to_csv(opj(OUTPUT_PATH, 'pseudo_test.csv'), index=False) | 1.867188 | 2 |
tests/settings.py | movermeyer/django-umanage | 4 | 12767156 |
import os
DEBUG = False
ALLOWED_HOSTS = ['*']
LANGUAGE_CODE = 'en-us'
ROOT_URLCONF = 'urls'
SECRET_KEY = '<KEY>'
SITE_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
SITE_ID = 1
TIME_ZONE = 'UTC'
USE_I18N = True
UMANAGE_BASE_TEMPLATE = 'base_umanage.html'
UMANAGE_BASE_UNAUTHENTICATED_TEMPLATE = 'base_umanage_unauthenticated.html'
UMANAGE_FROM_EMAIL = '<EMAIL>'
UMANAGE_SITE_ROOT_URI = 'http://somedomain.com'
UMANAGE_SITE_NAME = 'My Site Name'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.humanize',
'django_core',
'umanage',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'umanage.context_processors.common',
)
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'tests/templates'),
os.path.join(SITE_ROOT, 'umanage/templates'),
)
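# Helper that resolves paths relative to this settings module; used for the sqlite test database below.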
here = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': here('test_db.db')
}
}
| 1.851563 | 2 |
src/transformers/models/bartpho/tokenization_bartpho_fast.py | datquocnguyen/transformers | 5 | 12767157 | # coding=utf-8
# Copyright 2021 <NAME> and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for BARTpho-syllable model."""
import os
from collections import defaultdict
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
from ...tokenization_utils import AddedToken
from ...tokenization_utils_base import EncodingFast
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_bartpho import BartphoTokenizer
else:
BartphoTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "sentencepiece.bpe.model",
"monolingual_vocab_file": "dict.txt",
"tokenizer_file": "tokenizer.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
"tokenizer_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" BARTpho tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`XLMRobertaTokenizerFast`]. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = BartphoTokenizer
def __init__(
self,
vocab_file=None,
monolingual_vocab_file=None,
tokenizer_file=None,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs
):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
vocab_file,
monolingual_vocab_file,
tokenizer_file=tokenizer_file,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self.vocab_file = vocab_file
self.monolingual_vocab_file = monolingual_vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def get_added_vocab_hacking(self):
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
`Dict[str, int], Dict[int, int]`: The added tokens, and their original and new ids
"""
base_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=False)
full_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=True)
if full_vocab_size == base_vocab_size:
return {}, {}
# Tokens in added_vocab should have ids that are equal to or larger than the size of base_vocab
added_vocab = dict(
(self._tokenizer.id_to_token(index), index + 1 - base_vocab_size + self.mask_token_id)
for index in range(base_vocab_size, full_vocab_size)
)
id_mapping = dict((index, self._tokenizer.token_to_id(tok)) for tok, index in added_vocab.items())
return added_vocab, id_mapping
def _decode(
self,
token_ids: Union[int, List[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> str:
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
if isinstance(token_ids, int):
token_ids = [token_ids]
# Mapping ids into their original values
_, id_mapping = self.get_added_vocab_hacking()
if len(id_mapping) > 0:
token_ids = [id_mapping[id] if id in id_mapping else id for id in token_ids]
text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def _convert_encoding(
self,
encoding: EncodingFast,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> Tuple[Dict[str, Any], List[EncodingFast]]:
"""
Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list
of encodings, take care of building a batch from overflowing tokens.
Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are
lists (overflows) of lists (tokens).
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
added_vocab, _ = self.get_added_vocab_hacking()
for e in encodings:
# encoding_dict["input_ids"].append(e.ids)
# Reassign ids of tokens due to the hacking strategy
ids = []
for id, token in zip(e.ids, e.tokens):
if id <= self.mask_token_id:
ids.append(id)
else:
if token.strip() in added_vocab:
ids.append(added_vocab[token.strip()])
else:
ids.append(self.unk_token_id)
encoding_dict["input_ids"].append(ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_length:
# encoding_dict["length"].append(len(e.ids))
encoding_dict["length"].append(len(ids))
return encoding_dict, encodings
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BARTpho sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTpho does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a "
"slow tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
return
out_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
)
out_monolingual_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(out_monolingual_vocab_file):
copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
return (out_vocab_file, out_monolingual_vocab_file)
| 1.773438 | 2 |
site/flask/lib/python2.7/site-packages/openid/consumer/__init__.py | theholyhades1/tartanHacks2015 | 5,079 | 12767158 |
"""
This package contains the portions of the library used only when
implementing an OpenID consumer.
"""
__all__ = ['consumer', 'discover']
| 1.421875 | 1 |
library/dnsimple.py | kumulustech/kolla-packet | 4 | 12767159 | #!/usr/bin/env python
from ansible.module_utils.basic import *
import requests
import os
from dnsimple import DNSimple
'''
parameters:
dnsimple_token DNSimple API token for the account
dnsimple_account DNSimple account id
domain Zone (domain) managed in DNSimple
type, name, content, ttl DNS record fields, combined into a record object
example record:
record={
'type':'A',
'name':'control',
'content':'192.168.10.1',
'ttl':'60'
}
state = 'present' or 'absent'
'''
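# Both operations return a (changed, record_id, message) tuple that main() passes to module.exit_json().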
def create_record(data):
    record = {
        'type': data['type'],
        'name': data['name'],
        'content': data['content'],
        'ttl': data['ttl']
    }
    dns = DNSimple(api_token=data['dnsimple_token'], account_id=data['dnsimple_account'])
    if 'present' in data['state']:
        # If a record with the same name already exists, update it in place.
        for n in dns.records(data['domain']):
            if record['name'] == n['record']['name']:
                res = dns.update_record(data['domain'], n['record']['id'], record)
                return (True, res['record']['id'], 'record updated')
        # No existing record matched (including an empty zone), so create it.
        res = dns.add_record(data['domain'], record)
        return (True, res['record']['id'], 'record added')
    return (False, "{}", 'no record added')
def delete_record(data):
dns=DNSimple(api_token=data['dnsimple_token'],account_id=data['dnsimple_account'])
if 'absent' in data['state']:
for n in dns.records(data['domain']):
if data['name'] == n['record']['name']:
dns.delete_record(data['domain'],n['record']['id'])
return (True, None, 'record deleted')
return (False, None, 'no record deleted')
def main():
fields = {
"type": {"required": False, "default": "A", "type": "str"},
"name": {"required": True, "type": "str"},
"content": {"required": True, "type": "str"},
"ttl": {"required": False, "default": "600", "type": "str"},
"domain": {"required": True, "type": "str"},
"dnsimple_token": {"required": True, "type": "str"},
"dnsimple_account": {"required": True, "type": "str"},
"state": {"default": "present","choices": ['present', 'absent'],"type": 'str'}
}
choice_map = {
"present": create_record,
"absent": delete_record
}
module = AnsibleModule(argument_spec=fields)
has_changed, record_id, result = choice_map.get(module.params['state'])(module.params)
module.exit_json(changed=has_changed, record_id=record_id, meta=result)
if __name__ == '__main__':
main() | 2.546875 | 3 |
answer_type/model_utils.py | Supermaxman/epic_qa | 0 | 12767160 |
from transformers import AutoModelForSequenceClassification, BertModel
from transformers import AdamW, get_linear_schedule_with_warmup
from torch import nn
import torch
import pytorch_lightning as pl
from abc import ABC, abstractmethod
class ATPBert(pl.LightningModule, ABC):
def __init__(
self, pre_model_name, learning_rate, weight_decay, lr_warmup, updates_total, category_map, types_map,
torch_cache_dir, predict_mode=False):
super().__init__()
self.pre_model_name = pre_model_name
self.category_map = category_map
self.types_map = types_map
self.torch_cache_dir = torch_cache_dir
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.lr_warmup = lr_warmup
self.updates_total = updates_total
self.predict_mode = predict_mode
self.score_func = nn.Softmax(dim=-1)
self.category_criterion = nn.CrossEntropyLoss(reduction='none')
self.types_criterion = nn.BCEWithLogitsLoss(reduction='none')
self.save_hyperparameters()
@abstractmethod
def forward(self, input_ids, attention_mask, token_type_ids):
pass
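    # Per-example loss: cross-entropy over the answer category plus the mean binary cross-entropy over the multi-label answer types.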
def _loss(self, cat_logits, cat_labels, type_logits, type_labels):
cat_loss = self.category_criterion(
cat_logits,
cat_labels
)
type_loss = self.types_criterion(
type_logits,
type_labels
)
type_loss = type_loss.mean(dim=-1)
loss = cat_loss + type_loss
return loss
def training_step(self, batch, batch_nb):
loss, logits, prediction, correct_count, total_count, accuracy = self._forward_step(batch, batch_nb)
loss = loss.mean()
self.log('train_loss', loss)
self.log('train_accuracy', accuracy)
result = {
'loss': loss
}
return result
def test_step(self, batch, batch_nb):
return self._eval_step(batch, batch_nb, 'test')
def validation_step(self, batch, batch_nb):
return self._eval_step(batch, batch_nb, 'val')
def _forward_step(self, batch, batch_nb):
category_logits, types_logits = self(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
)
if not self.predict_mode:
category_labels = batch['labels']
types_labels = batch['types']
loss = self._loss(
category_logits,
category_labels,
types_logits,
types_labels
)
prediction = category_logits.max(dim=1)[1]
batch_size = category_labels.shape[0]
correct_count = (prediction.eq(category_labels)).float().sum()
total_count = float(batch_size)
accuracy = correct_count / batch_size
return loss, category_labels, prediction, correct_count, total_count, accuracy
else:
return category_logits, types_logits
def _eval_step(self, batch, batch_nb, name):
if not self.predict_mode:
loss, logits, prediction, correct_count, total_count, accuracy = self._forward_step(batch, batch_nb)
result = {
f'{name}_loss': loss.mean(),
f'{name}_batch_loss': loss,
f'{name}_batch_accuracy': accuracy,
f'{name}_correct_count': correct_count,
f'{name}_total_count': total_count,
}
return result
else:
category_logits, types_logits = self._forward_step(batch, batch_nb)
result = {
f'{name}_category_logits': category_logits,
f'{name}_types_logits': types_logits,
f'{name}_ids': batch['ids'],
}
return result
def _eval_epoch_end(self, outputs, name):
if not self.predict_mode:
loss = torch.cat([x[f'{name}_batch_loss'] for x in outputs], dim=0).mean()
correct_count = torch.stack([x[f'{name}_correct_count'] for x in outputs], dim=0).sum()
total_count = sum([x[f'{name}_total_count'] for x in outputs])
accuracy = correct_count / total_count
self.log(f'{name}_loss', loss)
self.log(f'{name}_accuracy', accuracy)
else:
category_logits = torch.cat([x[f'{name}_category_logits'] for x in outputs], dim=0)
types_logits = torch.cat([x[f'{name}_types_logits'] for x in outputs], dim=0)
ids = [ex_id for ex_ids in outputs for ex_id in ex_ids[f'{name}_ids']]
# predictions = {}
cat_id_map = {v: k for k, v in self.category_map.items()}
types_id_map = {v: k for k, v in self.types_map.items()}
types_threshold = 0.5
types_top_k = 10
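            # Note: the 0.5 threshold is compared against raw type logits (no sigmoid applied), which corresponds to a probability of roughly 0.62.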
all_ids = []
all_categories = []
all_types = []
for ex_id, ex_cat_logits, ex_types_logits in zip(ids, category_logits, types_logits):
category_pred_id = torch.argmax(ex_cat_logits).item()
category_pred = cat_id_map[category_pred_id]
ex_types = []
for type_id, ex_type_logit in enumerate(ex_types_logits):
ex_type_logit = ex_type_logit.item()
if ex_type_logit < types_threshold:
continue
type_name = types_id_map[type_id]
ex_types.append((ex_type_logit, type_name))
ex_types = sorted(ex_types, key=lambda x: x[0], reverse=True)
ex_types = ex_types[:types_top_k]
types_pred = [x[1] for x in ex_types]
# predictions[ex_id] = {
# 'category': category_pred,
# 'types': types_pred
# }
all_ids.append(ex_id)
all_categories.append(category_pred)
all_types.append(types_pred)
self.write_prediction_dict(
{
'id': all_ids,
'category': all_categories,
'types': all_types
},
filename='predictions.pt'
)
def validation_epoch_end(self, outputs):
self._eval_epoch_end(outputs, 'val')
def test_epoch_end(self, outputs):
self._eval_epoch_end(outputs, 'test')
def configure_optimizers(self):
params = self._get_optimizer_params(self.weight_decay)
optimizer = AdamW(
params,
lr=self.learning_rate,
weight_decay=self.weight_decay,
correct_bias=False
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.lr_warmup * self.updates_total,
num_training_steps=self.updates_total
)
return [optimizer], [scheduler]
def _get_optimizer_params(self, weight_decay):
param_optimizer = list(self.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_params = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
return optimizer_params
class ATPBertFromLanguageModel(ATPBert):
def __init__(
self, pre_model_name, learning_rate, weight_decay, lr_warmup, updates_total, category_map, types_map,
torch_cache_dir=None, predict_mode=False):
super().__init__(
pre_model_name, learning_rate, weight_decay, lr_warmup, updates_total, category_map, types_map,
torch_cache_dir, predict_mode)
self.bert = BertModel.from_pretrained(
pre_model_name,
cache_dir=torch_cache_dir
)
self.category_classifier = nn.Linear(
self.bert.config.hidden_size,
len(category_map)
)
self.types_classifier = nn.Linear(
self.bert.config.hidden_size,
len(types_map)
)
self.config = self.bert.config
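    # Both heads classify from the [CLS] token embedding: one for the single answer category, one for the multi-label answer types.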
def forward(self, input_ids, attention_mask, token_type_ids):
cls_embeddings = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids
)[0][:, 0]
# [batch_size, 2]
category_logits = self.category_classifier(cls_embeddings)
types_logits = self.types_classifier(cls_embeddings)
return category_logits, types_logits
| 2.3125 | 2 |
chromeos-config/cros_config_host/cros_config_test_schema_unittest.py | strassek/chromiumos-platform2 | 4 | 12767161 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=module-missing-docstring,class-missing-docstring
from __future__ import print_function
import json
import os
import cros_config_test_schema
import libcros_schema
from chromite.lib import cros_test_lib
from chromite.lib import osutils
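# YAML fixture using anchors/aliases: the shared command groups are reused across the device entries below.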
BASIC_CONFIG = """
mosys-base: &mosys_base_cmds
name: 'mosys'
args:
- 'platform id'
- 'platform name'
nautilus-mosys-base: &nautilus_mosys_cmds
name: 'mosys'
args:
- 'platform version'
cros-config-base: &cros_config_base_cmds
name: 'cros-config'
args:
- '/ brand-name'
cros-config-lte: &cros_config_lte_cmds
name: 'cros-config'
args:
- '/arc/build-properties device'
chromeos:
devices:
- device-name: 'nautilus'
command-groups:
- *mosys_base_cmds
- *nautilus_mosys_cmds
- *cros_config_base_cmds
- device-name: 'nautiluslte'
command-groups:
- *mosys_base_cmds
- *nautilus_mosys_cmds
- *cros_config_base_cmds
- *cros_config_lte_cmds
"""
this_dir = os.path.dirname(__file__)
class ParseArgsTests(cros_test_lib.TestCase):
def testParseArgs(self):
argv = ['-s', 'schema', '-c', 'config', '-f', 'nautilus', '-o', 'output']
opts = cros_config_test_schema.ParseArgs(argv)
self.assertEqual(opts.schema, 'schema')
self.assertEqual(opts.config, 'config')
self.assertEqual(opts.filter, 'nautilus')
self.assertEqual(opts.output, 'output')
class TransformConfigTests(cros_test_lib.TestCase):
def testBasicTransform(self):
result = cros_config_test_schema.TransformConfig(BASIC_CONFIG)
json_dict = json.loads(result)
self.assertEqual(1, len(json_dict))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(2, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
device = json_obj.chromeos.devices[1]
self.assertEqual('nautiluslte', device.device_name)
self.assertEqual(4, len(device.command_groups))
def testTransformConfig_NoMatch(self):
result = cros_config_test_schema.TransformConfig(
BASIC_CONFIG, device_filter='abc123')
json_dict = json.loads(result)
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(0, len(json_obj.chromeos.devices))
def testTransformConfig_FilterMatch(self):
result = cros_config_test_schema.TransformConfig(
BASIC_CONFIG, device_filter='nautilus')
json_dict = json.loads(result)
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(1, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
class MainTests(cros_test_lib.TempDirTestCase):
def testMainImportNoFilter(self):
output = os.path.join(self.tempdir, 'output.json')
cros_config_test_schema.Start(
os.path.join(this_dir, 'test_data/cros_config_test_device.yaml'),
None,
output,
None)
json_dict = json.loads(osutils.ReadFile(output))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(2, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
device = json_obj.chromeos.devices[1]
self.assertEqual('nautiluslte', device.device_name)
self.assertEqual(4, len(device.command_groups))
def testMainImportFilterNautilus(self):
output = os.path.join(self.tempdir, 'output.json')
cros_config_test_schema.Start(
os.path.join(this_dir, 'test_data/cros_config_test_device.yaml'),
'nautilus',
output,
None)
json_dict = json.loads(osutils.ReadFile(output))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(1, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
def testMainImportFilterNautilusLte(self):
output = os.path.join(self.tempdir, 'output.json')
cros_config_test_schema.Start(
os.path.join(this_dir, 'test_data/cros_config_test_device.yaml'),
'nautiluslte',
output,
None)
json_dict = json.loads(osutils.ReadFile(output))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(1, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautiluslte', device.device_name)
self.assertEqual(4, len(device.command_groups))
if __name__ == '__main__':
cros_test_lib.main(module=__name__)
| 1.96875 | 2 |
caoxu/20180326/h3.py | python20180319howmework/homework | 0 | 12767162 | '''Read a three-digit integer and check whether it is a narcissistic number (e.g. 153 = 1*1*1 + 5*5*5 + 3*3*3).'''
from math import pow
num1 = input("Enter a three-digit integer: ")
l = len(num1)              # number of digits entered
num2 = int(num1)
num3 = num2
res = 0
while num2 > 0:
    m = num2 % 10          # take the last digit
    num2 //= 10            # drop the last digit
    res += pow(m, l)       # add the digit raised to the digit count
if res == num3:
    print("It is a narcissistic number")
else:
    print("It is not a narcissistic number")
| 3.828125 | 4 |
EPro-PnP-Det/epropnp_det/ops/pnp/builder.py | Lakonik/EPro-PnP | 19 | 12767163 |
"""
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
"""
from mmcv.utils import Registry, build_from_cfg
PNP = Registry('pnp')
CAMERA = Registry('camera')
COSTFUN = Registry('cost_fun')
def build_pnp(cfg, **default_args):
return build_from_cfg(cfg, PNP, default_args)
def build_camera(cfg, **default_args):
return build_from_cfg(cfg, CAMERA, default_args)
def build_cost_fun(cfg, **default_args):
return build_from_cfg(cfg, COSTFUN, default_args)
| 1.648438 | 2 |
ABC_A/ABC064_A.py | ryosuke0825/atcoder_python | 0 | 12767164 | rgb = input()
rgb = rgb.replace(" ", "")
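# Join the three space-separated digits into a single three-digit number and test divisibility by 4.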
if int(rgb) % 4 == 0:
print("YES")
else:
print("NO")
| 3.484375 | 3 |
tests/plugins/test_docker_api.py | rcerven/atomic-reactor | 0 | 12767165 | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from dockerfile_parse import DockerfileParser
from atomic_reactor.plugin import PluginFailedException
from atomic_reactor.build import InsideBuilder, BuildResult
from atomic_reactor.util import ImageName, CommandResult
from atomic_reactor.inner import DockerBuildWorkflow
from tests.docker_mock import mock_docker
from flexmock import flexmock
import pytest
from tests.constants import MOCK_SOURCE
class MockDockerTasker(object):
def inspect_image(self, name):
return {}
def build_image_from_path(self):
return True
class X(object):
pass
class MockInsideBuilder(object):
def __init__(self, failed=False):
self.tasker = MockDockerTasker()
self.base_image = ImageName(repo='Fedora', tag='22')
self.image_id = 'asd'
self.image = 'image'
self.failed = failed
self.df_path = 'some'
self.df_dir = 'some'
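        # Stub out build_image_from_path with a generator that yields a single UTF-8 encoded log line.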
def simplegen(x, y):
yield "some\u2018".encode('utf-8')
flexmock(self.tasker, build_image_from_path=simplegen)
@property
def source(self):
result = X()
setattr(result, 'dockerfile_path', '/')
setattr(result, 'path', '/tmp')
return result
def pull_base_image(self, source_registry, insecure=False):
pass
def get_built_image_info(self):
return {'Id': 'some'}
def inspect_built_image(self):
return None
def ensure_not_built(self):
pass
@pytest.mark.parametrize('is_failed', [
True,
False,
])
def test_build(is_failed):
"""
    Test that the docker build API plugin works.
"""
flexmock(DockerfileParser, content='df_content')
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
flexmock(CommandResult).should_receive('is_failed').and_return(is_failed)
if is_failed:
flexmock(CommandResult, error_detail="built error")
if is_failed:
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
else:
workflow.build_docker_image()
assert isinstance(workflow.buildstep_result['docker_api'], BuildResult)
assert workflow.build_result
assert workflow.build_result.is_failed() == is_failed
| 1.992188 | 2 |
numpy/testing/tests/test_utils.py | qpython-android/QPypi-numpy | 7 | 12767166 |
import numpy as np
from numpy.testing import *
import unittest
class _GenericTest(object):
def _test_equal(self, a, b):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
try:
self._assert_func(a, b)
passed = True
except AssertionError:
pass
else:
raise AssertionError("a and b are found equal but are not")
def test_array_rank1_eq(self):
"""Test two equal array of rank 1 are found equal."""
a = np.array([1, 2])
b = np.array([1, 2])
self._test_equal(a, b)
def test_array_rank1_noteq(self):
"""Test two different array of rank 1 are found not equal."""
a = np.array([1, 2])
b = np.array([2, 2])
self._test_not_equal(a, b)
def test_array_rank2_eq(self):
"""Test two equal array of rank 2 are found equal."""
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 2], [3, 4]])
self._test_equal(a, b)
def test_array_diffshape(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array([1, 2])
b = np.array([[1, 2], [1, 2]])
self._test_not_equal(a, b)
def test_objarray(self):
"""Test object arrays."""
a = np.array([1, 1], dtype=np.object)
self._test_equal(a, 1)
class TestEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_array_equal
def test_generic_rank1(self):
"""Test rank 1 array for all dtypes."""
def foo(t):
a = np.empty(2, t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_generic_rank3(self):
"""Test rank 3 array for all dtypes."""
def foo(t):
a = np.empty((4, 2, 3), t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_nan_array(self):
"""Test arrays with nan values in them."""
a = np.array([1, 2, np.nan])
b = np.array([1, 2, np.nan])
self._test_equal(a, b)
c = np.array([1, 2, 3])
self._test_not_equal(c, b)
def test_string_arrays(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array(['floupi', 'floupa'])
b = np.array(['floupi', 'floupa'])
self._test_equal(a, b)
c = np.array(['floupipi', 'floupa'])
self._test_not_equal(c, b)
def test_recarrays(self):
"""Test record arrays."""
a = np.empty(2, [('floupi', np.float), ('floupa', np.float)])
a['floupi'] = [1, 2]
a['floupa'] = [1, 2]
b = a.copy()
self._test_equal(a, b)
c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)])
c['floupipi'] = a['floupi'].copy()
c['floupa'] = a['floupa'].copy()
self._test_not_equal(c, b)
class TestAlmostEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_array_almost_equal
class TestRaises(unittest.TestCase):
def setUp(self):
class MyException(Exception):
pass
self.e = MyException
def raises_exception(self, e):
raise e
def does_not_raise_exception(self):
pass
def test_correct_catch(self):
f = raises(self.e)(self.raises_exception)(self.e)
def test_wrong_exception(self):
try:
f = raises(self.e)(self.raises_exception)(RuntimeError)
except RuntimeError:
return
else:
raise AssertionError("should have caught RuntimeError")
def test_catch_no_raise(self):
try:
f = raises(self.e)(self.does_not_raise_exception)()
except AssertionError:
return
else:
raise AssertionError("should have raised an AssertionError")
if __name__ == '__main__':
run_module_suite()
| 3.34375 | 3 |
data/strategies/publishers/sciencedirect.py | jamesrharwood/journal-guidelines | 0 | 12767167 |
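# Strategy data for ScienceDirect journals: a URL pattern keyed by journal id and a template for the matching Elsevier guide-for-authors page.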
url = "www.sciencedirect.com/(science/)?journal/(aip/)?{ID}"
extractor_args = dict(restrict_text=[r"guide\s*for\s*authors"])
template = "https://www.elsevier.com/journals/{ID}/{issn_print}/guide-for-authors"
| 1.546875 | 2 |
task_1/preprocess_data.py | icobx/claim_detection | 0 | 12767168 |
import os
import re
import sys
import json
import spacy
import pickle
# from urlextract import URLExtract
from tqdm import tqdm
from nltk.corpus import stopwords
from spacy.language import Language
from helper_funcs import *
from sklearn.model_selection import train_test_split
from definitions import INPUT_DATA_PATHS, PROC_DATA_PATH
def preprocess(dataset: str = 'covid_tweets', spacy_nlp: Language = None):
data_paths = INPUT_DATA_PATHS[dataset]
spacy_nlp = spacy.load('en_core_web_lg') \
if spacy_nlp is None else spacy_nlp
stopwords_set = set(stopwords.words('english'))
    # TODO: mention getting a different text processor for political debates
if dataset == 'covid_tweets':
text_proc = get_text_processor(word_stats='twitter')
elif dataset == 'political_debates':
text_proc = get_text_processor(word_stats='english')
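    # Process each data split of the chosen dataset line by line.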
for dsplit_type, split_dict in tqdm(data_paths.items(), desc='data split'):
data_dict = {}
lines = None
with open(split_dict['filepath'], 'r') as f:
lines = f.readlines()
# skip header
for line in tqdm(lines[1:], desc='lines', leave=False):
line_split = line.strip().split('\t')
if dataset == 'covid_tweets':
# topic, link not used anywhere
topic, id, link, content = line_split[:4]
claim, label = '0', '0'
if dsplit_type != 'test':
claim, label = line_split[4:]
elif dataset == 'political_debates':
i, id, src, content = line_split[:4]
claim, label = '-1', '0'
if dsplit_type != 'test':
label = line_split[-1]
prcsd = text_proc.pre_process_doc(content)
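            # Keep only purely alphanumeric tokens, drop tokens shorter than 3 characters unless numeric, then build a stop-word-free variant.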
cleaned = [
word for word in prcsd if not re.search(
                    r'[^a-z0-9\s]+',
word,
)
]
cleaned = [
word for word in cleaned
if len(word) > 2
or word.isnumeric()
]
cleaned_ns = [
word for word in cleaned if word not in stopwords_set
]
spacied = spacy_nlp(' '.join(cleaned))
spacied_ns = spacy_nlp(' '.join(cleaned_ns))
pos, pos_ns = [], []
ner, ner_ns = [], []
pos = [
f'{token.text}_{token.pos_}_{token.tag_}' for token
in spacied
]
pos_ns = [
f'{token.text}_{token.pos_}_{token.tag_}' for token
in spacied_ns
]
ner = [
{
'text': entity.text,
'label': entity.label_,
'start': entity.start_char,
'end': entity.end_char,
} for entity in spacied.ents
]
ner_ns = [
{
'text': entity.text,
'label': entity.label_,
'start': entity.start_char,
'end': entity.end_char,
} for entity in spacied_ns.ents
]
data_dict[id] = {
'text': content,
'processed': prcsd,
'cleaned': cleaned,
'cleaned_ns': cleaned_ns,
'pos': pos,
'pos_ns': pos_ns,
'ner': ner,
'ner_ns': ner_ns,
'claim': claim,
'label': label
}
# data stats
pos_dict, pos_ns_dict = {'1': {}, '0': {}}, {'1': {}, '0': {}}
ner_dict, ner_ns_dict = {'1': {}, '0': {}}, {'1': {}, '0': {}}
# counter of POS and NER
for id, val in tqdm(data_dict.items(), desc='data dict'):
label = str(val['label'])
for pos_tag in val['pos']:
tag = pos_tag.split('_')[1]
pos_dict[label][tag] = 1 \
if tag not in pos_dict[label] \
else pos_dict[label][tag] + 1
for pos_tag in val['pos_ns']:
tag = pos_tag.split('_')[1]
pos_ns_dict[label][tag] = 1 \
if tag not in pos_ns_dict[label] \
else pos_ns_dict[label][tag] + 1
for ner_tag in val['ner']:
tag = ner_tag['label']
ner_dict[label][tag] = 1 \
if tag not in ner_dict[label] \
else ner_dict[label][tag] + 1
for ner_tag in val['ner_ns']:
tag = ner_tag['label']
ner_ns_dict[label][tag] = 1 \
if tag not in ner_ns_dict[label] \
else ner_ns_dict[label][tag] + 1
for k in ['0', '1']:
pos_dict[k] = {
k: v for k, v in sorted(
pos_dict[k].items(),
key=lambda item: item[1]
)
}
pos_ns_dict[k] = {
k: v for k, v in sorted(
pos_ns_dict[k].items(),
key=lambda item: item[1]
)
}
ner_dict[k] = {
k: v for k, v in sorted(
ner_dict[k].items(),
key=lambda item: item[1]
)
}
ner_ns_dict[k] = {
k: v for k, v in sorted(
ner_ns_dict[k].items(),
key=lambda item: item[1]
)
}
with open(
os.path.join(
PROC_DATA_PATH,
f"{dataset}_{split_dict['label']}_data.json"
),
'w',
encoding='utf-8'
) as f:
json.dump(data_dict, f)
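# Illustrative sketch of the token-cleaning steps above (an assumption: the sentence is
# already tokenized; the real pipeline tokenizes with get_text_processor first).
def _cleaning_steps_demo():
    stopwords_set = set(stopwords.words('english'))
    prcsd = ['the', 'vaccine', 'is', '100', '%', 'safe', '!!', 'they', 'said']
    # drop tokens containing non-alphanumeric characters
    cleaned = [w for w in prcsd if not re.search(r'[^a-z0-9\s]+', w)]
    # keep tokens longer than two characters, or purely numeric ones
    cleaned = [w for w in cleaned if len(w) > 2 or w.isnumeric()]
    # additionally drop English stopwords for the *_ns variants
    cleaned_ns = [w for w in cleaned if w not in stopwords_set]
    print(cleaned)     # ['the', 'vaccine', '100', 'safe', 'they', 'said']
    print(cleaned_ns)  # ['vaccine', '100', 'safe', 'said']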
# def does_claim_mean_checkw():
# import pandas as pd
# data_path = INPUT_DATA_PATHS['covid_tweets']['train']['filepath']
# df = pd.read_csv(data_path, sep='\t', index_col=False)
# # print(df)
# print(df[df['claim'] == 1]['check_worthiness'].describe())
def combine_debates():
import pandas as pd
data_paths = INPUT_DATA_PATHS['political_debates']
for dsplit_type, dsplit_dict in data_paths.items():
files = os.listdir(dsplit_dict['folderpath'])
if dsplit_type == 'test':
files = [f for f in files if f[-2:] != 'md']
header = ['i', 'src', 'content']
if dsplit_type != 'test':
header.append('worthy')
df_combined = pd.DataFrame()
for f in files:
fsplit = f.split('_')
debate_date = fsplit[0]
if fsplit[-1] == 'combined.tsv':
continue
df = pd.read_csv(
f"{dsplit_dict['folderpath']}/{f}",
sep='\t',
index_col=False,
names=header
)
df = df[df['src'] != 'SYSTEM']
df['id'] = df.apply(
lambda x: f"{debate_date}{x['i']}",
result_type='expand',
axis='columns'
)
header_rear = ['i', 'id', 'src', 'content']
if dsplit_type != 'test':
header_rear.append('worthy')
df = df[header_rear]
# print(df[df['src'] == 'QUESTION']['worthy'].describe())
df_combined = df_combined.append(
df,
ignore_index=True,
)
df_combined.to_csv(
dsplit_dict['filepath'],
sep='\t',
index=False,
)
# combine_debates()
preprocess(dataset='political_debates')
# does_claim_mean_checkw()
| 2.546875 | 3 |
social_media_operations.py | canberkeh/SeleniumTests | 2 | 12767169 | '''
Welcome! You can perform several social media operations with the bot defined below.
'''
import time
from getpass import getpass
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import xlsxwriter
class SocialMediaBot():
'''
    1- Choose a browser. 2- Choose a social media site. 3- Choose an operation. Tweets are saved to a text file; follower data is saved to an Excel table.
'''
browser_dict = {
"1": "C:\\Users\\admin\\AppData\\Local\\Programs\\Python\\Python39\\chromedriver.exe",
"2": "C:\\Users\\admin\\AppData\\Local\\Programs\\Python\\Python39\\geckodriver.exe"
}
def choose_browser(self):
"""MAIN - At first choose browser """
continue_on = True
while continue_on:
print("Instagram / Twitter Bot ")
print("Choose Browser\n1- Chrome\n2- Firefox ")
print("99- Exit ")
select = input("Make selection (1-2-99) : ")
if select in self.browser_dict:
self.choose_social_media(select, self.browser_dict.get(select))
elif select == "99":
raise SystemExit
else:
print("Wrong Choice ! 1-2 ")
continue
def choose_social_media(self, browser_name, browser_path):
"""Second select social media """
continue_on = True
while continue_on:
print("\nChoose Social Media\n1- Instagram\n2- Twitter")
select = input("Make selection 1/2 : ")
if select == "1":
self.choose_operation_instagram(browser_name, browser_path)
elif select == "2":
self.choose_operation_twitter(browser_name, browser_path)
else:
print("Wrong Choice ! 1-2")
continue
def choose_operation_twitter(self, browser_name, browser_path):
'''
        Choose which Twitter operation to run.
'''
continue_on = True
while continue_on:
print("\n1- Get followers list and compare\n2- Get tweets on wanted search")
select = input("Make selection 1/2 : ")
if select == "1":
self.twitter_follower_diff(browser_name, browser_path)
elif select == "2":
self.get_tweets(browser_name, browser_path)
else:
print("Wrong Choice ! 1-2")
continue
def choose_operation_instagram(self, browser_name, browser_path):
continue_on = True
while continue_on:
print("\n1- Instagram Login")
select = input("Make selection 1/2/3 : ")
if select == "1":
self.instagram_login(browser_name, browser_path)
elif select == "2":
pass
elif select == "3":
pass
else:
print("Wrong Choice ! 1-2")
continue
def instagram_login(self, browser_name, browser_path):
'''
Logs in instagram. Get follower/following list on an excel table with count numbers.
'''
given_username = input("Enter Instagram Username : ")
given_password = getpass("Enter <PASSWORD>ram Password :")
#KEEP MOVING FROM HERE !!!
print("Instagram Login Bot")
if browser_name == "1":#Open site from browser
browser = webdriver.Chrome(executable_path=browser_path)
elif browser_name == "2":
browser = webdriver.Firefox(executable_path=browser_path)
browser.get("https://www.instagram.com")
time.sleep(8)
browser.find_element_by_name("username").send_keys(given_username)
        browser.find_element_by_name('password').send_keys(given_password)
browser.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]').click()
time.sleep(4)
browser.get(f"https://www.instagram.com/{given_username}")
time.sleep(50)
def twitter_follower_diff(self, browser_name, browser_path):
'''
        Gets follower data and writes it to an Excel table.
'''
given_username = input("Enter Twitter Username : ")
given_password = getpass("Enter Twitter Password :")
if browser_name == "1":
browser = webdriver.Chrome(executable_path=browser_path)
elif browser_name == "2":
browser = webdriver.Firefox(executable_path=browser_path)
browser.get("https://www.twitter.com") #Go to twitter.com
time.sleep(4) #Wait browser loading
browser.find_element_by_xpath('//*[@id="react-root"]/div/div/div/main/div/div/div/div[1]/div/a[2]/div').click() #click button path
time.sleep(3) #Wait browser loading
browser.find_element_by_name('session[username_or_email]').send_keys(given_username)#Type username to the path
        browser.find_element_by_name('session[password]').send_keys(given_password) #Type password to the path
browser.find_element_by_xpath('/html/body/div/div/div/div[2]/main/div/div/div[1]/form/div/div[3]/div/div').click() #Login click
time.sleep(5)
followings_list = [] #List of following
followers_list = [] #List of followers
browser.get(f"https://www.twitter.com/{given_username}/following") #Go to following page
time.sleep(5)
last_height_following = browser.execute_script("return document.documentElement.scrollHeight") #Scroll down
while True:
followings = browser.find_elements_by_xpath('//*[@id="react-root"]/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/section//div[@dir="ltr"]')
for following in followings:
each_following = following.text
if each_following not in followings_list:
followings_list.append(each_following) #Append to the followings_list
browser.execute_script("window.scrollTo(0, document.documentElement.scrollHeight); ")#Keep scrolling down
time.sleep(3)
new_height_following = browser.execute_script("return document.documentElement.scrollHeight")
if new_height_following == last_height_following:
break
last_height_following = new_height_following
browser.get(f"https://www.twitter.com/{given_username}/followers")#Go to followers page
time.sleep(5)
last_height_following = browser.execute_script("return document.documentElement.scrollHeight")
while True:
followers = browser.find_elements_by_xpath('//*[@id="react-root"]/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/section//div[@dir="ltr"]')
for follower in followers:
each_follower = follower.text
if each_follower not in followers_list:
followers_list.append(each_follower)
browser.execute_script("window.scrollTo(0, document.documentElement.scrollHeight); ")
time.sleep(3)
new_height_following = browser.execute_script("return document.documentElement.scrollHeight")
if new_height_following == last_height_following:
break
last_height_following = new_height_following
who_not_followed = []
who_not_follows_you = []
for following in followings_list:
follows_you = False
for following_you in followers_list:
if following == following_you:
follows_you = True
if not follows_you:
who_not_follows_you.append(following)
for follow in followers_list:
you_follow = False
for follower in followings_list:
if follow == follower:
you_follow = True
if not you_follow:
who_not_followed.append(follow)
print("following list count : ")
print(len(followings_list))
print(followings_list)
print("\n")
print("followers list count : ")
print(len(followers_list))
print(followers_list)
print("\n")
print(who_not_follows_you)
print(len(who_not_follows_you))
print("\n")
print(who_not_followed)
print(len(who_not_followed))
workbook = xlsxwriter.Workbook(f'{given_username}_data.xlsx')
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': True})
worksheet.write('A1', 'Followers', bold)
worksheet.write('B1', 'Following', bold)
worksheet.write('C1', 'Who Not Follows You', bold)
worksheet.write('D1', 'Who You Not Follow', bold)
worksheet.write('E1', 'Followers Count', bold)
worksheet.write('F1', 'Following Count', bold)
worksheet.write('G1', 'Count : Who Not Follows You', bold)
worksheet.write('H1', 'Count : Who You Not Follow', bold)
worksheet.write('E2', '=COUNTA(A:A) -1', bold)
worksheet.write('F2', '=COUNTA(B:B) -1', bold)
worksheet.write('G2', '=COUNTA(C:C) -1', bold)
worksheet.write('H2', '=COUNTA(D:D) -1', bold)
row = 1 # Start from
col = 0
for item in followers_list: # Iterate over the data and write it out row by row.
worksheet.write(row, col, item)
row += 1
row = 1
col = 1
for item in followings_list:
worksheet.write(row, col, item)
row += 1
row = 1
col = 2
for item in who_not_follows_you:
worksheet.write(row, col, item)
row += 1
row = 1
col = 3
for item in who_not_followed:
worksheet.write(row, col, item)
row += 1
workbook.close()
self.choose_browser()
def get_tweets(self, browser_name, browser_path):
'''
        Gets tweets for a given search, loading the requested number of pages.
'''
given_username = input("Enter Twitter Username : ")
        given_password = getpass("Enter Twitter Password :")
hashtag = input("Type Hashtag or Search Tweets : ")
pages = int(input("How many pages you want to load (1 - ~): "))
filename = input("Type a file name to save : ")
if browser_name == "1":
browser = webdriver.Chrome(executable_path=browser_path)
elif browser_name == "2":
browser = webdriver.Firefox(executable_path=browser_path)
browser.get("https://www.twitter.com") #Go to twitter.com
time.sleep(4) #Wait browser loading
browser.find_element_by_xpath('//*[@id="react-root"]/div/div/div/main/div/div/div/div[1]/div/a[2]/div').click() #click button path
time.sleep(3) #Wait browser loading
browser.find_element_by_name('session[username_or_email]').send_keys(given_username)#Type username to the path
browser.find_element_by_name('session[password]').send_keys(given_password) #Type password to the path
browser.find_element_by_xpath('/html/body/div/div/div/div[2]/main/div/div/div[1]/form/div/div[3]/div/div').click() #Login click
time.sleep(5)
search_input = browser.find_element_by_xpath("//*[@id='react-root']/div/div/div/main/div/div/div/div[2]/div/div[2]/div/div/div/div[1]/div/div/div/form/div[1]/div/div/div[2]/input")
search_input.send_keys(hashtag)
time.sleep(3)
search_input.send_keys(Keys.ENTER)
time.sleep(3)
results = []
browser.implicitly_wait(5)
for tweet in browser.find_elements_by_xpath("//div[@data-testid='tweet']/div[2]/div[2]"):
results.append(tweet.text)
time.sleep(3)
loop_counter = 0
last_height = browser.execute_script("return document.documentElement.scrollHeight")
while True:
if loop_counter > pages:
break
browser.execute_script("window.scrollTo(0,document.documentElement.scrollHeight);")
time.sleep(3)
for tweet in browser.find_elements_by_xpath("//div[@data-testid='tweet']/div[2]/div[2]"):
results.append(tweet.text)
browser.implicitly_wait(5)
new_height = browser.execute_script("return document.documentElement.scrollHeight")
if last_height == new_height:
break
last_height = new_height
loop_counter+=1
count = 1
with open(f"{filename}.txt","w",encoding="UTF-8") as file:
for item in results:
file.write(f"{count}-{item}\n")
count+=1
self.choose_browser()
work = SocialMediaBot()
work.choose_browser()
#onwork
| 3.65625 | 4 |
src/imitation/envs/examples/airl_envs/point_maze_env.py | Cladett/imitation | 1 | 12767170 | <reponame>Cladett/imitation
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
from imitation.envs.examples.airl_envs.dynamic_mjc.mjc_models import point_mass_maze
class PointMazeEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(
self,
direction=1,
maze_length=0.6,
sparse_reward=False,
no_reward=False,
include_vel=False,
episode_length=100,
):
utils.EzPickle.__init__(self)
self.sparse_reward = sparse_reward
self.no_reward = no_reward
self.include_vel = include_vel
self.max_episode_length = episode_length
self.direction = direction
self.length = maze_length
self.episode_length = 0
model = point_mass_maze(direction=self.direction, length=self.length)
with model.asfile() as f:
mujoco_env.MujocoEnv.__init__(self, f.name, 5)
def step(self, a):
vec_dist = self.get_body_com("particle") - self.get_body_com("target")
reward_dist = -np.linalg.norm(vec_dist) # particle to target
reward_ctrl = -np.square(a).sum()
if self.no_reward:
reward = 0
elif self.sparse_reward:
if reward_dist <= 0.1:
reward = 1
else:
reward = 0
else:
reward = reward_dist + 0.001 * reward_ctrl
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
self.episode_length += 1
done = self.episode_length >= self.max_episode_length
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid = -1
self.viewer.cam.distance = 4.0
def reset_model(self):
qpos = self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(
size=self.model.nv, low=-0.01, high=0.01
)
self.set_state(qpos, qvel)
self.episode_length = 0
return self._get_obs()
def _get_obs(self):
obs = [self.get_body_com("particle")]
if self.include_vel:
obs.append(self.sim.data.get_body_xvelp("particle"))
return np.concatenate(obs)
def plot_trajs(self, *args, **kwargs):
pass
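# Illustrative rollout sketch (an assumption, not part of the original module): drives
# the env directly with random actions; running it requires a working MuJoCo install.
def _point_maze_rollout_demo(steps=10):
    env = PointMazeEnv(episode_length=steps)
    obs = env.reset()
    reward = 0.0
    for _ in range(steps):
        action = env.action_space.sample()
        obs, reward, done, _ = env.step(action)
        if done:
            break
    return obs, reward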
| 2.28125 | 2 |
Dreya/Security/SerialReader.py | esemve/Dreya | 1 | 12767171 | <gh_stars>1-10
import serial
import time
import subprocess
class SerialReader:
key1 = ""
key2 = ""
motion = False
temp = ""
@staticmethod
def read():
while (True):
i = 0
try:
proc = subprocess.Popen(['bash','serialreader.sh'],stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
                    # readline() returns bytes, so compare against b''
                    if line != b'':
line = line.strip().decode("UTF-8")
if line.strip() == "nomotion":
SerialReader.motion = False
if line.strip() == "motion":
SerialReader.motion = True
except Exception as ex:
print('EXCEPTION', ex)
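# Illustrative usage sketch (an assumption: serialreader.sh streams "motion"/"nomotion"
# lines). SerialReader.read() blocks forever, so it is typically started on a daemon
# thread while other code polls SerialReader.motion.
def _start_reader_in_background():
    import threading
    thread = threading.Thread(target=SerialReader.read, daemon=True)
    thread.start()
    return thread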
| 2.546875 | 3 |
ai_transformersx/examples/tasks/news_segment/news_data_processor.py | aicanhelp/ai-transformers | 1 | 12767172 | <reponame>aicanhelp/ai-transformers
from tqdm.auto import tqdm
import random
from ..task_base import *
@configclass
class NewsDataArguments:
data_dir: str = field('/app/dataset/news', 'input the data dir for processing')
save_mid: bool = field(True, 'whether cache the middle data for check')
context_min_len: int = field(128, 'context min length')
sentence_min_len: int = field(10, 'sentence min length')
    check_min_anyway: bool = field(True, 'whether to check the sentence min length anyway')
positive_mode: int = field(0, 'positive mode')
negative_mode: int = field(0, 'negative mode')
bar_size: int = field(1000, 'the progress bar size')
class SentenceSplitter:
def __init__(self, sentence_min_len=10, check_min_anyway=False):
self._min_len = sentence_min_len
self._check_min_anyway = check_min_anyway
def split(self, segment: str):
t = segment.replace('\n', '')
sentences = cut_sentences(t)
length = len(sentences)
sents = []
i = 1
l1 = len(sentences[i - 1])
while i < length:
l2 = len(sentences[i])
if l1 > self._min_len or (l1 != l2 and not self._check_min_anyway):
sents.append(sentences[i - 1])
if i == length - 1:
sents.append(sentences[i])
i = i + 1
l1 = l2
continue
start = i - 1
while True:
if i == length or sentences[i - 1][-1] != sentences[i][-1]:
break
i = i + 1
sents.append(''.join(sentences[start:i]))
i = i + 1
l1 = l2
return sents
class SentencesSegment():
def __init__(self, segment: str = None, sentence_min_len=10, exclude_empty_line=True, check_min_anyway=False):
self._exclude_empty_line = exclude_empty_line
self._sentences = self._make_sentences(segment, sentence_min_len, check_min_anyway)
self._size = len(self._sentences)
def _make_sentences(self, segment: str, sentence_min_len, check_min_anyway=False):
if not segment: return []
return SentenceSplitter(sentence_min_len, check_min_anyway).split(segment)
def add_new_sentence(self, sentence: str):
sentence = sentence.strip()
if self._exclude_empty_line and not sentence:
return
self._sentences.append(sentence)
self._size = self._size + 1
def sentences(self, from_index, to_index):
if to_index > self._size: to_index = self._size
if from_index < 0: from_index = 0
if from_index >= to_index: return None
return ''.join(self._sentences[from_index:to_index])
def first_sentences(self, len):
if len < 1: return None
len = self._size if len > self._size else len
return self.sentences(0, len)
def first_sentence(self):
return self._sentences[0]
def sentence(self, s_i):
if s_i >= self._size: return None
return self._sentences[s_i]
def last_sentence(self):
return self._sentences[-1]
def size(self):
return self._size
def all_sentences(self):
return self._sentences
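# Minimal usage sketch (assumption: hand-written sentences rather than corpus data):
# an empty SentencesSegment can be filled line by line and queried for joined spans.
def _sentences_segment_demo():
    seg = SentencesSegment(sentence_min_len=5)
    for line in ["first sentence here.", "second sentence here.", "third sentence here."]:
        seg.add_new_sentence(line)
    print(seg.size())              # 3
    print(seg.first_sentences(2))  # the first two sentences joined together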
class NewsExampleSegment(SentencesSegment):
def __init__(self, segment: str, context_min_len=50, sentence_min_len=10, exclude_empty_line=True,
check_min_anyway=False):
super().__init__(segment, sentence_min_len, exclude_empty_line, check_min_anyway)
self._context_min_len = context_min_len
self.contexts = []
self._make_contexts()
def _make_contexts(self):
total = self.size()
if total < 1:
return []
length = len(self.first_sentence())
start = 1
while start < total and length < self._context_min_len:
length = length + len(self.sentence(start))
start = start + 1
self.contexts.append((0, start))
last_start = 0
length = length - len(self.sentence(last_start))
for end in range(start + 1, total):
length = length + len(self.sentence(end))
if length < self._context_min_len:
continue
last_start = last_start + 1
self.contexts.append((last_start, end + 1))
length = length - len(self.sentence(last_start))
def context(self, c_start, c_end):
return self.sentences(c_start, c_end)
def last_context(self):
return self.sentences(self.contexts[-1][0], self.contexts[-1][1])
def example(self, c_start, c_end):
return self.context(c_start, c_end), self.sentence(c_end)
@staticmethod
def from_file(file_path, context_min_len=50, sentence_min_len=10,
exclude_empty_line=True,
check_min_anyway=False, line_sentence=True):
if line_sentence:
segment = NewsExampleSegment(None, context_min_len, sentence_min_len, exclude_empty_line, check_min_anyway)
FileLineReader(bar_step_size=-1, exclude_empty_line=True).pipe(
lambda input, result: segment.add_new_sentence(input)
).read(file_path)
return segment
content = []
FileLineReader(bar_step_size=-1, exclude_empty_line=True).pipe(
lambda input, result: content.append(input)
).read(file_path)
return NewsExampleSegment("".join(content), context_min_len, sentence_min_len,
exclude_empty_line,
check_min_anyway)
class NewsExampleGenerator():
def __init__(self, config: NewsDataArguments, type='train'):
self._type = type
self._config = config
self.examples = []
self._last_segment: NewsExampleSegment = None
self._example_id = 0
def add_line(self, new_segment_str: str):
new_segment = NewsExampleSegment(new_segment_str,
context_min_len=self._config.context_min_len,
sentence_min_len=self._config.sentence_min_len)
if not new_segment.size():
return self
if self._last_segment is not None:
# In order to balance the number of classification,
if eval('self._create_positive_examples_' + str(self._config.positive_mode) + '()'):
eval('self._create_negative_examples_' + str(self._config.negative_mode) + '(new_segment)')
self._last_segment = new_segment
return self
def _guid(self):
self._example_id = self._example_id + 1
return "%s-%s" % (self._type, self._example_id)
def _create_positive_examples_0(self):
if len(self._last_segment.contexts) < 2: return False
e_index = random.choice(self._last_segment.contexts[:-1])
guid = self._guid()
text_a, text_b = self._last_segment.example(*e_index)
self.examples.append(InputExample(guid=guid,
text_a=text_a,
text_b=text_b,
label='0'))
return True
def _create_positive_examples_1(self):
for e_index in self._last_segment.contexts[:-1]:
guid = self._guid()
text_a, text_b = self._last_segment.example(*e_index)
self.examples.append(InputExample(guid=guid,
text_a=text_a,
text_b=text_b,
label='0'))
def _create_negative_examples_0(self, new_segment: NewsExampleSegment):
guid = self._guid()
text_a = self._last_segment.last_context()
text_b = new_segment.sentence(0)
self.examples.append(InputExample(guid=guid,
text_a=text_a,
text_b=text_b,
label='1'))
def _create_negative_examples_1(self, new_segment: NewsExampleSegment):
guid = self._guid()
for context in self._last_segment.contexts:
text_a = self._last_segment.context(*context)
for text_b in new_segment.all_sentences():
self.examples.append(InputExample(guid=guid,
text_a=text_a,
text_b=text_b,
label='1'))
class FileNewsExampleProcessor:
def __init__(self, file, config: NewsDataArguments, type='train'):
self._file = file
self._config = config
self._type = type
self._example_generator = NewsExampleGenerator(config, type)
def _make_examples(self):
def handle_line(line, previous):
self._example_generator.add_line(line)
return handle_line
def _save_middle_data(self):
log.info("Save the middle data: ")
with open(join_path(self._config.data_dir, self._type + "_middle_examples.txt"), 'w') as f:
for example in tqdm(self._example_generator.examples, disable=(self._config.bar_size <= 0)):
f.write(example.text_a + '\n')
f.write(example.text_b + '\n')
f.write(example.label + '\n\n')
def get_examples(self):
FileLineReader(self._config.bar_size).pipe(self._make_examples()).read(self._file)
if self._config.save_mid:
self._save_middle_data()
return self._example_generator.examples
class NewsDataProcessor(DataProcessor):
def __init__(self, config: NewsDataArguments):
self._config = config
if self._config is None:
self._config = parse_tasks_args(NewsDataArguments)
def _get_example(self, file_name, type):
return FileNewsExampleProcessor(join_path(self._config.data_dir, file_name), self._config, type).get_examples()
def get_train_examples(self):
return self._get_example('train.txt', 'train')
def get_dev_examples(self):
return self._get_example('dev.txt', 'dev')
def get_labels(self):
return ['0', '1']
def data_dir(self):
return self._config.data_dir
class PredictDataProcessor(DataProcessor):
def __init__(self, segment: NewsExampleSegment):
self._segment = segment
def get_train_examples(self):
return None
def get_dev_examples(self):
return self._create_example_from_article()
def _create_example_from_article(self):
examples = []
for i, e_index in enumerate(self._segment.contexts[:-1]):
text_a, text_b = self._segment.example(*e_index)
examples.append(InputExample(guid=str(i),
text_a=text_a,
text_b=text_b))
return examples
def get_labels(self):
return ['0', '1']
def data_dir(self):
return None
| 2.703125 | 3 |
research/cv/yolox/src/yolox_dataset.py | mindspore-ai/models | 77 | 12767173 | <filename>research/cv/yolox/src/yolox_dataset.py
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================================
""" Yolox dataset module """
import multiprocessing
import random
import os
import numpy as np
import cv2
import mindspore.dataset as de
from pycocotools.coco import COCO
from src.transform import box_candidates, random_affine, TrainTransform, ValTransform
min_keypoints_per_image = 10
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def has_valid_annotation(anno):
"""Check annotation file."""
# if it's empty, there is no annotation
if not anno:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
# keypoints task have a slight different criteria for considering
# if an annotation is valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
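# Illustrative checks (hand-made COCO-style annotations, chosen only for demonstration):
# a list with at least one non-degenerate bbox is valid, zero-area boxes are not.
def _has_valid_annotation_examples():
    valid = [{"bbox": [10, 20, 30, 40]}]      # width and height both > 1
    degenerate = [{"bbox": [10, 20, 0, 0]}]   # every box has (close to) zero area
    assert has_valid_annotation(valid)
    assert not has_valid_annotation(degenerate)
    assert not has_valid_annotation([])       # empty annotation list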
def get_mosaic_coordinate(mosaic_image, mosaic_index, xc, yc, w, h, input_h, input_w):
""" Get mosaic coordinate """
# index0 to top left part of image
if mosaic_index == 0:
x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc
small_coord = w - (x2 - x1), h - (y2 - y1), w, h
# index1 to top right part of image
elif mosaic_index == 1:
x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, input_w * 2), yc
small_coord = 0, h - (y2 - y1), min(w, x2 - x1), h
# index2 to bottom left part of image
elif mosaic_index == 2:
x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(input_h * 2, yc + h)
small_coord = w - (x2 - x1), 0, w, min(y2 - y1, h)
# index2 to bottom right part of image
elif mosaic_index == 3:
x1, y1, x2, y2 = xc, yc, min(xc + w, input_w * 2), min(input_h * 2, yc + h) # noqa
small_coord = 0, 0, min(w, x2 - x1), min(y2 - y1, h)
return (x1, y1, x2, y2), small_coord
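# Illustrative check (the numbers are assumptions chosen to make the geometry concrete):
# for a 640x640 input, mosaic center (xc, yc) = (700, 650) and a 400x300 tile, quadrant 0
# (top-left) is pasted into the large canvas at (300, 350, 700, 650) and cropped from the
# tile at (0, 0, 400, 300). The mosaic_image argument is unused by the function.
def _mosaic_coordinate_example():
    large_box, small_box = get_mosaic_coordinate(None, 0, 700, 650, 400, 300, 640, 640)
    print(large_box)  # (300, 350, 700, 650)
    print(small_box)  # (0, 0, 400, 300)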
def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):
bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max)
bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + padh, 0, h_max)
return bbox
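# Illustrative check (hand-made box, an assumption): coordinates are rescaled, shifted by
# the mosaic padding, and clipped to the canvas bounds.
def _adjust_box_anns_example():
    boxes = np.array([[10.0, 20.0, 200.0, 300.0]])  # x1, y1, x2, y2
    out = adjust_box_anns(boxes.copy(), 0.5, padw=40, padh=60, w_max=128, h_max=128)
    print(out)  # [[ 45.  70. 128. 128.]]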
class COCOYoloXDataset:
""" YoloX Dataset for COCO """
def __init__(self, root, ann_file, remove_images_without_annotations=True,
filter_crowd_anno=True, is_training=True, mosaic=True, img_size=(640, 640),
preproc=None, input_dim=(640, 640), mosaic_prob=1.0, enable_mosaic=True, eable_mixup=True,
mixup_prob=1.0):
self.coco = COCO(ann_file)
self.img_ids = list(self.coco.imgs.keys())
self.filter_crowd_anno = filter_crowd_anno
self.is_training = is_training
self.root = root
self.mosaic = mosaic
self.img_size = img_size
self.preproc = preproc
self.input_dim = input_dim
self.mosaic_prob = mosaic_prob
self.enable_mosaic = enable_mosaic
self.degrees = 10.0
self.translate = 0.1
self.scale = (0.5, 1.5)
self.mixup_scale = (0.5, 1.5)
self.shear = 2.0
self.perspective = 0.0
self.mixup_prob = mixup_prob
self.enable_mixup = eable_mixup
if remove_images_without_annotations:
img_ids = []
for img_id in self.img_ids:
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = self.coco.loadAnns(ann_ids)
if has_valid_annotation(anno):
img_ids.append(img_id)
self.img_ids = img_ids
self.categories = {cat["id"]: cat["name"] for cat in self.coco.cats.values()}
self.cat_ids_to_continuous_ids = {v: i for i, v in enumerate(self.coco.getCatIds())}
self.continuous_ids_cat_ids = {v: k for k, v in self.cat_ids_to_continuous_ids.items()}
def pull_item(self, index):
"""
pull image and label
"""
res, img_info, _ = self.load_anno_from_ids(index)
img = self.load_resized_img(index)
return img, res.copy(), img_info, np.array([self.img_ids[index]])
def mosaic_proc(self, idx):
""" Mosaic data augment """
if self.enable_mosaic and random.random() < self.mosaic_prob:
mosaic_labels = []
input_dim = self.input_dim
input_h, input_w = input_dim[0], input_dim[1]
yc = int(random.uniform(0.5 * input_h, 1.5 * input_h))
xc = int(random.uniform(0.5 * input_w, 1.5 * input_w))
# 3 additional image indices
indices = [idx] + [random.randint(0, len(self.img_ids) - 1) for _ in range(3)]
for i_mosaic, index in enumerate(indices):
img, _labels, _, _ = self.pull_item(index)
h0, w0 = img.shape[:2] # orig hw
scale = min(1. * input_h / h0, 1. * input_w / w0)
img = cv2.resize(
img, (int(w0 * scale), int(h0 * scale)), interpolation=cv2.INTER_LINEAR
)
# generate output mosaic image
(h, w, c) = img.shape[:3]
if i_mosaic == 0:
mosaic_img = np.full((input_h * 2, input_w * 2, c), 114, dtype=np.uint8)
# suffix l means large image, while s means small image in mosaic aug.
(l_x1, l_y1, l_x2, l_y2), (s_x1, s_y1, s_x2, s_y2) = get_mosaic_coordinate(
mosaic_img, i_mosaic, xc, yc, w, h, input_h, input_w
)
mosaic_img[l_y1:l_y2, l_x1:l_x2] = img[s_y1:s_y2, s_x1:s_x2]
padw, padh = l_x1 - s_x1, l_y1 - s_y1
labels = _labels.copy()
# Normalized xywh to pixel xyxy format
if _labels.size > 0:
labels[:, 0] = scale * _labels[:, 0] + padw
labels[:, 1] = scale * _labels[:, 1] + padh
labels[:, 2] = scale * _labels[:, 2] + padw
labels[:, 3] = scale * _labels[:, 3] + padh
mosaic_labels.append(labels)
if mosaic_labels:
mosaic_labels = np.concatenate(mosaic_labels, 0)
np.clip(mosaic_labels[:, 0], 0, 2 * input_w, out=mosaic_labels[:, 0])
np.clip(mosaic_labels[:, 1], 0, 2 * input_h, out=mosaic_labels[:, 1])
np.clip(mosaic_labels[:, 2], 0, 2 * input_w, out=mosaic_labels[:, 2])
np.clip(mosaic_labels[:, 3], 0, 2 * input_h, out=mosaic_labels[:, 3])
mosaic_img, mosaic_labels = random_affine(
mosaic_img,
mosaic_labels,
target_size=(input_w, input_h),
degrees=self.degrees,
translate=self.translate,
scales=self.scale,
shear=self.shear,
)
if (
self.enable_mixup
and not mosaic_labels.size == 0
and random.random() < self.mixup_prob
):
mosaic_img, mosaic_labels = self.mixup(mosaic_img, mosaic_labels, self.input_dim)
mix_img, padded_labels, pre_fg_mask, is_inbox_and_incenter = self.preproc(mosaic_img, mosaic_labels,
self.input_dim)
# -----------------------------------------------------------------
# img_info and img_id are not used for training.
# They are also hard to be specified on a mosaic image.
# -----------------------------------------------------------------
return mix_img, padded_labels, pre_fg_mask, is_inbox_and_incenter
img, label, _, _ = self.pull_item(idx)
img, label, pre_fg_mask, is_inbox_and_incenter = self.preproc(img, label, self.input_dim)
return img, label, pre_fg_mask, is_inbox_and_incenter
def mixup(self, origin_img, origin_labels, input_dim):
""" Mixup data augment """
jit_factor = random.uniform(*self.mixup_scale)
FLIP = random.uniform(0, 1) > 0.5
cp_labels = np.empty(0)
while not cp_labels.size:
cp_index = random.randint(0, self.__len__() - 1)
cp_labels, _, _ = self.load_anno_from_ids(cp_index)
img, cp_labels, _, _ = self.pull_item(cp_index)
if len(img.shape) == 3:
cp_img = np.ones((input_dim[0], input_dim[1], 3), dtype=np.uint8) * 114
else:
cp_img = np.ones(input_dim, dtype=np.uint8) * 114
cp_scale_ratio = min(input_dim[0] / img.shape[0], input_dim[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * cp_scale_ratio), int(img.shape[0] * cp_scale_ratio)),
interpolation=cv2.INTER_LINEAR,
)
cp_img[: int(img.shape[0] * cp_scale_ratio), : int(img.shape[1] * cp_scale_ratio)] = resized_img
cp_img = cv2.resize(
cp_img,
(int(cp_img.shape[1] * jit_factor), int(cp_img.shape[0] * jit_factor)),
)
cp_scale_ratio *= jit_factor
if FLIP:
cp_img = cp_img[:, ::-1, :]
origin_h, origin_w = cp_img.shape[:2]
target_h, target_w = origin_img.shape[:2]
padded_img = np.zeros(
(max(origin_h, target_h), max(origin_w, target_w), 3), dtype=np.uint8
)
padded_img[:origin_h, :origin_w] = cp_img
x_offset, y_offset = 0, 0
if padded_img.shape[0] > target_h:
y_offset = random.randint(0, padded_img.shape[0] - target_h - 1)
if padded_img.shape[1] > target_w:
x_offset = random.randint(0, padded_img.shape[1] - target_w - 1)
padded_cropped_img = padded_img[y_offset: y_offset + target_h, x_offset: x_offset + target_w]
cp_bboxes_origin_np = adjust_box_anns(
cp_labels[:, :4].copy(), cp_scale_ratio, 0, 0, origin_w, origin_h
)
if FLIP:
cp_bboxes_origin_np[:, 0::2] = (origin_w - cp_bboxes_origin_np[:, 0::2][:, ::-1])
cp_bboxes_transformed_np = cp_bboxes_origin_np.copy()
cp_bboxes_transformed_np[:, 0::2] = np.clip(
cp_bboxes_transformed_np[:, 0::2] - x_offset, 0, target_w
)
cp_bboxes_transformed_np[:, 1::2] = np.clip(
cp_bboxes_transformed_np[:, 1::2] - y_offset, 0, target_h
)
keep_list = box_candidates(cp_bboxes_origin_np.T, cp_bboxes_transformed_np.T, 5)
if keep_list.sum() >= 1.0:
cls_labels = cp_labels[keep_list, 4:5].copy()
box_labels = cp_bboxes_transformed_np[keep_list]
labels = np.hstack((box_labels, cls_labels))
origin_labels = np.vstack((origin_labels, labels))
origin_img = origin_img.astype(np.float32)
origin_img = 0.5 * origin_img + 0.5 * padded_cropped_img.astype(np.float32)
return origin_img.astype(np.uint8), origin_labels
def load_anno_from_ids(self, index):
"""
load annotations via ids
"""
img_id = self.img_ids[index]
im_ann = self.coco.loadImgs(img_id)[0]
width = im_ann["width"]
height = im_ann["height"]
ann_ids = self.coco.getAnnIds(imgIds=img_id)
annotations = self.coco.loadAnns(ann_ids)
objs = []
for obj in annotations:
x1 = np.max((0, obj["bbox"][0]))
y1 = np.max((0, obj["bbox"][1]))
x2 = np.min((width, x1 + np.max((0, obj["bbox"][2]))))
y2 = np.min((height, y1 + np.max((0, obj["bbox"][3]))))
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
nums_objs = len(objs)
res = np.zeros((nums_objs, 5))
for ix, obj in enumerate(objs):
cls = self.cat_ids_to_continuous_ids[obj["category_id"]]
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
r = min(self.img_size[0] / height, self.img_size[1] / width)
res[:, :4] *= r
img_info = (height, width)
resize_info = (int(height * r), int(width * r))
return res, img_info, resize_info
def load_resized_img(self, index):
"""
resize to fix size
"""
img_id = self.img_ids[index]
img_path = self.coco.loadImgs(img_id)[0]["file_name"]
img_path = os.path.join(self.root, img_path)
img = cv2.imread(img_path)
img = np.array(img)
r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
resize_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.uint8)
return resize_img
def __getitem__(self, index):
if self.is_training:
img, labels, pre_fg_mask, is_inbox_and_incenter = self.mosaic_proc(index)
return img, labels, pre_fg_mask, is_inbox_and_incenter
img, _, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, _ = self.preproc(img, self.input_dim)
img = img.astype(np.float32)
return img, img_info, img_id
def __len__(self):
return len(self.img_ids)
def create_yolox_dataset(image_dir, anno_path, batch_size, device_num, rank,
data_aug=True, is_training=True):
""" create yolox dataset """
from model_utils.config import config
cv2.setNumThreads(0)
if is_training:
filter_crowd = False
remove_empty_anno = False
else:
filter_crowd = False
remove_empty_anno = False
img_size = config.input_size
input_dim = img_size
if is_training:
yolo_dataset = COCOYoloXDataset(root=image_dir, ann_file=anno_path, filter_crowd_anno=filter_crowd,
remove_images_without_annotations=remove_empty_anno, is_training=is_training,
mosaic=data_aug, eable_mixup=data_aug, enable_mosaic=data_aug,
preproc=TrainTransform(config=config), img_size=img_size, input_dim=input_dim)
else:
yolo_dataset = COCOYoloXDataset(
root=image_dir, ann_file=anno_path, filter_crowd_anno=filter_crowd,
remove_images_without_annotations=remove_empty_anno, is_training=is_training, mosaic=False,
eable_mixup=False,
img_size=img_size, input_dim=input_dim, preproc=ValTransform(legacy=False)
)
cores = multiprocessing.cpu_count()
num_parallel_workers = int(cores / device_num)
if is_training:
dataset_column_names = ["image", "labels", "pre_fg_mask", "is_inbox_and_inCenter"]
ds = de.GeneratorDataset(yolo_dataset, column_names=dataset_column_names,
num_parallel_workers=min(8, num_parallel_workers),
python_multiprocessing=True,
shard_id=rank, num_shards=device_num, shuffle=True)
ds = ds.batch(batch_size, drop_remainder=True)
else: # for val
ds = de.GeneratorDataset(yolo_dataset, column_names=["image", "image_shape", "img_id"],
num_parallel_workers=min(32, num_parallel_workers), shuffle=False)
ds = ds.batch(batch_size, drop_remainder=False)
ds = ds.repeat(1)
return ds
| 2.390625 | 2 |
official/cv/pwcnet/src/pwc_modules.py | mindspore-ai/models | 77 | 12767174 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore
import mindspore.nn as nn
import mindspore.ops as P
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True):
if isReLU:
return nn.SequentialCell(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2, has_bias=True, pad_mode="pad"),
nn.LeakyReLU(0.1)
)
return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2, has_bias=True, pad_mode="pad")
def upsample2d_as(inputs, target_as):
_, _, h1, w1 = P.Shape()(target_as)
_, _, h2, _ = P.Shape()(inputs)
resize = (h1 + 0.0) / (h2 + 0.0)
return P.ResizeBilinear((h1, w1))(inputs) * resize
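# Illustrative shape check (an assumption, not part of the original network): a coarse
# 8x8 flow field is resized to the 16x16 target and its values are scaled by 16 / 8 = 2,
# keeping flow magnitudes consistent with the finer resolution.
def _upsample2d_as_demo():
    coarse_flow = P.Zeros()((1, 2, 8, 8), mindspore.float32)
    target = P.Zeros()((1, 2, 16, 16), mindspore.float32)
    upsampled = upsample2d_as(coarse_flow, target)
    print(upsampled.shape)  # (1, 2, 16, 16)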
class FeatureExtractor(nn.Cell):
'''Feature extract network'''
def __init__(self, num_chs):
super(FeatureExtractor, self).__init__()
self.num_chs = num_chs
self.convs = nn.CellList()
for _, (ch_in, ch_out) in enumerate(zip(num_chs[:-1], num_chs[1:])):
layer = nn.SequentialCell(
conv(ch_in, ch_out, stride=2),
conv(ch_out, ch_out)
)
self.convs.append(layer)
def construct(self, x):
feature_pyramid = []
feature_pyramid_tmp = []
for _conv in self.convs:
x = _conv(x)
feature_pyramid_tmp.append(x)
feature_pyramid.append(feature_pyramid_tmp[5])
feature_pyramid.append(feature_pyramid_tmp[4])
feature_pyramid.append(feature_pyramid_tmp[3])
feature_pyramid.append(feature_pyramid_tmp[2])
feature_pyramid.append(feature_pyramid_tmp[1])
feature_pyramid.append(feature_pyramid_tmp[0])
return feature_pyramid
# Warping layer ---------------------------------
def get_grid(x):
batch_size, height, width, _ = P.Shape()(x)
tmp1 = nn.Range(batch_size)()
tmp2 = nn.Range(height)()
tmp3 = nn.Range(width)()
inputs = (tmp1, tmp2, tmp3)
Bg, Yg, Xg = P.Meshgrid(indexing='ij')(inputs)
return Bg, Yg, Xg
def nearest_warp(x, flow):
grid_b, grid_y, grid_x = get_grid(x)
flow = flow.astype("Int32")
warped_gy = P.Add()(grid_y, flow[:, :, :, 1])
warped_gx = P.Add()(grid_x, flow[:, :, :, 0])
_, h, w, _ = P.Shape()(x)
warped_gy = mindspore.ops.clip_by_value(warped_gy, 0, h-1)
warped_gx = mindspore.ops.clip_by_value(warped_gx, 0, w-1)
warped_indices = P.Stack(3)([grid_b, warped_gy, warped_gx])
warped_x = P.GatherNd()(x, warped_indices)
return warped_x
def bilinear_warp(x, flow):
_, h, w, _ = P.Shape()(x)
grid_b, grid_y, grid_x = get_grid(x)
grid_b = grid_b.astype("float32")
grid_y = grid_y.astype("float32")
grid_x = grid_x.astype("float32")
temp1 = P.Unstack(-1)(flow)
fx = temp1[0]
fy = temp1[1]
fx_0 = P.Floor()(fx)
fx_1 = fx_0+1
fy_0 = P.Floor()(fy)
fy_1 = fy_0+1
# warping indices
h_lim = h-1
w_lim = w-1
gy_0 = mindspore.ops.clip_by_value(grid_y + fy_0, 0., h_lim)
gy_1 = mindspore.ops.clip_by_value(grid_y + fy_1, 0., h_lim)
gx_0 = mindspore.ops.clip_by_value(grid_x + fx_0, 0., w_lim)
gx_1 = mindspore.ops.clip_by_value(grid_x + fx_1, 0., w_lim)
g_00 = P.Stack(3)([grid_b, gy_0, gx_0]).astype("Int32")
g_01 = P.Stack(3)([grid_b, gy_0, gx_1]).astype("Int32")
g_10 = P.Stack(3)([grid_b, gy_1, gx_0]).astype("Int32")
g_11 = P.Stack(3)([grid_b, gy_1, gx_1]).astype("Int32")
# gather contents
x_00 = P.GatherNd()(x, g_00)
x_01 = P.GatherNd()(x, g_01)
x_10 = P.GatherNd()(x, g_10)
x_11 = P.GatherNd()(x, g_11)
# coefficients
c_00 = P.ExpandDims()((fy_1 - fy) * (fx_1 - fx), 3)
c_01 = P.ExpandDims()((fy_1 - fy) * (fx - fx_0), 3)
c_10 = P.ExpandDims()((fy - fy_0) * (fx_1 - fx), 3)
c_11 = P.ExpandDims()((fy - fy_0) * (fx - fx_0), 3)
return c_00 * x_00 + c_01 * x_01 + c_10 * x_10 + c_11 * x_11
class WarpingLayer(nn.Cell):
'''define warplayer'''
def __init__(self, warp_type='nearest'):
super(WarpingLayer, self).__init__()
self.warp = warp_type
def construct(self, x, flow):
x = mindspore.ops.Transpose()(x, (0, 2, 3, 1))
flow = mindspore.ops.Transpose()(flow, (0, 2, 3, 1))
if self.warp == 'nearest':
x_warped = nearest_warp(x, flow)
else:
x_warped = bilinear_warp(x, flow)
x_warped = mindspore.ops.Transpose()(x_warped, (0, 3, 1, 2))
return x_warped
class OpticalFlowEstimator(nn.Cell):
'''define OpticalFlowEstimator'''
def __init__(self, ch_in):
super(OpticalFlowEstimator, self).__init__()
        self.convs = nn.SequentialCell(
conv(ch_in, 128),
conv(128, 128),
conv(128, 96),
conv(96, 64),
conv(64, 32)
)
self.conv_last = conv(32, 2, isReLU=False)
def construct(self, x):
x_intm = self.convs(x)
return x_intm, self.conv_last(x_intm)
class FlowEstimatorDense(nn.Cell):
'''define FlowEstimator network'''
def __init__(self, ch_in):
super(FlowEstimatorDense, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(ch_in + 128, 128)
self.conv3 = conv(ch_in + 256, 96)
self.conv4 = conv(ch_in + 352, 64)
self.conv5 = conv(ch_in + 416, 32)
self.conv_last = conv(ch_in + 448, 2, isReLU=False)
self.concat = P.Concat(1)
def construct(self, x):
x1 = self.concat([self.conv1(x), x])
x2 = self.concat([self.conv2(x1), x1])
x3 = self.concat([self.conv3(x2), x2])
x4 = self.concat([self.conv4(x3), x3])
x5 = self.concat([self.conv5(x4), x4])
x_out = self.conv_last(x5)
return x5, x_out
class ContextNetwork(nn.Cell):
'''context network'''
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.SequentialCell(
conv(ch_in, 128, 3, 1, 1),
conv(128, 128, 3, 1, 2),
conv(128, 128, 3, 1, 4),
conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16),
conv(64, 32, 3, 1, 1),
conv(32, 2, isReLU=False)
)
def construct(self, x):
return self.convs(x)
| 2.03125 | 2 |
205_Isomorphic Strings/205_Aaron.py | ktapsyman/LeetCodez | 0 | 12767175 | <gh_stars>0
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t):
return False
        # for a large mapping, membership checks on a list are slower than on a dict
# usedList = []
hashTable = {}
valueTable = {}
for i in range(len(s)):
if s[i] in hashTable:
if hashTable[s[i]] != t[i]:
return False
else:
continue
else:
# if t[i] in usedList:
if t[i] in valueTable:
return False
# usedList.append(t[i])
hashTable[s[i]] = t[i]
valueTable[t[i]] = s[i]
return True
if __name__ == "__main__":
s = 'egg'
t = 'add'
res = Solution()
print(res.isIsomorphic(s, t))
| 3.515625 | 4 |
src/garnn/utils.py | LeviBorodenko/garnn | 24 | 12767176 | <reponame>LeviBorodenko/garnn
# needed for RaggedTensorFeeder
import itertools
import random
from collections import namedtuple
import numpy as np
def pow_2_reduce(n: int, batch_size: int):
""" returns the largest integer of the form
2^m that is less than or equal to n.
If this integer is greater than the allowed batch_size,
then we simply return the batch_size
"""
max_power = int(2 ** np.floor(np.log(n) / np.log(2)))
return min(max_power, batch_size)
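# Illustrative checks (values chosen by hand): the per-group batch size is the largest
# power of two not exceeding the group size, capped by the configured batch_size.
def _pow_2_reduce_examples():
    assert pow_2_reduce(13, 32) == 8    # 2**3 is the largest power of two <= 13
    assert pow_2_reduce(100, 32) == 32  # 2**6 = 64 exceeds the cap, so the cap wins
    assert pow_2_reduce(16, 32) == 16   # exact powers of two pass through unchanged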
class RaggedTensorFeeder(object):
"""Given a ragged_data list of data points with
varying first dimension and constant inner dimensions and a
list of constant shape labels such that labels[i] corresponds
to ragged_data[i] -- RaggedTensorFeeder is able to generate batches
    of maximal size batch_size so that a TensorFlow RNN
that only operates on non-ragged tensors can still learn.
Arguments:
ragged_data {list} -- ragged array of input data
labels {list} -- constant shape labels
Keyword Arguments:
batch_size {int} -- Maximal batch size (default: {32})
Raises:
        ValueError -- if ragged_data and labels have different lengths
"""
def __init__(self, ragged_data: list, labels: list, batch_size: int = 32):
super(RaggedTensorFeeder, self).__init__()
self.ragged_data = ragged_data
self.labels = labels
self.batch_size = batch_size
# label[i] must correspond to ragged_data[i]
if len(labels) != len(ragged_data):
raise ValueError("Must have same amount of data and labels.")
# zip data with their corresponding labels
self.data_set = list(zip(ragged_data, labels))
# Sorting-function that returns the number of timesteps of data_set[i]
self.sort_func = lambda e: e[0].shape[0]
def _get_distribution(self):
"""creates a dict with keys being time lengths and
        values being how many signals of that length are in the ragged_data.
"""
distr = {}
self.groups = {}
for key, group in itertools.groupby(self.data_set, self.sort_func):
group = list(group)
distr[key] = len(group)
self.groups[key] = group
return distr
@property
def time_distr(self):
return self._get_distribution()
def _get_sample_info(self):
"""returns a dict of
        time_seq_length : namedtuple[batch_size, proportion, group]
        where batch_size is the largest permissible power of 2 less than
        or equal to the number of sequences that have the same length.
Proportion tells us how many % of the total data have this length.
Group is the subset of data_set that has said length
"""
distr = {}
lengths = []
proportions = []
total = len(self.data_set)
Entry = namedtuple("Entry", ["batch_size", "proportion", "group"])
for length in self.time_distr:
num_entries = self.time_distr[length]
size = pow_2_reduce(num_entries, self.batch_size)
prop = num_entries / total
group = self.groups[length]
distr[length] = Entry(batch_size=size, proportion=prop, group=group)
proportions.append(prop)
lengths.append(length)
self.proportions = proportions
self.lengths = lengths
return distr
def sample(self):
"""samples one batch by choosing a length
with distr. given by the proportions in sample_info
and batch_size also given by sample_info.
"""
# Sort data_set according to the above function
self.data_set = sorted(self.data_set, key=self.sort_func)
self.sample_info = self._get_sample_info()
# sample random length
length = np.random.choice(a=self.lengths, p=self.proportions)
info = self.sample_info[length]
valid_data = info.group
batch_size = info.batch_size
sampled_data = random.sample(valid_data, batch_size)
batch_data = [elem[0] for elem in sampled_data]
batch_labels = [elem[1] for elem in sampled_data]
# remove sampled data from data set
data_set = []
for elem in self.data_set:
data_point, label = elem
for selected in batch_data:
match_found = False
if np.all(data_point == selected):
match_found = True
break
if not match_found:
data_set.append(elem)
self.data_set = data_set
return np.stack(batch_data), np.stack(batch_labels)
def gen_samples(self):
while len(self.data_set) > 0:
yield self.sample()
        self.data_set = list(zip(self.ragged_data, self.labels))
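# Minimal usage sketch (assumption: synthetic arrays rather than real data): sequences
# share their inner dimensions but differ in time length; the feeder groups them by
# length so sample()/gen_samples() can stack each batch into a dense tensor.
def _ragged_feeder_demo():
    ragged_data = [np.random.rand(t, 4, 3) for t in (5, 5, 5, 7, 7)]
    labels = [np.zeros(2) for _ in ragged_data]
    feeder = RaggedTensorFeeder(ragged_data, labels, batch_size=4)
    print(feeder.time_distr)  # e.g. {5: 3, 7: 2}
    # per-length batch sizes and sampling proportions; actual batches come from sample()
    for length, entry in feeder._get_sample_info().items():
        print(length, entry.batch_size, round(entry.proportion, 2))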
| 3.140625 | 3 |
scripts/scrape_lections.py | sevazhidkov/ted-analysis | 1 | 12767177 | import time
import random
import json
import re
import requests
import lxml.html
RATINGS = ['Funny', 'Confusing', 'Unconvincing', 'Informative', 'Fascinating',
'Persuasive', 'Obnoxious', 'Courageous', 'Beautiful', 'Longwinded',
'Inspiring', 'Ingenious', 'Jaw-dropping', 'OK']
LECTION_TOPICS_REGEX = re.compile(r'<meta content="(.+)" name="keywords" />')
lections = []
def proccess_lection(lection_id):
global lections
try:
ted_url = 'http://www.ted.com/talks/{}'
response = requests.get(ted_url.format(lection_id))
assert response.status_code != 429
html = response.text
except Exception:
print('Trying again for', lection_id)
time.sleep(2 + random.randint(1, 10))
proccess_lection(lection_id)
return
lection_page = lxml.html.document_fromstring(html)
lection = {'id': lection_id, 'topics': [], 'ratings': {}}
    # Get the lecture's rating for each category
for rating in RATINGS:
        # Build a regular expression to look up the rating count
regex = re.compile(r'"name":"{}","count":(\d+)'.format(rating))
match = re.search(regex, lection_page.text_content())
if match is None:
print('No rating for', rating, 'in', lection_id)
return
rating_count = match.group(1)
lection['ratings'][rating] = int(rating_count)
    # Get the list of lecture topics
match = re.search(LECTION_TOPICS_REGEX, html)
if match is None:
print('No topics for', lection_id)
return
topics_list = match.group(1)
for topic in topics_list.split(', '):
        # Skip categories that are not real topics
if topic not in ['TED', 'talks', 'TED Conference']:
lection['topics'].append(topic)
lections.append(lection)
threads = []
# There are at most 2500 lectures
for i in range(1, 2500):
proccess_lection(i)
if i % 10 == 0:
print(i, 'lections done.')
print(len(lections))
result_file = open('data/lections.json', 'w')
result_file.write(json.dumps(lections))
result_file.close()
| 2.9375 | 3 |
angr/angr/procedures/linux_kernel/brk.py | Ruide/angr-dev | 1 | 12767178 | import angr
class brk(angr.SimProcedure):
"""
This implements the brk system call.
"""
IS_SYSCALL = True
#pylint:disable=arguments-differ
def run(self, new_brk):
return self.state.posix.set_brk(new_brk)
| 2.234375 | 2 |
smart_compose/api/models/payload.py | Jwuthri/SmartCompose | 0 | 12767179 | <filename>smart_compose/api/models/payload.py
from typing import List
from pydantic import BaseModel
class SentimentPayload(BaseModel):
text: str
def payload_to_list(payload: SentimentPayload) -> List:
return [payload.text]
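# Illustrative sketch (an assumption, outside the API routes): the payload validates the
# request body and payload_to_list unwraps it into the list shape the model expects.
def _payload_demo():
    payload = SentimentPayload(text="finish this sentence for me")
    print(payload_to_list(payload))  # ['finish this sentence for me']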
| 2.09375 | 2 |
dht/value_stores/memory.py | leonkoens/dht | 1 | 12767180 | <filename>dht/value_stores/memory.py
import logging
class MemoryStore:
def __init__(self):
logging.info("Memory Value Store created")
| 2.15625 | 2 |
o3d/build/file_exists.py | rwatson/chromium-capsicum | 11 | 12767181 | <filename>o3d/build/file_exists.py
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import sys
sys.stdout.write(str(os.path.exists(sys.argv[1])))
sys.exit(0)
| 2.015625 | 2 |
Pygame/JonathanGame/analogclock1.py | youaresherlock/PythonPractice | 0 | 12767182 | import sys, random, math, pygame
from pygame import locals
from datetime import datetime, time, date
# main program begins
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Analog Clock")
font = pygame.font.Font(None, 36)
orange = 220, 180, 0
white = 255, 255, 255
yellow = 255, 255, 0
pink = 255, 100, 100
pos_x = 300
pos_y = 250
radius = 250
angle = 360
def print_text(font, x, y, text, color=white):
imgText = font.render(text, True, color)
screen.blit(imgText, (x, y))
def wrap_angle(angle):
return abs(angle % 360)
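# Illustrative checks (values chosen by hand, not part of the original script):
# wrap_angle folds any angle into [0, 360), e.g. 370 -> 10 and -90 -> 270, which is why
# each hand can subtract the 90-degree offset that puts 12 o'clock at the top.
def _wrap_angle_examples():
    assert wrap_angle(370) == 10
    assert wrap_angle(-90) == 270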
# repeating loop
while True:
for event in pygame.event.get():
if event.type == locals.QUIT:
sys.exit()
keys = pygame.key.get_pressed()
if keys[locals.K_ESCAPE]:
sys.exit()
screen.fill((0, 0, 100))
# draw one step around the circle
pygame.draw.circle(screen, white, (pos_x, pos_y), radius, 6)
# draw the clock numbers 1-12
for n in range(1, 13):
angle = math.radians(n * (360 / 12) - 90)
x = math.cos(angle) * (radius - 50) - 10
y = math.sin(angle) * (radius - 50) - 10
print_text(font, pos_x + x, pos_y + y, str(n))
# get the time of day
today = datetime.today()
hours = today.hour % 12
minutes = today.minute
seconds = today.second
# draw the hours hand
hour_angle = wrap_angle(hours * (360 / 12) - 90)
hour_angle = math.radians(hour_angle)
hour_x = math.cos(hour_angle) * (radius - 80)
hour_y = math.sin(hour_angle) * (radius - 80)
target = (pos_x + hour_x, pos_y + hour_y)
pygame.draw.line(screen, pink, (pos_x, pos_y), target, 25)
# draw the minutes hand
min_angle = wrap_angle(minutes * (360 / 60) - 90)
min_angle = math.radians(min_angle)
min_x = math.cos(min_angle) * (radius - 60)
min_y = math.sin(min_angle) * (radius - 60)
target = (pos_x + min_x, pos_y + min_y)
pygame.draw.line(screen, orange, (pos_x, pos_y), target, 12)
# draw the seconds hand
sec_angle = wrap_angle(seconds * (360 / 60) - 90)
sec_angle = math.radians(sec_angle)
sec_x = math.cos(sec_angle) * (radius - 40)
sec_y = math.sin(sec_angle) * (radius - 40)
target = (pos_x + sec_x, pos_y + sec_y)
pygame.draw.line(screen, yellow, (pos_x, pos_y), target, 6)
# cover the center
pygame.draw.circle(screen, white, (pos_x, pos_y), 20)
print_text(font, 0, 0, str(hours) + ":" + str(minutes) + ":" + str(seconds))
pygame.display.update()
| 3.71875 | 4 |
jp.atcoder/abc246/abc246_d/30649952.py | kagemeka/atcoder-submissions | 1 | 12767183 | def main() -> None:
n = int(input())
def f(a: int, b: int) -> int:
return a**3 + (a + b) * a * b + b**3
def binary_search(a: int) -> int:
lo = -1
hi = 1 << 20
while hi - lo > 1:
# print(lo, hi)
b = (lo + hi) // 2
if f(a, b) >= n:
hi = b
else:
lo = b
return hi
mn = 1 << 64
for a in range(1 << 20):
b = binary_search(a)
x = f(a, b)
mn = min(mn, x)
print(mn)
if __name__ == "__main__":
main()
| 3.828125 | 4 |
_test_projects/unittests/tests/test_index.py | oren0e/cob | 2 | 12767184 | import pytest
from _cob import mymodels
@pytest.fixture
def people():
people = []
for _ in range(10):
person = mymodels.Person(first_name="First", last_name="Last")
people.append(person)
mymodels.db.session.add(person)
mymodels.db.session.commit()
yield people
for person in people:
mymodels.db.session.delete(person)
mymodels.db.session.commit()
def test_person_model_and_view(webapp, people):
assert webapp.get("/index/list_models") == [{"id": person.id} for person in people]
| 2.34375 | 2 |
source/python/AxB.py | JoHyukJun/algorithm-analysis | 0 | 12767185 | '''
main.py
Created by <NAME> on 2020
Copyright © 2020 <NAME>. All rights reserved.
'''
import sys
a, b = map(int, sys.stdin.readline().rstrip().split(' '))
print(a * b) | 2.984375 | 3 |
ElderWand/ElderWand/WandTracker/trainingImageRecorder.py | alancheung/IoT-Elder-Wand | 0 | 12767186 | <gh_stars>0
from collections import deque
import numpy as np
import cv2
import imutils
import argparse
kernel = np.ones((5,5),np.uint8)
pts = deque(maxlen=64)
#parser = argparse.ArgumentParser(description = 'Record training images for Pi Wand Tracker')
#parser.add_argument('path', type='string', help='File path to save images to')
#parser.add_argument()
# initialize camera and size window size as 640px by 480px
camera = cv2.VideoCapture(0)
imgCount = 1
thresh = 40
# loop over the set of tracked points so that we can draw a line for human eyes.
def drawTrackingLine():
for i in range(1, len(pts)):
# if either of the tracked points are None, ignore them
if pts[i - 1] is None or pts[i] is None:
continue
# otherwise, draw the connecting lines
cv2.line(frame_gray, pts[i - 1], pts[i], (255, 0, 0), thickness = 5)
while 1:
# read from camera
(grabbed, frame) = camera.read()
frame = imutils.rotate(frame, angle=180)
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# TODO determine if this completely necessary
cv2.equalizeHist(frame_gray)
# resize image for faster processing.
#frame_gray = cv2.resize(frame_gray, (120, 120), interpolation = cv2.INTER_CUBIC)
# find the point by looking for pixels >threshold
th, frame_gray = cv2.threshold(frame_gray, thresh, 255, cv2.THRESH_BINARY)
# At least 1 pass is needed to create centroid for recognition
# This approach may not be needed if Hough circles are used.
frame_gray = cv2.dilate(frame_gray, kernel, iterations = 1)
# find contours in the mask
# countours meaning the binary area (aka in our case, the white dot).
cnts = cv2.findContours(frame_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# TODO maybe should change this to assume the cnt that is closest to center of frame is the most applicable instead of max.
# find the most applicable contour in the mask (in this case the largest), then use it to compute the minimum enclosing circle and if it matches a known spell.
mostLikelyWandTip = max(cnts, key=cv2.contourArea)
# find bounds of circle in order to show on screen. Not applicable in this sense
# ((x, y), radius) = cv2.minEnclosingCircle(c)
# Moments help find centers of points
# https://www.learnopencv.com/find-center-of-blob-centroid-using-opencv-cpp-python/
M = cv2.moments(mostLikelyWandTip)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# update the points queue
pts.appendleft(center)
# TODO control view port/this extra work by argument
drawTrackingLine()
    # save a training image when all currently tracked points are valid (None is never appended here, so this passes on every frame)
numPointsTracked = sum(1 for p in pts if p is not None)
if numPointsTracked == len(pts):
filePath = '/home/pi/Desktop/WandTraining/Shapes/HorizontalLine/' + 'shape2-' + str(imgCount) + '.png'
print(filePath)
cv2.imwrite(filePath, frame_gray)
imgCount+=1
# loop and show frame
cv2.imshow('raw', frame)
cv2.imshow('raw-grey', frame_gray)
keyPressed = cv2.waitKey(1) & 0xFF
if keyPressed == ord('q'):
break
elif keyPressed == ord('p'):
        cv2.imwrite('/home/pi/picameraalanTest.png', frame_gray)
imgCount+=1
# increase threshold if 't' is pressed, decrease for 'g'
elif keyPressed == ord('t'):
thresh = thresh + 10
print('Threshold:' + str(thresh))
elif keyPressed == ord('g'):
thresh = thresh - 10
print('Threshold:' + str(thresh))
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
| 2.53125 | 3 |
clean.py | fahbench/boost | 0 | 12767187 | <reponame>fahbench/boost<filename>clean.py
want = [
'filesystem',
'program_options',
'system',
]
import shutil
import os
for d in os.listdir('libs'):
if d in want:
continue
if os.path.isdir('libs/{}'.format(d)):
shutil.rmtree('libs/{}'.format(d))
else:
os.remove('libs/{}'.format(d))
| 2.171875 | 2 |
anella/tenor_client/tenor_pop.py | Fundacio-i2CAT/ai4.0-tenor | 0 | 12767188 | <filename>anella/tenor_client/tenor_pop.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""TeNOR PoP Class"""
"""Uses tenor ns-manager/routes/dc.rb """
import requests
import json
import ConfigParser
CONFIG = ConfigParser.RawConfigParser()
CONFIG.read('config.cfg')
DEFAULT_TENOR_URL = format('{0}:{1}'.format(
CONFIG.get('tenor', 'url'),
CONFIG.get('tenor', 'port')))
DEFAULT_VNFP_URL = format('{0}:{1}'.format(
CONFIG.get('tenor', 'url'),
CONFIG.get('tenor', 'vnfp_port')))
class TenorPoP(object):
"""Represents a TeNOR PoP"""
def __init__(self, pop_id=None, tenor_url=DEFAULT_TENOR_URL,
vnfp_url=DEFAULT_VNFP_URL):
self._tenor_url = tenor_url
self._vnfp_url = vnfp_url
self._pop_id = int(pop_id)
self._name = None
self._orch = None
def retrieve(self):
"""Gets the PoP Name"""
url = '{0}/pops/dc/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} instance unreachable'.format(DEFAULT_TENOR_URL))
try:
json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
pop = json.loads(resp.text)
self._name = pop['name']
self._orch = pop['orch']
return pop
def get_server_details(self):
"""Gets the server details"""
url = '{0}/pops/servers/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
servers = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
return servers['servers']
def get_quota_details(self):
"""Gets the quotas on the PoP"""
url = '{0}/pops/quotas/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
quotas = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
return quotas['quota_set']
def get_network_quota_details(self):
"""Gets the quotas on the PoP"""
url = '{0}/pops/network_quotas/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
quotas = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
return quotas['quota']
def get_limits(self):
'''Get limits and quotas'''
url = '{0}/pops/limits/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
limits = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
return limits['limits']
def get_network_details(self):
"""Gets networks information"""
url = '{0}/pops/networks/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
networks = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
network_details = []
for network in networks["networks"]:
network_details.append({'name': network['name'],
'id': network['id'],
'router_external': network['router:external']})
return network_details
def get_floating_ip_details(self):
"""Gets used floating_ips"""
servers = self.get_server_details()
quota = self.get_quota_details()
floating_ips = int(quota['floating_ips'])
used_floating_ips = 0
for server in servers:
for key in server['addresses'].keys():
for address in server['addresses'][key]:
if address['OS-EXT-IPS:type'].upper() == 'FLOATING':
used_floating_ips = used_floating_ips+1
return {'quota': floating_ips, 'used': used_floating_ips, 'ratio': float(used_floating_ips)/float(floating_ips)}
def get_keypair_details(self):
"""Get used keypairs"""
url = '{0}/pops/keypairs/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
keypairs = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
keypairs_used = len(keypairs['keypairs'])
keypairs_max = self.get_limits()['absolute']['maxTotalKeypairs']
return {'keypairs_quota': keypairs_max, 'keypairs_used': keypairs_used, 'keypairs_ratio': float(keypairs_used)/float(keypairs_max)}
def get_secutity_groups_details(self):
'''gets used and active security groups'''
sec_groups_max = self.get_limits()['absolute']['maxSecurityGroups']
sec_groups_used = self.get_limits()['absolute']['totalSecurityGroupsUsed']
return {'security_groups_quota': sec_groups_max, 'security_groups_used': sec_groups_used, 'security_groups_ratio': float(sec_groups_used)/float(sec_groups_max)}
def get_instance_details(self):
'''get number of used and quota of images'''
instances_max = self.get_limits()['absolute']['maxTotalInstances']
instances_used = self.get_limits()['absolute']['totalInstancesUsed']
return {'instances_quota': instances_max, 'instances_used': instances_used, 'instances_ratio': float(instances_used)/float(instances_max)}
def get_routers_details(self):
'''get used and active router'''
url = '{0}/pops/routers/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
routers = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
routers_max = self.get_network_quota_details()['router']
routers_used = routers
return {'routers_quota': routers_max, 'routers_used': routers_used, 'routers_ratio': float(routers_used)/float(routers_max)}
def get_core_details(self):
"""Gets used and active cores"""
servers = self.get_server_details()
quota = self.get_quota_details()
cores = int(quota['cores'])
used_cores = 0
for server in servers:
if server['status'].upper() == 'ACTIVE':
used_cores = used_cores+int(server['flavor']['detail']['vcpus'])
return {'quota': cores, 'used': used_cores, 'ratio': float(used_cores)/float(cores)}
def get_ram_details(self):
"""Gets used ram active"""
servers = self.get_server_details()
quota = self.get_quota_details()
ram = int(quota['ram'])
used_ram = 0
for server in servers:
if server['status'].upper() == 'ACTIVE':
used_ram = used_ram+int(server['flavor']['detail']['ram'])
return {'quota': ram, 'used': used_ram, 'ratio': float(used_ram)/float(ram),
'units': 'MB'}
def get_flavor_details(self):
"""Gets flavor details"""
url = '{0}/pops/flavours/{1}'.format(DEFAULT_TENOR_URL, self._pop_id)
try:
resp = requests.get(url)
except:
raise IOError('{0} PoP unreachable'.format(self._pop_id))
try:
flavors = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
flavor_details = []
for flavor in flavors["flavors"]:
flavor_details.append({'name': flavor['name'],
'ram': flavor['ram'],
'disk': flavor['disk'],
'vcpus': flavor['vcpus']})
return flavor_details
def get_cachedimgs(self, vm_image):
self.retrieve()
body = {'vm_image': vm_image, 'vim_url': self._orch}
url = '{0}/vnf-provisioning/cachedimg'.format(self._vnfp_url)
resp = requests.post(url,
headers={'Content-Type': 'application/json'},
json=body)
if resp.status_code == 404:
return []
try:
rits = json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
for rit in rits:
rit['pop_id'] = self._pop_id
return rits
@staticmethod
def get_pop_ids():
"""Gets the list of PoP registered"""
url = '{0}/pops/dc'.format(DEFAULT_TENOR_URL)
try:
resp = requests.get(url)
except:
raise IOError('{0} instance unreachable'.format(DEFAULT_TENOR_URL))
try:
json.loads(resp.text)
except:
raise ValueError('Decoding PoP response json response failed')
ids = []
for pop in json.loads(resp.text):
ids.append(int(pop['id']))
return ids
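# Example usage (illustrative only; assumes a reachable TeNOR ns-manager at
# DEFAULT_TENOR_URL with at least one registered PoP):
#   for pop_id in TenorPoP.get_pop_ids():
#       pop = TenorPoP(pop_id)
#       info = pop.retrieve()
#       print info['name'], pop.get_ram_details(), pop.get_core_details()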
if __name__ == "__main__":
POP = TenorPoP()
IDS = TenorPoP().get_pop_ids()
POPI = TenorPoP(1)
print POPI.get_flavor_details()
| 2.703125 | 3 |
Topology/mars_topology_launcher/src/mars_topology_launcher/TopologyParser.py | ramp-eu/Motion_Task_Planner | 1 | 12767189 | #!/usr/bin/env python
"""
Fraunhofer IML
Department Automation and Embedded Systems
Tabsize : 4
Charset : UTF-8
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from abc import ABCMeta, abstractmethod
import xml.etree.ElementTree
from MARSVertex import MARSVertex, MARSFootprintType, DEFAULT_FOOTPRINT_RESOLUTION
from MARSEdge import MARSEdge
from mars_common.Id import Id, IdType
import rospy
class TopologyParser():
"""Abstract class for parsing topologies.
Abstract class for parsing topologies.
Attributes:
__id: Current id for the topology entity that will be created.
After creating an entity the id must be increased.
_mars_vertices: Contains all mars vertices which has to be started
in a dictionary.
key = id of the vertex, value = MARSVertex
_mars_edges: Contains all mars edges which has to be started
in a dictionary.
key = id of the edge, value = MARSEdge
"""
__metaclass__ = ABCMeta
def __init__(self):
self.__id = 0
self._mars_vertices = dict()
self._mars_edges = dict()
@abstractmethod
def parse_file(self, file_path):
"""Reads a topology file an creates mars edge and vertex entities.
Reads a topology file an creates mars edge and vertex entities.
This entities can be used to start topology nodes (edges and vertices).
Args:
file_path: path to the file on the system as a string.
Returns:
Returns "True" if the file was successfully opened and parsed!
Raises:
"""
pass
def get_mars_topology_vertices(self):
"""Return all MARSVertex objects.
Returns all MARSVertex objects in a dictionary.
Args:
Returns:
Returns a dictionary with all MARSVertex objects.
Raises:
"""
return self._mars_vertices
def get_mars_topology_edges(self):
"""Return all MARSEdge objects.
Returns all MARSEdge objects in a dictionary.
Args:
Returns:
Returns a dictionary with all MARSEdge objects.
Raises:
"""
return self._mars_edges
def _create_mars_vertex(
self, vertex_name, x_position, y_position, footprint_type, footprint_radius,
footprint_resolution=DEFAULT_FOOTPRINT_RESOLUTION, uuid=None,
uuid_type=IdType.ID_TYPE_STRING_UUID,
footprint_x=None, footprint_y=None):
"""Creates an object of type MARSVertex.
        Creates an object of type MARSVertex and sets a unique id!
Args:
vertex_name: Name of the vertex.
x_position: X-Position of the vertex.
y_position: Y-Position of the vertex.
uuid: A string based uuid or name.
            uuid_type: Type of the given uuid.
Returns:
Return the created MARSVertex object.
Raises:
"""
if uuid is not None:
mars_vertex = MARSVertex(Id(uuid, uuid_type, description=vertex_name))
else:
mars_vertex = MARSVertex(
Id(self.__id, IdType.ID_TYPE_STRING_NAME, description=vertex_name))
mars_vertex.set_name(vertex_name)
mars_vertex.set_position(x_position, y_position)
if footprint_x and footprint_y:
mars_vertex.add_footprint(footprint_x, footprint_y)
else:
if (footprint_type == MARSFootprintType.MARS_FOOTPRINT_TYPE_SQUARE):
mars_vertex.calc_square_footprint(footprint_radius)
elif (footprint_type == MARSFootprintType.MARS_FOOTPRINT_TYPE_CIRCLE):
mars_vertex.calc_circle_footprint(footprint_radius, footprint_resolution)
else:
rospy.logwarn(
"[TopologyParser][_create_mars_vertex] Unknown footprint type for creating footprint was given. Continue with calculating circle footprint.")
mars_vertex.calc_circle_footprint(footprint_radius, footprint_resolution)
self.__id = self.__id + 1
return mars_vertex
def _create_mars_edge(self, edge_name, length, uuid=None,
footprint_x=None, footprint_y=None):
"""Creates an object of type MARSVertex.
Creates an object of type MARSVertex and sets an unique id!
Args:
edge_name: Name of the edge.
length: Length of the edge in meter (float).
max_velocity: Maximum allowed velocity on the edge on m/s (float).
uuid: A string based uuid (optional)
Returns:
Return the created MARSVertex object.
Raises:
"""
if uuid is not None:
mars_edge = MARSEdge(
Id(uuid, IdType.ID_TYPE_STRING_UUID), length=length)
else:
mars_edge = MARSEdge(
Id(self.__id, IdType.ID_TYPE_STRING_NAME), length=length)
mars_edge.set_name(edge_name)
if footprint_x and footprint_y:
mars_edge.add_footprint(footprint_x, footprint_y)
self.__id = self.__id + 1
return mars_edge
def print_parsed_topology_ros_debug(self):
self.__print_entity(self._mars_vertices)
self.__print_entity(self._mars_edges)
def __print_entity(self, entity_collection):
for entity_name in entity_collection.values():
rospy.logdebug(str(entity_name))
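# Minimal sketch of a concrete parser (illustrative only; a real subclass would
# read the topology file referenced by `file_path` instead of hard-coding the
# entities, and store them keyed by their ids as described in the class docstring):
#
#   class PointToPointParser(TopologyParser):
#       def parse_file(self, file_path):
#           v1 = self._create_mars_vertex("v1", 0.0, 0.0,
#                                         MARSFootprintType.MARS_FOOTPRINT_TYPE_CIRCLE, 0.5)
#           v2 = self._create_mars_vertex("v2", 1.0, 0.0,
#                                         MARSFootprintType.MARS_FOOTPRINT_TYPE_CIRCLE, 0.5)
#           e = self._create_mars_edge("v1_to_v2", length=1.0)
#           # ... add the entities to self._mars_vertices / self._mars_edges ...
#           return True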
| 2.8125 | 3 |
tests/resources/test_arpc.py | andreshndz/cuenca-python | 6 | 12767190 | <gh_stars>1-10
import pytest
from cuenca.resources import Arpc
@pytest.mark.vcr
def test_arpc():
arpc_req = dict(
number='1234567890123403',
arqc='DB3C77D5469C53C6',
arpc_method='1',
transaction_data='somerandomtransactiondata',
response_code='0010',
pan_sequence='01',
unique_number='42D6A016',
transaction_counter='001D',
track_data_method='terminal',
)
arpc = Arpc.create(**arpc_req)
assert arpc.is_valid_arqc
| 2.28125 | 2 |
jacdac/dot_matrix/client.py | microsoft/jacdac-python | 1 | 12767191 | <reponame>microsoft/jacdac-python
# Autogenerated file. Do not edit.
from jacdac.bus import Bus, Client
from .constants import *
from typing import Optional
class DotMatrixClient(Client):
"""
A rectangular dot matrix display, made of monochrome LEDs or Braille pins.
Implements a client for the `Dot Matrix <https://microsoft.github.io/jacdac-docs/services/dotmatrix>`_ service.
"""
def __init__(self, bus: Bus, role: str) -> None:
super().__init__(bus, JD_SERVICE_CLASS_DOT_MATRIX, JD_DOT_MATRIX_PACK_FORMATS, role)
@property
def dots(self) -> Optional[bytes]:
"""
The state of the screen where dot on/off state is
stored as a bit, column by column. The column should be byte aligned.
For example, if the display has no more than 8 rows in each column, then each byte contains bits corresponding
to a single column. Least-significant bit is on top.
If display has 10 rows, then each column is represented by two bytes.
The top-most 8 rows sit in the first byte (with the least significant bit being on top),
        and the remaining 2 rows sit in the second byte.
The following C expression can be used to check if a given `column, row` coordinate is set:
`dots[column * column_size + (row >> 3)] & (1 << (row & 7))`, where
`column_size` is `(number_of_rows + 7) >> 3` (note that if number of rows is 8 or less then `column_size` is `1`),
and `dots` is of `uint8_t*` type.
The size of this register is `number_of_columns * column_size` bytes.,
"""
return self.register(JD_DOT_MATRIX_REG_DOTS).value()
@dots.setter
def dots(self, value: bytes) -> None:
self.register(JD_DOT_MATRIX_REG_DOTS).set_values(value)
@property
def brightness(self) -> Optional[float]:
"""
(Optional) Reads the general brightness of the display, brightness for LEDs. `0` when the screen is off., _: /
"""
return self.register(JD_DOT_MATRIX_REG_BRIGHTNESS).float_value(100)
@brightness.setter
def brightness(self, value: float) -> None:
self.register(JD_DOT_MATRIX_REG_BRIGHTNESS).set_values(value / 100)
@property
def rows(self) -> Optional[int]:
"""
Number of rows on the screen, _: #
"""
return self.register(JD_DOT_MATRIX_REG_ROWS).value()
@property
def columns(self) -> Optional[int]:
"""
Number of columns on the screen, _: #
"""
return self.register(JD_DOT_MATRIX_REG_COLUMNS).value()
@property
def variant(self) -> Optional[DotMatrixVariant]:
"""
(Optional) Describes the type of matrix used.,
"""
return self.register(JD_DOT_MATRIX_REG_VARIANT).value()
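# Illustrative helper (not part of the generated client): a Python version of
# the C expression documented on the `dots` register above, assuming `dots` is
# the raw column-major byte buffer and rows are packed least-significant-bit on top.
def _dot_is_set(dots: bytes, row: int, column: int, number_of_rows: int) -> bool:
    column_size = (number_of_rows + 7) >> 3  # bytes per column
    return bool(dots[column * column_size + (row >> 3)] & (1 << (row & 7)))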
| 3.421875 | 3 |
aiohttp_json_rpc/decorators.py | ZhukovGreen/aiohttp-json-rpc | 0 | 12767192 | def raw_response(function=None):
def decorator(function):
function.raw_response = True
return function
if function:
return decorator(function)
return decorator
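# Example usage (illustrative): both decorator forms simply mark the handler
# with ``raw_response = True``; the dispatcher-side handling of that flag is
# assumed here, not shown.
#
#   @raw_response
#   def handler(request):
#       return b'raw payload'
#
#   @raw_response()
#   def other_handler(request):
#       return b'raw payload'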
| 2.59375 | 3 |
pyjamaparty/strutils/string_builder.py | krajasek/pyjama | 0 | 12767193 |
class StringBuilder(object):
def __init__(self, strr=''):
self.str_list = [s for s in strr]
def __getitem__(self, item):
return ''.join(self.str_list[item])
def __setitem__(self, key, value):
self.str_list[key] = value
def __repr__(self):
return ''.join(self.str_list)
def __len__(self):
return len(self.str_list)
def __iter__(self):
for s in self.str_list:
yield s
def __add__(self, other):
for s in other:
self.str_list.append(s)
return self
def append(self, other):
return self.__add__(other)
def __str__(self):
return self.__repr__()
def __delitem__(self, key):
self.str_list.pop(key)
def remove(self, key):
self.__delitem__(key)
def to_string(self):
return self.__str__()
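# Example usage (illustrative):
#   sb = StringBuilder("hello")
#   sb += " world"            # __add__ appends each character of the argument
#   sb[0] = "H"               # __setitem__ replaces a single character
#   sb.remove(len(sb) - 1)    # drop the trailing "d"
#   print(sb.to_string())     # -> "Hello worl"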
| 3.515625 | 4 |
inboxmanager.py | zanedma/haro-listener | 1 | 12767194 | <reponame>zanedma/haro-listener<gh_stars>1-10
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from utils import getTime, exitWithError
import colors
class InboxManager:
"""Handles all direct inbox management such as creating/retrieving needed
label and filter ids as well as updating/changing message metadata.
@param service: connected Gmail service
@param user_id: user_id to use for requests
"""
def __init__(self, service, user_id):
self.service = service
self.user_id = user_id
self.needed_labels = {
'unprocessed': 'haro_unprocessed',
'processed': 'haro_processed'
}
self.haro_email = '<EMAIL>'
def initInbox(self):
"""Initialize the inbox and return the label and filter ids
@return Dictionary with label ids and filter id
"""
print('{} Initializing setup...'.format(getTime()))
print('{} Initializing Labels...'.format(getTime()))
labels = self.initLabels()
print('{} Initializing Filter...'.format(getTime()))
filter_id = self.initFilter(labels['unprocessed'])
colors.printGreen('{} Setup complete!'.format(getTime()))
return {'labels': labels, 'filter': filter_id}
def initLabels(self):
"""Check if the inbox has the necessary labels, if so return the corresponding label ids.
If not, create the labels, and return the created ids.
@return dictionary with keys as label names and values as label ids
"""
try:
response = self.service.users().labels().list(userId=self.user_id).execute()
except Exception as error:
exitWithError(error)
labels = response['labels']
labels_dict = {
'unprocessed': None,
'processed': None
}
for label in labels:
if label['name'] == self.needed_labels['unprocessed']:
labels_dict['unprocessed'] = label['id']
elif label['name'] == self.needed_labels['processed']:
labels_dict['processed'] = label['id']
if labels_dict['unprocessed'] is None:
unprocessed_id = self._createLabel(self.needed_labels['unprocessed'])
labels_dict['unprocessed'] = unprocessed_id
if labels_dict['processed'] is None:
processed_id = self._createLabel(self.needed_labels['processed'])
labels_dict['processed'] = processed_id
return labels_dict
def initFilter(self, label_id):
"""Check if the inbox has a filter that sends emails from haro_email to the corresponding
label. If the filter is not set up, create one or modify an existing one.
@return filter id for corresponding filter
"""
try:
response = self.service.users().settings().filters().list(userId=self.user_id).execute()
except Exception as error:
exitWithError(error)
filters = response['filter']
for filter_obj in filters:
if filter_obj['criteria']['from'] == self.haro_email:
if not label_id in filter_obj['action']['addLabelIds']:
filter_obj['action']['addLabelIds'].append(label_id)
return filter_obj['id']
return self._createFilter(label_id)
def _createLabel(self, label_name):
"""Create a label with the name corresponding to label_name.
@param label_name name for the new label
@return id of newly created label
"""
label_obj = {
'messageListVisibility': 'show',
'name': label_name,
'labelListVisibility': 'labelShow'
}
try:
created_label = self.service.users().labels().create(userId=self.user_id, body=label_obj).execute()
except Exception as error:
exitWithError(error)
print('{} Created label with name {}, and id {}'.format(getTime(), created_label['name'], created_label['id']))
return created_label['id']
def _createFilter(self, label_id):
"""Create a filter that filters emails from haro_email into label corresponding to
label_id.
@param label_id id of label to filter messages into
@return id of newly created filter
"""
filter_obj = {
'id': 'haro_filter',
'criteria': {
'from': self.haro_email,
},
'action': {
'addLabelIds': label_id
}
}
try:
created_filter = self.service.users().settings().filters().create(userId=self.user_id, body=filter_obj).execute()
except Exception as error:
exitWithError(error)
return created_filter['id']
def markProcessed(self, msg_id, label_ids):
label_updates = {
'addLabelIds': [
label_ids['processed']
],
'removeLabelIds': [
label_ids['unprocessed'],
                'INBOX'
]
}
try:
response = self.service.users().messages().modify(userId=self.user_id, id=msg_id, body=label_updates).execute()
except Exception as error:
colors.printFail('{} Unable to mark message with id {} as processed: {}'.format(getTime(), msg_id, error))
return None
colors.printGreen('{} Marked message with id {} as processed'.format(getTime(), msg_id))
return response
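# Example usage (illustrative; assumes an authorized Gmail API service object
# built elsewhere with googleapiclient credentials):
#   service = build('gmail', 'v1', credentials=creds)
#   manager = InboxManager(service, 'me')
#   ids = manager.initInbox()                      # {'labels': {...}, 'filter': '...'}
#   manager.markProcessed(message_id, ids['labels'])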
| 2.265625 | 2 |
testing/collectdtesting/fake_backend.py | signalfx/collectd-common | 0 | 12767195 | <filename>testing/collectdtesting/fake_backend.py
"""
Logic pertaining to faking out the ingest server that collectd sends datapoints
to.
"""
from functools import partial as p
from contextlib import contextmanager
from io import BytesIO
import os
import string
import tempfile
from signalfx.generated_protocol_buffers \
import signal_fx_protocol_buffers_pb2 as sf_pbuf
import requests
from .assertions import wait_for
from .containers import container_ip, get_docker_client, run_container, is_container_port_open
INGEST_DOCKERFILE = """
FROM python:3.6
WORKDIR /opt/lib
RUN pip install 'signalfx>=1.0' 'docker>=3.0.0'
ENTRYPOINT [ "python", "-u", "-c", "from collectdtesting import fake_ingest; fake_ingest.run_fake_ingest()" ]
"""
@contextmanager
def run_ingest():
"""
Starts up a new fake ingest that will run on a random port. The returned
object will have properties on it for datapoints and events. The fake
server will be stopped once the context manager block is exited.
This is actually implemented by running the run_fake_ingest function in the
fake_ingest.py module in a separate docker container so that this will work
transparently when this function is executed on a Mac, since it is very
difficult/hacky to refer back to the Mac host from a Docker container
(which is where collectd/metricproxy run). The following is moderately
less hacky and more reliable.
"""
test_package_dir = os.path.dirname(__file__)
dockerfile = INGEST_DOCKERFILE.format(test_package=test_package_dir)
client = get_docker_client()
test_code_image, _ = client.images.build(
fileobj=BytesIO(dockerfile.encode("utf-8")),
rm=True, forcerm=True)
with run_container(test_code_image.id,
[(test_package_dir, "/opt/lib/collectdtesting")],
ports={"8080/tcp": None}) as ingest_cont:
local_port = ingest_cont.attrs["NetworkSettings"]["Ports"]["8080/tcp"][0]["HostPort"]
assert wait_for(p(is_container_port_open, ingest_cont, 8080)), "fake ingest didn't start"
class FakeBackend:
"""
Encapsulates all of the things that users of this service need to know
"""
host = container_ip(ingest_cont)
port = 8080
url = "http://%s:%d" % (host, port)
local_url = "http://127.0.0.1:%s" % (local_port,)
@property
def datapoints(self):
"""
Fetch the datapoints from the fake ingest and deserialize them
"""
resp = requests.get(self.local_url + "/datapoints")
dp_message = sf_pbuf.DataPointUploadMessage()
dp_message.ParseFromString(resp.content)
return dp_message.datapoints
@property
def events(self):
"""
Fetch the events from the fake ingest and deserialize them
"""
resp = requests.get(self.local_url + "/events")
event_message = sf_pbuf.EventUploadMessage()
event_message.ParseFromString(resp.content)
return event_message.events
yield FakeBackend()
METRICPROXY_CONFIG = string.Template("""
{
"ForwardTo": [
{
"DefaultAuthToken": "<PASSWORD>",
"Name": "forwarder-to-fake-ingest",
"type": "signalfx",
"URL": "${ingest_url}/v2/datapoint",
"EventURL": "${ingest_url}/v2/event"
}
],
"ListenFrom": [
{
"ListenAddr": "0.0.0.0:18080",
"Type": "collectd"
}
],
"LogDir": "-"
}
""")
@contextmanager
def run_metric_proxy(ingest_url):
"""
Run metricproxy to get the output from collectd and convert it to SignalFx
    datapoints. Metricproxy will then forward the datapoints to the fake
ingest.
See https://github.com/signalfx/metricproxy for config details.
"""
with tempfile.NamedTemporaryFile(dir="/tmp") as conf_file:
conf_file.write(METRICPROXY_CONFIG.substitute(ingest_url=ingest_url).encode('utf-8'))
conf_file.flush()
with run_container("quay.io/signalfx/metricproxy:v0.10.5",
[(conf_file.name, "/var/config/sfproxy/sfdbproxy.conf")]) \
as mp_cont:
assert wait_for(p(is_container_port_open, mp_cont, 18080)), \
"metricproxy didn't start"
yield "http://%s:18080/post-collectd" % (container_ip(mp_cont),)
@contextmanager
def run_all():
"""
Runs the fake ingest server and metric proxy wired up to it so that
collectd can post to metric proxy but we can assert against the final
datapoints that would be seen by ingest
"""
with run_ingest() as fake_ingest:
with run_metric_proxy(fake_ingest.url) as mp_url:
yield fake_ingest, mp_url
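# Example usage in a test (illustrative): point collectd's write_http plugin at
# the returned metricproxy URL, then assert on what reached the fake ingest.
#   with run_all() as (fake_ingest, collectd_post_url):
#       configure_collectd(collectd_post_url)   # hypothetical test helper
#       assert wait_for(lambda: len(fake_ingest.datapoints) > 0)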
| 2.46875 | 2 |
schema_ephys.py | yueqiw/ephys_analysis | 7 | 12767196 | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from collections import OrderedDict
import gc
from current_clamp import *
from current_clamp_features import extract_istep_features
from visualization.feature_annotations import feature_name_dict
from read_metadata import *
from file_io import load_current_step
# from pymysql import IntegrityError
import datajoint as dj
schema = dj.schema('yueqi_ephys', locals())
FIG_DIR = 'analysis_current_clamp/figures_plot_recording'
'''
class DjImportedFromDirectory(dj.Imported):
# Subclass of Imported. Initialize with data directory.
def __init__(self, directory=''):
self.directory = directory
super().__init__()
'''
@schema
class EphysExperimentsForAnalysis(dj.Manual):
definition = """
# Ephys experiments (excel files) for analysis
experiment: varchar(128) # excel files to use for analysis
---
project: varchar(128) # which project the data belongs to
use: enum('Yes', 'No') # whether to use this experiment
directory: varchar(256) # the parent project directory
"""
def insert_experiment(self, excel_file):
'''
Insert new sample ephys metadata from excel to datajoint tables
'''
entry_list = pd.read_excel(excel_file)[['experiment', 'project', 'use', 'directory']].dropna(how='any')
entry_list = entry_list.to_dict('records')
no_insert = True
for entry in entry_list:
if entry['use'] == 'No':
continue
self.insert1(row=entry, skip_duplicates=True)
no_insert = False
#print("Inserted: " + str(entry))
if no_insert:
print("No new entry inserted.")
return
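# Illustrative end-to-end usage of the tables defined in this module (assumes a
# configured DataJoint connection and an experiment spreadsheet; names are examples):
#   EphysExperimentsForAnalysis().insert_experiment('experiments.xlsx')
#   CurrentStepTimeParams().insert_params('experiments.xlsx')
#   FeatureExtractionParams().insert1({'params_id': 1})
#   Animals().populate(); PatchCells().populate(); EphysRecordings().populate()
#   APandIntrinsicProperties().populate()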
@schema
class Animals(dj.Imported):
definition = """
# Sample metadata
-> EphysExperimentsForAnalysis
---
    id: varchar(128) # organoid ID (use date, but need better naming)
strain : varchar(128) # genetic strain
dob = null: date # date of birth
date = null: date # recording date
    age = null: smallint # number of days (date - dob)
slicetype: varchar(128) # what kind of slice prep
external: varchar(128) # external solution
internal: varchar(128) # internal solution
animal_comment = '': varchar(256) # general comments
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
animal_info, _ = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
key['id'] = animal_info['id']
key['strain'] = animal_info['strain']
if not pd.isnull(animal_info['DOB']): key['dob'] = animal_info['DOB']
if not pd.isnull(animal_info['age']): key['age'] = animal_info['age']
key['date'] = animal_info['date']
key['slicetype'] = animal_info['type']
key['external'] = animal_info['external']
key['internal'] = animal_info['internal']
if not pd.isnull(animal_info['comment']): key['animal_comment'] = animal_info['comment']
self.insert1(row=key)
return
@schema
class PatchCells(dj.Imported):
definition = """
# Patch clamp metadata for each cell
-> EphysExperimentsForAnalysis
cell: varchar(128) # cell id
---
rp = null: float # pipette resistance
cm_est = null: float # estimated Cm
ra_est = null: float # estimated Ra right after whole-cell mode
rm_est = null: float # estimated Rm
v_rest = null: float # resting membrane potential
fluor = '': varchar(128) # fluorescent label
    fill = 'no': enum('yes', 'no', 'unknown', 'out') # whether the cell is biocytin filled. Out -- cell came out with pipette.
cell_external = '': varchar(128) # external if different from sample metadata
cell_internal = '': varchar(128) # internal if different from sample metadata
depth = '': varchar(128) # microns beneath slice surface
location = '': varchar(128) # spatial location
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
_, metadata = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
if 'params' in metadata.columns:
old_file = True
cell_info = parse_cell_info_2017_vertical(metadata)
else:
old_file = False
cell_info = parse_cell_info_2017(metadata)
for i, row in cell_info.iterrows():
newkey = {}
newkey['experiment'] = key['experiment']
newkey['cell'] = row['cell']
if not pd.isnull(row['Rp']): newkey['rp'] = row['Rp']
if not pd.isnull(row['Cm']): newkey['cm_est'] = row['Cm']
if not pd.isnull(row['Ra']): newkey['ra_est'] = row['Ra']
if not pd.isnull(row['Vrest']): newkey['v_rest'] = row['Vrest']
if not pd.isnull(row['depth']): newkey['depth'] = row['depth']
if not old_file:
if not pd.isnull(row['fluor']): newkey['fluor'] = row['fluor']
if not pd.isnull(row['Rm']): newkey['rm_est'] = row['Rm']
if not pd.isnull(row['external']): newkey['cell_external'] = row['external']
if not pd.isnull(row['internal']): newkey['cell_internal'] = row['internal']
if not pd.isnull(row['location']): newkey['location'] = row['location']
if not pd.isnull(row['fill']):
if row['fill'].lower() in ['yes', 'no', 'unknown', 'out']:
newkey['fill'] = row['fill'].lower()
else:
print('"fill" must be yes/no/unknown/out. ')
#print(newkey)
self.insert1(row=newkey)
return
@schema
class EphysRecordings(dj.Imported):
definition = """
# Patch clamp metadata for each recording file
-> EphysExperimentsForAnalysis
cell: varchar(128) # cell id
recording: varchar(128) # recording file name
---
clamp = null : enum('v', 'i') # voltage or current clamp
protocol = '' : varchar(128) # protocols such as gapfree, istep, etc
hold = null : smallint # holding current or voltage
ra_pre = null : smallint # estimated Ra before protocol
ra_post = null : smallint # estimated Ra after protocol
compensate = '' : varchar(128) # percentage of Ra compensation
gain = null : smallint # amplifier gain
filter = null : smallint # filter in kHz
start = null : smallint # current step starting current
step = null : smallint # step size of current injection
stim_strength = '' : varchar(128) # electrical/optical stimulation strength
stim_duration = null : smallint # duration of each stim pulse
stim_interval = null : smallint # interval between two consecutive pulses
response = '' : varchar(256) # what kind of reponse was observed
comment = '' : varchar(256) # general comments
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
_, metadata = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
patch_info = parse_patch_info_2017(metadata)
for i, row in patch_info.iterrows():
newkey = {}
newkey['experiment'] = key['experiment']
newkey['cell'] = row['cell']
newkey['recording'] = row['file']
if not pd.isnull(row['clamp']): newkey['clamp'] = row['clamp'].lower()
if not pd.isnull(row['protocol']): newkey['protocol'] = row['protocol']
if not pd.isnull(row['hold']): newkey['hold'] = row['hold']
if not pd.isnull(row['Ra-pre']):
if type(row['Ra-pre']) is str:
newkey['ra_pre'] = 100
else:
newkey['ra_pre'] = row['Ra-pre']
if not pd.isnull(row['Ra-post']):
if type(row['Ra-post']) is str:
newkey['ra_post'] = 100
else:
newkey['ra_post'] = row['Ra-post']
if not pd.isnull(row.get('compensate')): newkey['compensate'] = row['compensate']
if not pd.isnull(row['gain']): newkey['gain'] = row['gain']
if not pd.isnull(row['filter']): newkey['filter'] = row['filter']
if not pd.isnull(row.get('start')): newkey['start'] = row['start']
if not pd.isnull(row.get('step')): newkey['step'] = row['step']
if not pd.isnull(row.get('stim strength')): newkey['stim_strength'] = row['stim strength']
if not pd.isnull(row.get('stim duration')): newkey['stim_duration'] = row['stim duration']
if not pd.isnull(row.get('stim interval')): newkey['stim_interval'] = row['stim interval']
if not pd.isnull(row['response']): newkey['response'] = row['response']
if not pd.isnull(row.get('comment')): newkey['comment'] = row['comment']
self.insert1(row=newkey)
return
#TODO write a CurrentStepRecordings class and let APandIntrinsicProperties depend on it.
# currently APandIntrinsicProperties points to each experiment rather than each recording.
@schema
class CurrentStepTimeParams(dj.Manual):
definition = """
# Time window parameters for current injection (account for different protocol settings)
-> EphysExperimentsForAnalysis
---
istep_start: float # current injection starting time (s)
istep_end_1s: float # time after 1st second (s) -- use the 1st second for analysis
istep_end: float # current injection actual ending time (s)
istep_duration: float # current injection duration (s)
"""
def insert_params(self, excel_file):
'''
        Insert parameters for current injection
'''
experiments = EphysExperimentsForAnalysis().fetch('experiment')
entry_list = pd.read_excel(excel_file)[['experiment', 'istep_start', 'istep_duration']]
entry_list = entry_list.dropna(how='any').to_dict('records')
no_insert = True
for entry in entry_list:
if not entry['experiment'] in experiments:
continue
entry['istep_end_1s'] = entry['istep_start'] + 1
entry['istep_end'] = entry['istep_start'] + entry['istep_duration']
if entry['istep_end'] < entry['istep_end_1s']:
entry['istep_end_1s'] = entry['istep_end']
self.insert1(row=entry, skip_duplicates=True)
no_insert = False
#print("Inserted: " + str(entry))
if no_insert:
print("No new entry inserted.")
return
@schema
class FeatureExtractionParams(dj.Lookup):
definition = """
# Parameters for AllenSDK action potential detection algorithm
params_id : int # unique id for parameter set
---
filter = 10 : float # cutoff frequency for 4-pole low-pass Bessel filter in kHz
dv_cutoff = 4 : float # minimum dV/dt to qualify as a spike in V/s (optional, default 20)
max_interval = 0.02 : float # maximum acceptable time between start of spike and time of peak in sec (optional, default 0.005)
min_height = 10 : float # minimum acceptable height from threshold to peak in mV (optional, default 2)
min_peak = -20 : float # minimum acceptable absolute peak level in mV (optional, default -30)
thresh_frac = 0.05 : float # fraction of average upstroke for threshold calculation (optional, default 0.05)
baseline_interval = 0.1 : float # interval length for baseline voltage calculation (before start if start is defined, default 0.1)
baseline_detect_thresh = 0.3 : float # dV/dt threshold for evaluating flatness of baseline region (optional, default 0.3)
subthresh_min_amp = -80 : float # minimum subthreshold current, not related to spike detection.
n_subthres_sweeps = 4 : smallint # number of hyperpolarizing sweeps for calculating Rin and Tau.
sag_target = -100 : float # Use the sweep with peak Vm closest to this number to calculate Sag.
sag_range_right = -89 : float # the range [left, right] of peak Vm to be considered for sag calculation
sag_range_left = -120 : float # the range [left, right] of peak Vm to be considered for sag calculation
adapt_avg_n_sweeps = 3 : smallint # Use the first n sweeps with >=3 isi's to calculate average adaptation ratio.
adapt_first_n_ratios = 2 : smallint # For each sweep, only average the first n adaptation ratios. If None, average all ratios.
spike_detection_delay = 0.001 : float # start detecting spikes at (start + delay) to skip the initial voltage jump.
    suprathreshold_target_delta_v = 15 : float # the amount of current injection at rheobase + I to achieve Vm increase by delta_v.
suprathreshold_target_delta_i = 15 : float # evaluate some spike train properties at rheobase + I
latency_target_delta_i = 5 : float # evaluate latency at rheobase + I
"""
@schema
class APandIntrinsicProperties(dj.Imported):
definition = """
# Action potential and intrinsic properties from current injections
-> EphysExperimentsForAnalysis
-> FeatureExtractionParams
-> CurrentStepTimeParams
cell: varchar(128) # cell id
recording: varchar(128) # recording file name
---
has_ap : enum('Yes', 'No') # Yes/No
v_baseline = null : float # mV
bias_current = null : float # pA
tau = null : float #
capacitance = null : float # pF
input_resistance = null : float # MOhm
f_i_curve_slope = null : float # no unit
max_firing_rate = null : float # Hz
sag = null : float # no unit
vm_for_sag = null : longblob # mV
indices_for_sag = null : longblob # no unit
sag_sweep_indices = null : longblob # no unit
ap_threshold = null : float # mV
ap_width = null : float # half height width (peak to trough), ms
ap_height = null : float # peak to trough, mV
ap_peak = null : float # mV
    ap_trough = null : float # trough within 100 ms from peak, mV
ap_trough_to_threshold = null : float # AHP amplitude, mV, https://neuroelectro.org/ephys_prop/16/
ap_trough_4w_to_threshold = null : float # fast AHP amplitude at peak + 4 * width, mV
ap_trough_5w_to_threshold = null : float # fast AHP amplitude at peak + 5 * width, mV
ap_peak_to_threshold = null : float # spike amplitude, mV, https://neuroelectro.org/ephys_prop/5/
ap_upstroke = null : float # mV/ms
ap_downstroke = null : float # -mV/ms, positive
ap_updownstroke_ratio = null : float # no unit
ap_fast_trough = null : float # fast trough defined in allensdk, mV
ap_slow_trough = null : float # slow trough defined in allensdk, mV
ap_adp = null : float # mV
ap_trough_3w = null : float # fast trough at peak + 3 * width, mV
ap_trough_4w = null : float # fast trough at peak + 4 * width, mV
ap_trough_5w = null : float # fast trough at peak + 5 * width, mV
hs_firing_rate = null : float # Hz
avg_firing_rate = null : float # Hz
hs_adaptation = null : float # no unit
hs_median_isi = null : float # ms
hs_latency = null : float # ms
avg_hs_latency = null : float # ms
avg_rheobase_latency = null : float # ms
rheobase_index = null : smallint # no unit
rheobase_stim_amp = null : float # pA
hero_sweep_index = null : smallint # no unit
hero_sweep_stim_amp = null : float # pA
all_firing_rate : longblob
all_stim_amp : longblob
input_resistance_vm : longblob
input_resistance_stim_ap : longblob
all_adaptation : longblob
all_v_baseline : longblob
all_median_isi : longblob
all_first_isi : longblob
all_latency : longblob
spikes_sweep_id : longblob
spikes_threshold_t : longblob
spikes_peak_t: longblob
spikes_trough_t: longblob
spikes_fast_trough_t: longblob
spikes_slow_trough_t: longblob
spikes_adp_t: longblob
spikes_trough_3w_t: longblob
spikes_trough_4w_t: longblob
spikes_trough_5w_t: longblob
spikes_threshold_v: longblob
spikes_peak_v: longblob
spikes_trough_v: longblob
spikes_fast_trough_v: longblob
spikes_slow_trough_v: longblob
spikes_adp_v: longblob
spikes_trough_3w_v: longblob
spikes_trough_4w_v: longblob
spikes_trough_5w_v: longblob
adapt_avg = null : float # average adaptation of the 3 sweeps >= 4Hz (1 sec)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
# use the first second of current injection for analysis, regardless of the actual duration.
istep_start, istep_end_1s = \
(CurrentStepTimeParams() & key).fetch1('istep_start', 'istep_end_1s')
this_sample = (EphysExperimentsForAnalysis() & key)
all_istep_recordings = (EphysRecordings() & "protocol = 'istep'")
cells, istep_recordings = (all_istep_recordings * this_sample).fetch('cell','recording')
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
for cell, rec in zip(cells, istep_recordings):
print('Populating for: ' + key['experiment'] + ' ' + rec)
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
cell_features, summary_features = \
extract_istep_features(data, start=istep_start, end=istep_end_1s,
**params)
newkey = summary_features.copy()
newkey['has_ap'] = 'Yes' if summary_features['has_ap'] else 'No'
newkey['experiment'] = key['experiment']
newkey['cell'] = cell
newkey['recording'] = rec
newkey['params_id'] = params_id
# _ = newkey.pop('file_id', None)
self.insert1(row=newkey, ignore_extra_fields=True)
return
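# Illustrative query (DataJoint syntax): pull the extracted features for one
# experiment as a pandas DataFrame (the experiment name is an example).
#   (APandIntrinsicProperties() & "experiment = '2018-03-30_EP2-15'").fetch(format='frame')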
@schema
class CurrentStepPlots(dj.Imported):
definition = """
# Plot current clamp raw sweeps + detected spikes. Save figures locally. Store file path.
-> APandIntrinsicProperties # TODO actually does not need to depend on this.
---
istep_nogray_pdf_path : varchar(256)
istep_nogray_png_large_path : varchar(256)
istep_pdf_path : varchar(256)
istep_png_large_path : varchar(256)
istep_png_mid_path : varchar(256)
istep_raw_pdf_path : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
istep_start, istep_end = \
(CurrentStepTimeParams() & key).fetch1('istep_start', 'istep_end')
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
# figures/istep_plots_params-1/2018-03-30_EP2-15/
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
# The fetched features only contain AP time points for the 1st second
features_1s = (APandIntrinsicProperties() & key).fetch1()
# To get all spike times, recalculate APs using the entire current step
_ , features = \
extract_istep_features(data, start=istep_start, end=istep_end,
**params)
for filetype in ['istep_nogray', 'istep', 'istep_raw']:
target_folder = os.path.join(directory, parent_directory, filetype)
if not os.path.exists(target_folder):
os.mkdir(target_folder)
fig = plot_current_step(data, fig_height=6, startend=[istep_start, istep_end],
offset=[0.2, 0.4], skip_sweep=1,
blue_sweep=features_1s['hero_sweep_index'],
spikes_t = features['spikes_peak_t'],
spikes_sweep_id = features['spikes_sweep_id'],
bias_current = features['bias_current'],
plot_gray_sweeps = False, lw_scale=2, alpha_scale=1, ilim=[-95,60],
other_features = None,
rheobase_sweep = features_1s['rheobase_index'],
sag_sweeps = features_1s['sag_sweep_indices'][:1],
save=False, rasterized=True)
target_folder = os.path.join(parent_directory, 'istep_nogray')
key['istep_nogray_pdf_path'] = os.path.join(target_folder, 'istep_nogray_' + rec + '.pdf')
fig.savefig(os.path.join(directory, key['istep_nogray_pdf_path']), dpi=300)
key['istep_nogray_png_large_path'] = os.path.join(target_folder, 'istep_nogray_large_' + rec + '.png')
fig.savefig(os.path.join(directory, key['istep_nogray_png_large_path']), dpi=300)
plt.show()
plt.close(fig)
fig = plot_current_step(data, fig_height=6, startend=[istep_start, istep_end],
offset=[0.2, 0.4], skip_sweep=1,
blue_sweep=features_1s['hero_sweep_index'],
spikes_t = features['spikes_peak_t'],
spikes_sweep_id = features['spikes_sweep_id'],
bias_current = features['bias_current'],
other_features = features,
trough_name = 'spikes_trough_5w',
rheobase_sweep = features_1s['rheobase_index'],
sag_sweeps = features_1s['sag_sweep_indices'],
save=False, rasterized=True)
target_folder = os.path.join(parent_directory, 'istep')
key['istep_pdf_path'] = os.path.join(target_folder, 'istep_' + rec + '.pdf')
fig.savefig(os.path.join(directory, key['istep_pdf_path']), dpi=300)
key['istep_png_large_path'] = os.path.join(target_folder, 'istep_large_' + rec + '.png')
fig.savefig(os.path.join(directory, key['istep_png_large_path']), dpi=300)
key['istep_png_mid_path'] = os.path.join(target_folder, 'istep_mid_' + rec + '.png')
fig.savefig(os.path.join(directory, key['istep_png_mid_path']), dpi=200)
plt.show()
plt.close(fig)
fig = plot_current_step(data, fig_height=6, startend=[istep_start, istep_end],
offset=[0.2, 0.4], skip_sweep=1,
blue_sweep=features_1s['hero_sweep_index'],
spikes_t = features['spikes_peak_t'],
spikes_sweep_id = features['spikes_sweep_id'],
bias_current = features['bias_current'],
other_features = None,
rheobase_sweep = features_1s['rheobase_index'],
sag_sweeps = features_1s['sag_sweep_indices'][:1],
save=False, rasterized=False)
target_folder = os.path.join(parent_directory, 'istep_raw')
key['istep_raw_pdf_path'] = os.path.join(target_folder, 'istep_raw_' + rec + '.pdf')
fig.savefig(os.path.join(directory, key['istep_raw_pdf_path']), dpi=200)
plt.show()
plt.close(fig)
self.insert1(row=key)
return
@schema
class AnimatedCurrentStepPlots(dj.Imported):
definition = """
# Plot current clamp raw sweeps + detected spikes. Save figures locally. Store file path.
# Saving the animations is slow (~10s per recording). Skip this to finish the pipeline faster.
-> APandIntrinsicProperties
---
istep_gif_path : varchar(256)
istep_mp4_path : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
istep_start, istep_end = \
(CurrentStepTimeParams() & key).fetch1('istep_start', 'istep_end')
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
# figures/istep_plots_params-1/2018-03-30_EP2-15/
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
# The fetched features only contain AP time points for the 1st second
features_1s = (APandIntrinsicProperties() & key).fetch1()
# To get all spike times, recalculate APs using the entire current step
_ , features = \
extract_istep_features(data, start=istep_start, end=istep_end,
**params)
target_folder = os.path.join(directory, parent_directory, 'istep_animation')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
key['istep_gif_path'] = os.path.join(parent_directory, 'istep_animation', 'istep_' + rec + '.gif')
key['istep_mp4_path'] = os.path.join(parent_directory, 'istep_animation', 'istep_' + rec + '.mp4')
fig_anim, anim = animate_current_step(data, fig_height=6, startend=[istep_start, istep_end], offset=[0.2, 0.4],
spikes_t = features['spikes_peak_t'],
spikes_sweep_id = features['spikes_sweep_id'],
bias_current = features['bias_current'],
save=False, blit = True)
anim.save(os.path.join(directory, key['istep_gif_path']), writer='imagemagick', fps=2.5, dpi=100)
anim.save(os.path.join(directory, key['istep_mp4_path']), writer='ffmpeg', fps=2.5, dpi=100)
plt.close(fig_anim)
gc.collect()
self.insert1(row=key)
return
@schema
class FICurvePlots(dj.Imported):
definition = """
# Plot F-I curve from current clamp recordings. Save figures locally. Store file path.
-> APandIntrinsicProperties
---
fi_svg_path = '' : varchar(256)
fi_png_path = '' : varchar(256)
fi_pdf_path = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
features = (APandIntrinsicProperties() & key).fetch1()
if features['has_ap'] == 'No':
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
target_folder = os.path.join(directory, parent_directory, 'fi_curve')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# The fetched features only contain AP time points for the 1st second
# Only use the 1st second for consistency
fi_curve = plot_fi_curve(features['all_stim_amp'], features['all_firing_rate'])
key['fi_png_path'] = os.path.join(parent_directory, 'fi_curve', 'fi_' + rec + '.png')
key['fi_svg_path'] = os.path.join(parent_directory, 'fi_curve', 'fi_' + rec + '.svg')
key['fi_pdf_path'] = os.path.join(parent_directory, 'fi_curve', 'fi_' + rec + '.pdf')
fi_curve.savefig(os.path.join(directory, key['fi_png_path']), dpi=200)
fi_curve.savefig(os.path.join(directory, key['fi_svg_path']), dpi=200)
fi_curve.savefig(os.path.join(directory, key['fi_pdf_path']), dpi=200)
plt.show()
self.insert1(row=key)
return
@schema
class VICurvePlots(dj.Imported):
definition = """
# Plot V-I curve (hyperpolarizing) from current clamp recordings. Save figures locally. Store file path.
-> APandIntrinsicProperties
---
vi_svg_path = '' : varchar(256)
vi_png_path = '' : varchar(256)
vi_pdf_path = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
features = (APandIntrinsicProperties() & key).fetch1()
if features['has_ap'] == 'No':
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
target_folder = os.path.join(directory, parent_directory, 'vi_curve')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# The fetched features only contain AP time points for the 1st second
# Only use the 1st second for consistency
vi_curve = plot_vi_curve(features['input_resistance_stim_ap'], features['input_resistance_vm'])
key['vi_png_path'] = os.path.join(parent_directory, 'vi_curve', 'vi_' + rec + '.png')
key['vi_svg_path'] = os.path.join(parent_directory, 'vi_curve', 'vi_' + rec + '.svg')
key['vi_pdf_path'] = os.path.join(parent_directory, 'vi_curve', 'vi_' + rec + '.pdf')
vi_curve.savefig(os.path.join(directory, key['vi_png_path']), dpi=200)
vi_curve.savefig(os.path.join(directory, key['vi_svg_path']), dpi=200)
vi_curve.savefig(os.path.join(directory, key['vi_pdf_path']), dpi=200)
plt.show()
self.insert1(row=key)
return
@schema
class FirstSpikePlots(dj.Imported):
definition = """
# Plot first spikes from current clamp recordings. Save figures locally. Store file path.
-> APandIntrinsicProperties
---
spike_svg_path = '' : varchar(256)
spike_png_path = '' : varchar(256)
spike_pdf_path = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
features = (APandIntrinsicProperties() & key).fetch1()
if features['has_ap'] == 'No':
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
target_folder = os.path.join(directory, parent_directory, 'first_spike')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# The fetched features only contain AP time points for the 1st second
# Only use the 1st second for consistency
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
first_spike = plot_first_spike(data, features, time_zero='threshold', lw_scale=1.5)
key['spike_png_path'] = os.path.join(parent_directory, 'first_spike', 'spike_' + rec + '.png')
key['spike_svg_path'] = os.path.join(parent_directory, 'first_spike', 'spike_' + rec + '.svg')
key['spike_pdf_path'] = os.path.join(parent_directory, 'first_spike', 'spike_' + rec + '.pdf')
first_spike.savefig(os.path.join(directory, key['spike_png_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_svg_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_pdf_path']), dpi=200)
plt.show()
self.insert1(row=key)
return
@schema
class PhasePlanes(dj.Imported):
definition = """
# Plot phase planes of first spikes. Save figures locally. Store file path.
-> APandIntrinsicProperties
---
phase_svg_path = '' : varchar(256)
phase_png_path = '' : varchar(256)
phase_pdf_path = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
features = (APandIntrinsicProperties() & key).fetch1()
if features['has_ap'] == 'No':
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
target_folder = os.path.join(directory, parent_directory, 'phase_plane')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# The fetched features only contain AP time points for the 1st second
# Only use the 1st second for consistency
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
phase_plane = plot_phase_plane(data, features, filter=5.0, lw_scale=1.5) # or use features['filter']
key['phase_png_path'] = os.path.join(parent_directory, 'phase_plane', 'phase_' + rec + '.png')
key['phase_svg_path'] = os.path.join(parent_directory, 'phase_plane', 'phase_' + rec + '.svg')
key['phase_pdf_path'] = os.path.join(parent_directory, 'phase_plane', 'phase_' + rec + '.pdf')
phase_plane.savefig(os.path.join(directory, key['phase_png_path']), dpi=200)
phase_plane.savefig(os.path.join(directory, key['phase_svg_path']), dpi=200)
phase_plane.savefig(os.path.join(directory, key['phase_pdf_path']), dpi=200)
plt.show()
self.insert1(row=key)
return
@schema
class FirstSpikeFirstDerivativePlots(dj.Imported):
definition = """
# Plot the first derivative (dV/dt) of first spikes from current clamp recordings. Save figures locally. Store file path.
-> APandIntrinsicProperties
---
spike_dvdt_svg_path = '' : varchar(256)
spike_dvdt_png_path = '' : varchar(256)
spike_dvdt_pdf_path = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
features = (APandIntrinsicProperties() & key).fetch1()
if features['has_ap'] == 'No':
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
target_folder = os.path.join(directory, parent_directory, 'first_spike_dvdt')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# The fetched features only contain AP time points for the 1st second
# Only use the 1st second for consistency
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
first_spike = plot_first_spike_dvdt(data, features, time_zero='threshold', filter_dvdt=5.0) # or use features['filter']
key['spike_dvdt_png_path'] = os.path.join(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.png')
key['spike_dvdt_svg_path'] = os.path.join(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.svg')
key['spike_dvdt_pdf_path'] = os.path.join(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.pdf')
first_spike.savefig(os.path.join(directory, key['spike_dvdt_png_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_dvdt_svg_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_dvdt_pdf_path']), dpi=200)
plt.show()
self.insert1(row=key)
return
@schema
class FirstSpikeSecondDerivativePlots(dj.Imported):
definition = """
# Plot the second derivative of first spikes from current clamp recordings. Save figures locally. Store file path.
-> APandIntrinsicProperties
---
spike_2nd_derivative_svg_path = '' : varchar(256)
spike_2nd_derivative_png_path = '' : varchar(256)
spike_2nd_derivative_pdf_path = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
features = (APandIntrinsicProperties() & key).fetch1()
if features['has_ap'] == 'No':
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
target_folder = os.path.join(directory, parent_directory, 'first_spike_2nd_derivative')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# The fetched features only contain AP time points for the 1st second
# Only use the 1st second for consistency
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
first_spike = plot_first_spike_2nd_derivative(data, features, time_zero='threshold', filter_dvdt=5.0) # or use features['filter']
key['spike_2nd_derivative_png_path'] = os.path.join(parent_directory, 'first_spike_2nd_derivative', 'spike_2nd_derivative_' + rec + '.png')
key['spike_2nd_derivative_svg_path'] = os.path.join(parent_directory, 'first_spike_2nd_derivative', 'spike_2nd_derivative_' + rec + '.svg')
key['spike_2nd_derivative_pdf_path'] = os.path.join(parent_directory, 'first_spike_2nd_derivative', 'spike_2nd_derivative_' + rec + '.pdf')
first_spike.savefig(os.path.join(directory, key['spike_2nd_derivative_png_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_2nd_derivative_svg_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_2nd_derivative_pdf_path']), dpi=200)
plt.show()
self.insert1(row=key)
return
@schema
class FirstSpikePlotsMarkersTrough(dj.Imported):
definition = """
# Plot first spikes with additional trough/ADP markers from current clamp recordings. Save figures locally. Store file path.
-> APandIntrinsicProperties
---
spike_other_markers_svg_path = '' : varchar(256)
spike_other_markers_png_path = '' : varchar(256)
spike_other_markers_pdf_path = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
features = (APandIntrinsicProperties() & key).fetch1()
if features['has_ap'] == 'No':
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
if not os.path.exists(os.path.join(directory, parent_directory)):
os.makedirs(os.path.join(directory, parent_directory))
target_folder = os.path.join(directory, parent_directory, 'first_spike_other_markers')
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# The fetched features only contain AP time points for the 1st second
# Only use the 1st second for consistency
abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
data = load_current_step(abf_file, min_voltage=-140)
other_features = ['spikes_trough', 'spikes_fast_trough', 'spikes_slow_trough',
'spikes_adp', 'spikes_trough_3w', 'spikes_trough_4w', 'spikes_trough_5w']
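# Map each additional spike feature to a distinct color (hex string) drawn
# from a seaborn "husl" palette so every marker type is visually separable
# (explanatory note).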
first_spike = plot_first_spike(data, features, time_zero='threshold',
figsize=(7,4), window=[-10,110],
other_markers={k:v for k, v in zip(other_features, sns.color_palette("husl", len(other_features)).as_hex())})
key['spike_other_markers_png_path'] = os.path.join(parent_directory, 'first_spike_other_markers', 'spike_other_markers_' + rec + '.png')
key['spike_other_markers_svg_path'] = os.path.join(parent_directory, 'first_spike_other_markers', 'spike_other_markers_' + rec + '.svg')
key['spike_other_markers_pdf_path'] = os.path.join(parent_directory, 'first_spike_other_markers', 'spike_other_markers_' + rec + '.pdf')
first_spike.savefig(os.path.join(directory, key['spike_other_markers_png_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_other_markers_svg_path']), dpi=200)
first_spike.savefig(os.path.join(directory, key['spike_other_markers_pdf_path']), dpi=200)
plt.show()
self.insert1(row=key)
return
@schema
class CombinedPlots(dj.Imported):
definition = """
# Combine F-I, first spike, phase plane and current step plots together.
-> CurrentStepPlots
-> FICurvePlots
-> FirstSpikePlots
-> PhasePlanes
---
small_fi_spike_phase = '' : varchar(256)
small_istep_fi_spike_phase = '' : varchar(256)
mid_fi_spike_phase = '' : varchar(256)
mid_istep_fi_spike_phase = '' : varchar(256)
large_fi_spike_phase = '' : varchar(256)
large_istep_fi_spike_phase = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
fi = (FICurvePlots() & key).fetch1('fi_png_path')
spike = (FirstSpikePlots() & key).fetch1('spike_png_path')
phase = (PhasePlanes() & key).fetch1('phase_png_path')
istep = (CurrentStepPlots() & key).fetch1('istep_png_large_path')
if not (fi and spike and phase and istep):
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
left_large = combine_vertical([Image.open(os.path.join(directory, x)) for x in [fi, spike, phase]], scale=1)
left_mid = left_large.resize([int(x * 0.5) for x in left_large.size], resample=Image.BICUBIC)
left_small = left_large.resize([int(x * 0.2) for x in left_large.size], resample=Image.BICUBIC)
all_large = combine_horizontal([left_large, Image.open(os.path.join(directory, istep))], scale=1)
all_mid = all_large.resize([int(x * 0.5) for x in all_large.size], resample=Image.BICUBIC)
all_small = all_large.resize([int(x * 0.2) for x in all_large.size], resample=Image.BICUBIC)
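# Each composite is exported at three scales: full resolution, 0.5x ("mid")
# and 0.2x ("small"), using bicubic resampling, so downstream viewers can
# pick a size without re-rendering (explanatory note).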
for fpath, folder, img in zip(['large_fi_spike_phase', 'mid_fi_spike_phase', 'small_fi_spike_phase',
'large_istep_fi_spike_phase', 'mid_istep_fi_spike_phase', 'small_istep_fi_spike_phase'],
['combine_fi_spike_phase'] * 3 + ['combine_istep_fi_spike_phase'] * 3,
[left_large, left_mid, left_small, all_large, all_mid, all_small]):
target_folder = os.path.join(directory, parent_directory, folder)
if not os.path.exists(target_folder):
os.mkdir(target_folder)
key[fpath] = os.path.join(parent_directory, folder, fpath + '_' + rec + '.png')
img.save(os.path.join(directory, key[fpath]))
self.insert1(row=key)
return
@schema
class CombinedPlotsWithText(dj.Imported):
definition = """
# Combine V-I, F-I, first spike, phase plane and current step plots together with metadata and feature text.
-> CurrentStepPlots
-> FICurvePlots
-> VICurvePlots
-> FirstSpikePlots
-> PhasePlanes
-> Animals
-> PatchCells
-> APandIntrinsicProperties
---
small_fi_vi_spike_phase = '' : varchar(256)
mid_fi_vi_spike_phase = '' : varchar(256)
large_fi_vi_spike_phase = '' : varchar(256)
small_istep_fi_vi_spike_phase = '' : varchar(256)
mid_istep_fi_vi_spike_phase = '' : varchar(256)
large_istep_fi_vi_spike_phase = '' : varchar(256)
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
fi = (FICurvePlots() & key).fetch1('fi_png_path')
vi = (VICurvePlots() & key).fetch1('vi_png_path')
spike = (FirstSpikePlots() & key).fetch1('spike_png_path')
phase = (PhasePlanes() & key).fetch1('phase_png_path')
istep = (CurrentStepPlots() & key).fetch1('istep_png_large_path')
animal = (Animals() & key).fetch1()
cell = (PatchCells() & key).fetch1()
features_1s = (APandIntrinsicProperties() & key).fetch1()
features_and_meta = OrderedDict()
features_and_meta.update(animal)
features_and_meta.update(cell)
features_and_meta.update(features_1s)
if not (fi and spike and phase and istep):
self.insert1(row=key)
return
rec = key['recording']
print('Populating for: ' + key['experiment'] + ' ' + rec)
params = (FeatureExtractionParams() & key).fetch1()
params_id = params.pop('params_id', None)
parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
top_large = combine_horizontal([Image.open(os.path.join(directory, x)) for x in [vi, fi]], scale=1)
bot_large = combine_horizontal([Image.open(os.path.join(directory, x)) for x in [phase, spike]], scale=1)
left_large = combine_vertical([top_large, bot_large], scale=1)
left_mid = left_large.resize([int(x * 0.5) for x in left_large.size], resample=Image.BICUBIC)
left_small = left_large.resize([int(x * 0.2) for x in left_large.size], resample=Image.BICUBIC)
left_with_text = combine_vertical([top_large, bot_large, Image.new('RGB', bot_large.size, (255,255,255))], scale=1)
# print metadata and features on the plot
features_keys = ['input_resistance', 'sag', 'capacitance', 'v_rest',
'f_i_curve_slope', 'ap_threshold', 'ap_width', 'ap_peak_to_threshold',
'ap_trough_to_threshold', 'ap_trough_5w_to_threshold', 'ap_upstroke',
'ap_updownstroke_ratio', 'adapt_avg', 'avg_rheobase_latency']
metadata_keys = ['date', 'strain', 'cell', 'recording', 'dob', 'age', 'fill']
features_to_print = [(feature_name_dict[feature], features_and_meta[feature]) for feature in features_keys]
#print(features_to_print)
features_to_print = '\n'.join(["{}: {:.3g}".format(x, y) if isinstance(y, float) else "{}: {}".format(x, y) for x, y in features_to_print])
metadata_to_print = [(metadata, features_and_meta[metadata]) for metadata in metadata_keys]
metadata_to_print = '\n'.join(["{}: {}".format(x, y) for x, y in metadata_to_print])
left_with_text = draw_text_on_image(left_with_text, [metadata_to_print, features_to_print],
[(100,1650), (900,1650)], font_size=38)
all_large = combine_horizontal([left_with_text, Image.open(os.path.join(directory, istep))], scale=1)
all_mid = all_large.resize([int(x * 0.5) for x in all_large.size], resample=Image.BICUBIC)
all_small = all_large.resize([int(x * 0.2) for x in all_large.size], resample=Image.BICUBIC)
for fpath, folder, img in zip(['large_fi_vi_spike_phase', 'mid_fi_vi_spike_phase', 'small_fi_vi_spike_phase',
'large_istep_fi_vi_spike_phase', 'mid_istep_fi_vi_spike_phase', 'small_istep_fi_vi_spike_phase'],
['combine_fi_vi_spike_phase'] * 3 + ['combine_istep_fi_vi_spike_phase'] * 3,
[left_large, left_mid, left_small, all_large, all_mid, all_small]):
target_folder = os.path.join(directory, parent_directory, folder)
if not os.path.exists(target_folder):
os.mkdir(target_folder)
key[fpath] = os.path.join(parent_directory, folder, fpath + '_' + rec + '.png')
img.save(os.path.join(directory, key[fpath]))
self.insert1(row=key)
return
| 2.21875 | 2 |
tests/test__meter.py | alpha-health-ai/pyformance | 4 | 12767197 | from pyformance.meters import Meter
from tests import TimedTestCase
class MeterTestCase(TimedTestCase):
def setUp(self):
super(MeterTestCase, self).setUp()
self.meter = Meter(key="test_meter", clock=TimedTestCase.clock)
def tearDown(self):
super(MeterTestCase, self).tearDown()
def test__one_minute_rate(self):
self.meter.mark(3)
self.clock.add(5)
self.meter.tick()
# the EWMA has a rate of 0.6 events/sec after the first tick
self.assertAlmostEqual(0.6, self.meter.get_one_minute_rate(), delta=0.000001)
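# Worked numbers (editor sketch, assuming the usual EWMA meter behaviour):
# 3 events over the 5 s tick interval give an instantaneous rate of
# 3 / 5 = 0.6 events/s. With a 60 s time constant the one-minute rate then
# decays as rate(dt) = 0.6 * exp(-dt / 60), so
#   0.6 * exp(-60 / 60)  ~= 0.22072766   (after a further 1 minute)
#   0.6 * exp(-120 / 60) ~= 0.08120117   (after a further 2 minutes)
# which are the values asserted below.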
self.clock.add(60)
# the EWMA has a rate of 0.22072766 events/sec after 1 minute
self.assertAlmostEqual(
0.22072766, self.meter.get_one_minute_rate(), delta=0.000001
)
self.clock.add(60)
# the EWMA has a rate of 0.08120117 events/sec after 2 minutes
self.assertAlmostEqual(
0.08120117, self.meter.get_one_minute_rate(), delta=0.000001
)
def test__five_minute_rate(self):
self.meter.mark(3)
self.clock.add(5)
self.meter.tick()
# the EWMA has a rate of 0.6 events/sec after the first tick
self.assertAlmostEqual(0.6, self.meter.get_five_minute_rate(), delta=0.000001)
self.clock.add(60)
# the EWMA has a rate of 0.49123845 events/sec after 1 minute
self.assertAlmostEqual(
0.49123845, self.meter.get_five_minute_rate(), delta=0.000001
)
self.clock.add(60)
# the EWMA has a rate of 0.40219203 events/sec after 2 minutes
self.assertAlmostEqual(
0.40219203, self.meter.get_five_minute_rate(), delta=0.000001
)
def test__fifteen_minute_rate(self):
self.meter.mark(3)
self.clock.add(5)
self.meter.tick()
# the EWMA has a rate of 0.6 events/sec after the first tick
self.assertAlmostEqual(
0.6, self.meter.get_fifteen_minute_rate(), delta=0.000001
)
self.clock.add(60)
# the EWMA has a rate of 0.56130419 events/sec after 1 minute
self.assertAlmostEqual(
0.56130419, self.meter.get_fifteen_minute_rate(), delta=0.000001
)
self.clock.add(60)
# the EWMA has a rate of 0.52510399 events/sec after 2 minutes
self.assertAlmostEqual(
0.52510399, self.meter.get_fifteen_minute_rate(), delta=0.000001
)
def test__mean_rate(self):
self.meter.mark(60)
self.clock.add(60)
self.meter.tick()
val = self.meter.get_mean_rate()
self.assertEqual(1, val)
| 2.6875 | 3 |
pylith/tests/Fault.py | joegeisz/pylith | 1 | 12767198 | <gh_stars>1-10
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pylith/tests/Fault.py
##
## @brief Check fault output from PyLith.
import numpy
import h5py
from spatialdata.units.NondimElasticQuasistatic import NondimElasticQuasistatic
def check_vertex_fields(testcase, filename, mesh, fieldNames):
"""
Check properties.
"""
h5 = h5py.File(filename, "r", driver="sec2")
# Check cells
cells = h5['topology/cells'][:]
(ncells, ncorners) = cells.shape
testcase.assertEqual(mesh['ncells'], ncells)
testcase.assertEqual(mesh['ncorners'], ncorners)
# Check vertices
vertices = h5['geometry/vertices'][:]
(nvertices, spaceDim) = vertices.shape
testcase.assertEqual(mesh['nvertices'], nvertices)
testcase.assertEqual(mesh['spaceDim'], spaceDim)
# Check fault information
tolerance = 1.0e-5
normalizer = NondimElasticQuasistatic()
normalizer._configure()
for name in fieldNames:
valuesE = testcase.calcFaultField(name, vertices)
values = h5['vertex_fields/%s' % name][:]
(nstepsE, nverticesE, dimE) = valuesE.shape
(nsteps, nvertices, dim) = values.shape
testcase.assertEqual(nstepsE, nsteps)
testcase.assertEqual(nverticesE, nvertices)
testcase.assertEqual(dimE, dim)
scale = 1.0
if name == "traction_change" or name == "traction":
scale *= normalizer.pressureScale().value
for istep in xrange(nsteps):
for idim in xrange(dim):
okay = numpy.zeros((nvertices,), dtype=numpy.bool)
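# Comparison strategy (explanatory note): where the expected value is nonzero,
# check the relative error |1 - computed/expected|; where it is zero, fall
# back to the absolute difference divided by `scale` (the pressure scale for
# traction fields). Either must be below `tolerance` for a vertex to be okay.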
maskR = numpy.abs(valuesE[istep,:,idim]) > 0.0
ratio = numpy.abs(1.0 - values[istep,maskR,idim]/valuesE[istep,maskR,idim])
if len(ratio) > 0:
okay[maskR] = ratio < tolerance
maskD = ~maskR
diff = numpy.abs(values[istep,maskD,idim] - valuesE[istep,maskD,idim]) / scale
if len(diff) > 0:
okay[maskD] = diff < tolerance
if numpy.sum(okay) != nvertices:
print "Error in component %d of field '%s' for timestep %d." % (idim, name, istep)
print "Expected values:",valuesE
print "Output values:",values
print "Expected values (not okay): ",valuesE[istep,~okay,idim]
print "Computed values (not okay): ",values[istep,~okay,idim]
print "Coordinates (not okay): ",vertices[~okay,:]
h5.close()
testcase.assertEqual(nvertices, numpy.sum(okay))
h5.close()
return
def check_data(testcase, filename, mesh, fieldNames):
"""
Check properties.
"""
h5 = h5py.File(filename, "r", driver="sec2")
# Check cells
cells = h5['topology/cells'][:]
(ncells, ncorners) = cells.shape
testcase.assertEqual(mesh['ncells'], ncells)
testcase.assertEqual(mesh['ncorners'], ncorners)
# Check vertices
vertices = h5['geometry/vertices'][:]
(nvertices, spaceDim) = vertices.shape
testcase.assertEqual(mesh['nvertices'], nvertices)
testcase.assertEqual(mesh['spaceDim'], spaceDim)
# Check fault information
tolerance = 1.0e-5
for name in fieldNames:
valuesE = testcase.calcFaultInfo(name, vertices)
values = h5['vertex_fields/%s' % name][:]
(nverticesE, dim) = valuesE.shape
values = values.reshape( (nvertices, dim) )
testcase.assertEqual(nverticesE, nvertices)
for i in xrange(dim):
ratio = numpy.abs(1.0 - values[:,i]/valuesE[:,i])
diff = numpy.abs(values[:,i] - valuesE[:,i])
mask = valuesE[:,i] != 0.0
okay = mask*(ratio < tolerance) + ~mask*(diff < tolerance)
if numpy.sum(okay) != nvertices:
print "Error in component %d of field '%s'." % (i, name)
print "Expected values:",valuesE
print "Output values:",values
testcase.assertEqual(nvertices, numpy.sum(okay))
h5.close()
return
# End of file
| 2.28125 | 2 |
Chapter-11/collections/ansible_collections/kubernetes/core/plugins/modules/k8s_taint.py | PacktPublishing/Ansible-for-Real-life-Automation | 7 | 12767199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME> <@alinabuzachis>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: k8s_taint
short_description: Taint a node in a Kubernetes/OpenShift cluster
version_added: "2.3.0"
author: <NAME> (@alinabuzachis)
description:
- Taint allows a node to refuse Pod to be scheduled unless that Pod has a matching toleration.
- Untaint will remove taints from nodes as needed.
extends_documentation_fragment:
- kubernetes.core.k8s_auth_options
options:
state:
description:
- Determines whether to add or remove taints.
type: str
default: present
choices: [ present, absent ]
name:
description:
- The name of the node.
required: true
type: str
taints:
description:
- List containing the taints.
type: list
required: true
elements: dict
suboptions:
key:
description:
- The taint key to be applied to a node.
type: str
value:
description:
- The taint value corresponding to the taint key.
type: str
effect:
description:
- The effect of the taint on Pods that do not tolerate the taint.
- Required when I(state=present).
type: str
choices: [ NoSchedule, NoExecute, PreferNoSchedule ]
replace:
description:
- If C(true), allow taints to be replaced.
required: false
default: false
type: bool
requirements:
- python >= 3.6
- kubernetes >= 12.0.0
"""
EXAMPLES = r"""
- name: Taint node "foo"
kubernetes.core.k8s_taint:
state: present
name: foo
taints:
- effect: NoExecute
key: "key1"
- name: Taint node "foo"
kubernetes.core.k8s_taint:
state: present
name: foo
taints:
- effect: NoExecute
key: "key1"
value: "value1"
- effect: NoSchedule
key: "key1"
value: "value1"
- name: Remove taint from "foo".
kubernetes.core.k8s_taint:
state: absent
name: foo
taints:
- effect: NoExecute
key: "key1"
value: "value1"
"""
RETURN = r"""
result:
description:
- The tainted Node object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
"""
import copy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
K8sAnsibleMixin,
get_api_client,
)
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
AUTH_ARG_SPEC,
)
try:
from kubernetes.client.api import core_v1_api
from kubernetes.client.exceptions import ApiException
except ImportError:
# ImportError is handled by the common module already.
pass
def _equal_dicts(a, b):
keys = ["key", "effect"]
if "effect" not in set(a).intersection(b):
keys.remove("effect")
return all((a[x] == b[x] for x in keys))
def _get_difference(a, b):
return [
a_item for a_item in a if not any(_equal_dicts(a_item, b_item) for b_item in b)
]
def _get_intersection(a, b):
return [a_item for a_item in a if any(_equal_dicts(a_item, b_item) for b_item in b)]
def _update_exists(a, b):
return any(
(
any(
_equal_dicts(a_item, b_item)
and a_item.get("value") != b_item.get("value")
for b_item in b
)
for a_item in a
)
)
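# Illustrative sketch (editorial, not part of the module): taints are compared
# by (key, effect) only, so with
#   existing = [{"key": "app", "effect": "NoExecute", "value": "v1"}]
#   wanted   = [{"key": "app", "effect": "NoExecute", "value": "v2"}]
# _get_difference(wanted, existing) is [] (nothing new to add), while
# _update_exists(existing, wanted) is True because only the value differs,
# which is what triggers a patch in the state=present branch below.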
def argspec():
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(
dict(
state=dict(type="str", choices=["present", "absent"], default="present"),
name=dict(type="str", required=True),
taints=dict(type="list", required=True, elements="dict"),
replace=dict(type="bool", default=False),
)
)
return argument_spec
class K8sTaintAnsible:
def __init__(self, module):
self.module = module
self.k8s_ansible_mixin = K8sAnsibleMixin(module=self.module)
self.k8s_ansible_mixin.client = get_api_client(module=self.module)
self.k8s_ansible_mixin.module = self.module
self.k8s_ansible_mixin.argspec = self.module.argument_spec
self.k8s_ansible_mixin.check_mode = self.module.check_mode
self.k8s_ansible_mixin.params = self.module.params
self.k8s_ansible_mixin.fail_json = self.module.fail_json
self.k8s_ansible_mixin.fail = self.module.fail_json
self.k8s_ansible_mixin.exit_json = self.module.exit_json
self.k8s_ansible_mixin.warn = self.module.warn
self.k8s_ansible_mixin.warnings = []
self.api_instance = core_v1_api.CoreV1Api(self.k8s_ansible_mixin.client.client)
self.k8s_ansible_mixin.check_library_version()
self.changed = False
def get_node(self, name):
try:
node = self.api_instance.read_node(name=name)
except ApiException as exc:
if exc.reason == "Not Found":
self.module.fail_json(msg="Node '{0}' has not been found.".format(name))
self.module.fail_json(
msg="Failed to retrieve node '{0}' due to: {1}".format(
name, exc.reason
),
status=exc.status,
)
except Exception as exc:
self.module.fail_json(
msg="Failed to retrieve node '{0}' due to: {1}".format(
name, to_native(exc)
)
)
return node
def patch_node(self, taints):
body = {"spec": {"taints": taints}}
try:
result = self.api_instance.patch_node(
name=self.module.params.get("name"), body=body
)
except Exception as exc:
self.module.fail_json(
msg="Failed to patch node due to: {0}".format(to_native(exc))
)
return result.to_dict()
def execute_module(self):
result = {"result": {}}
state = self.module.params.get("state")
taints = self.module.params.get("taints")
name = self.module.params.get("name")
node = self.get_node(name)
existing_taints = node.spec.to_dict().get("taints") or []
diff = _get_difference(taints, existing_taints)
if state == "present":
if diff:
# There are new taints to be added
self.changed = True
if self.module.check_mode:
self.module.exit_json(changed=self.changed, **result)
if self.module.params.get("replace"):
# Patch with the new taints
result["result"] = self.patch_node(taints=taints)
self.module.exit_json(changed=self.changed, **result)
result["result"] = self.patch_node(
taints=[*_get_difference(existing_taints, taints), *taints]
)
else:
# No new taints to be added, but maybe there is something to be updated
if _update_exists(existing_taints, taints):
self.changed = True
if self.module.check_mode:
self.module.exit_json(changed=self.changed, **result)
result["result"] = self.patch_node(
taints=[*_get_difference(existing_taints, taints), *taints]
)
else:
result["result"] = node.to_dict()
elif state == "absent":
# Nothing to be removed
if not existing_taints:
result["result"] = node.to_dict()
if not diff:
self.changed = True
if self.module.check_mode:
self.module.exit_json(changed=self.changed, **result)
self.patch_node(taints=_get_difference(existing_taints, taints))
else:
if _get_intersection(existing_taints, taints):
self.changed = True
if self.module.check_mode:
self.module.exit_json(changed=self.changed, **result)
self.patch_node(taints=_get_difference(existing_taints, taints))
else:
self.module.exit_json(changed=self.changed, **result)
self.module.exit_json(changed=self.changed, **result)
def main():
module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True)
k8s_taint = K8sTaintAnsible(module)
k8s_taint.execute_module()
if __name__ == "__main__":
main()
| 2.109375 | 2 |
exe017.py | evertondutra/Curso_em-_Video_Python | 0 | 12767200 | <reponame>evertondutra/Curso_em-_Video_Python
from math import hypot
co = float(input('How long is the opposite leg? '))
ca = float(input('How long is the adjacent leg? '))
hi = hypot(co, ca)
print(f'The hypotenuse will measure {hi:.2f}')
'''
import math
co = float(input('How long is the opposite leg? '))
ca = float(input('How long is the adjacent leg? '))
hi = math.hypot(co, ca)
print(f'The hypotenuse will measure {hi:.2f}')
'''
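# math.hypot(co, ca) returns sqrt(co**2 + ca**2), i.e. the hypotenuse of a
# right triangle whose legs measure co and ca (explanatory note).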
| 3.515625 | 4 |
tests/processors/test_metadata.py | ankitshah009/dcase_util | 0 | 12767201 | <reponame>ankitshah009/dcase_util
import nose.tools
import dcase_util
import tempfile
import os
def test_MetadataReadingProcessor():
tmp = tempfile.NamedTemporaryFile('r+', suffix='.txt', dir='/tmp', delete=False)
try:
dcase_util.utils.Example.event_metadata_container().save(tmp.name)
m = dcase_util.processors.MetadataReadingProcessor()
processed = m.process(
filename=tmp.name,
focus_filename='test1.wav'
)
nose.tools.eq_(processed.event_count, 3)
nose.tools.eq_(processed.file_count, 1)
m = dcase_util.processors.MetadataReadingProcessor()
processed = m.process(
filename=tmp.name,
focus_filename='test1.wav',
focus_start_seconds=0.0,
focus_stop_seconds=3.0
)
nose.tools.eq_(processed.event_count, 1)
nose.tools.eq_(processed.file_count, 1)
m = dcase_util.processors.MetadataReadingProcessor()
processed = m.process(
filename=tmp.name,
focus_filename='test1.wav',
focus_start_seconds=0,
focus_duration_seconds=3.0
)
nose.tools.eq_(processed.event_count, 1)
nose.tools.eq_(processed.file_count, 1)
finally:
os.unlink(tmp.name)
| 2.1875 | 2 |
utilities/model_logging.py | salvaRC/Graphino | 16 | 12767202 | <reponame>salvaRC/Graphino<gh_stars>10-100
"""
Author: <NAME>
"""
from datetime import datetime
import torch
def log_epoch_vals(writer, loss, epoch, val_stats=None, test_stats=None):
if writer is None:
return
writer.add_scalar('train/_loss', loss, epoch)
# writer.add_scalar('train/_mae', stats['mae'], epoch)
if val_stats is not None:
writer.add_scalar('val/_mae', val_stats['mae'], epoch)
writer.add_scalar('val/_rmse', val_stats['rmse'], epoch)
writer.add_scalar('val/_corrcoef', val_stats['corrcoef'], epoch)
writer.add_scalar('val/_all_season_cc', val_stats['all_season_cc'], epoch)
writer.add_scalar('test/_mae', test_stats['mae'], epoch)
writer.add_scalar('test/_rmse', test_stats['rmse'], epoch)
writer.add_scalar('test/_corrcoef', test_stats['corrcoef'], epoch)
writer.add_scalar('test/_all_season_cc', test_stats['all_season_cc'], epoch)
def set_if_exists(dictio_from, dictio_to, key, prefix):
if key in dictio_from:
dictio_to[f'{prefix}_{key}'.lstrip('_')] = dictio_from[key]
def update_tqdm(tq, train_loss, val_stats=None, test_stats=None, **kwargs):
def get_stat_dict(dictio, prefix, all=False):
dict_two = dict()
set_if_exists(dictio, dict_two, 'rmse', prefix)
set_if_exists(dictio, dict_two, 'corrcoef', prefix)
set_if_exists(dictio, dict_two, 'all_season_cc', prefix)
if all:
set_if_exists(dictio, dict_two, 'mae', prefix)
return dict_two
if val_stats is None:
if test_stats is None:
tq.set_postfix(train_loss=train_loss, **kwargs)
else:
test_print = get_stat_dict(test_stats, 'test')
tq.set_postfix(train_loss=train_loss, **test_print, **kwargs)
else:
val_print = get_stat_dict(val_stats, 'val', all=True)
if test_stats is None:
tq.set_postfix(train_loss=train_loss, **val_print, **kwargs)
else:
test_print = get_stat_dict(test_stats, 'test')
tq.set_postfix(train_loss=train_loss, **val_print, **test_print, **kwargs)
def save_model(model, model_dir, params, net_params, optimizer, epoch, ID='model.pkl'):
checkpoint_dict = {
'model': model.state_dict(),
'epoch': epoch,
'name': str(model),
'optimizer': optimizer.state_dict(),
'metadata': {
'date': datetime.now().strftime('%Y-%m-%d'),
'params': params,
'net_params': net_params
}
}
# In case a model dir was given --> save best model (wrt validation data)
if model_dir is not None:
torch.save(checkpoint_dict, f'{model_dir}/{ID}')
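# Usage sketch (editorial; argument values are illustrative):
#   save_model(net, model_dir='checkpoints', params=params,
#              net_params=net_params, optimizer=optimizer, epoch=epoch,
#              ID='best_model.pkl')
# writes a dict holding the model/optimizer state_dicts, the epoch and a
# metadata block (date plus both parameter dicts) to checkpoints/best_model.pkl.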
| 2.09375 | 2 |
geo/models/data_paths.py | stefantaubert/life | 2 | 12767203 | import os
from geo.data_dir_config import root
test_submission = root + "submissions/submission_val.csv"
img_list_dir = root + "image_lists/"
train_samples = img_list_dir + "train/samples.npy"
train_samples_species_map = img_list_dir + "train/species_map.py"
test_samples = img_list_dir + "test/samples.npy"
#xgb normal training paths
xgb_dir = root + "xgb/"
xgb_model = xgb_dir + "model"
xgb_model_dump = xgb_dir + "model_dump"
xgb_feature_importances = xgb_dir + "feature_importances.pdf"
if not os.path.exists(xgb_dir):
os.makedirs(xgb_dir)
submissions_dir = root + "submissions/"
if not os.path.exists(submissions_dir):
os.makedirs(submissions_dir)
vector_submission = submissions_dir + "vector_submission.csv"
xgb_multimodel_submission = submissions_dir + "xgb_multimodel_submission.csv"
xgb_multimodel_groups_submission = submissions_dir + "xgb_multimodel_groups_submission.csv"
xgb_singlemodel_submission = submissions_dir + "xgb_singlemodel_submission.csv"
random_submission = submissions_dir + "random_submission.csv"
probability_submission = submissions_dir + "probability_submission.csv"
#keras single model training paths
keras_training_dir = root + "keras_training_results/"
keras_training_gt = keras_training_dir + "gt.npy"
keras_training_results = keras_training_dir + "results.npy"
keras_training_species_map = keras_training_dir + "species_map.py"
keras_training_submission = keras_training_dir + "submission.csv"
keras_training_glc_ids = keras_training_dir + "glc_ids.npy"
keras_training_model = keras_training_dir + "model.h5"
#keras multi model training paths
keras_multi_model_training_dir = root + "keras_multi_model_training_results/"
keras_multi_model_training_gt = keras_multi_model_training_dir + "gt.npy"
keras_multi_model_training_results = keras_multi_model_training_dir + "results.npy"
keras_multi_model_training_species_map = keras_multi_model_training_dir + "species_map.py"
keras_multi_model_training_submission = keras_multi_model_training_dir + "submission.csv"
keras_multi_model_training_glc_ids = keras_multi_model_training_dir + "glc_ids.npy"
keras_multi_model_training_model1 = keras_multi_model_training_dir + "model1.h5"
keras_multi_model_training_model2 = keras_multi_model_training_dir + "model2.h5"
keras_multi_model_training_model3 = keras_multi_model_training_dir + "model3.h5"
keras_multi_model_training_model4 = keras_multi_model_training_dir + "model4.h5"
keras_multi_model_training_model5 = keras_multi_model_training_dir + "model5.h5"
keras_multi_model_training_model6 = keras_multi_model_training_dir + "model6.h5"
#keras single model test paths
keras_test_dir = root + "keras_predictions/"
keras_test_results = keras_test_dir + "results.npy"
keras_test_glc_ids = keras_test_dir + "glc_ids.npy"
keras_test_submission = keras_test_dir + "submission.csv"
#keras multi model test paths
keras_multi_model_test_dir = root + "keras_multi_model_predictions/"
keras_multi_model_test_results = keras_multi_model_test_dir + "results.npy"
keras_multi_model_test_glc_ids = keras_multi_model_test_dir + "glc_ids.npy"
keras_multi_model_test_submission = keras_multi_model_test_dir + "submission.csv"
if not os.path.exists(img_list_dir):
os.makedirs(img_list_dir)
os.makedirs(img_list_dir+"train/")
os.makedirs(img_list_dir+"test/")
if not os.path.exists(keras_multi_model_training_dir):
os.makedirs(keras_multi_model_training_dir)
if not os.path.exists(keras_training_dir):
os.makedirs(keras_training_dir)
if not os.path.exists(keras_test_dir):
os.makedirs(keras_test_dir)
if not os.path.exists(keras_multi_model_test_dir):
os.makedirs(keras_multi_model_test_dir)
| 2.1875 | 2 |
implementation/server/cms/migrations/0006_homepage_hero_cta_link.py | Aincient/cleo | 0 | 12767204 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-19 20:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('cms', '0005_auto_20180619_1525'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='hero_cta_link',
field=models.ForeignKey(blank=True, help_text='Choose a page to link to for the Call to Action', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page', verbose_name='Hero CTA link'),
),
]
| 1.679688 | 2 |
flask_scaffolding/scaffoldings/basic/gunicorn_conf_docker.py | MaXXXXfeng/flask-scaffolding | 0 | 12767205 | <filename>flask_scaffolding/scaffoldings/basic/gunicorn_conf_docker.py
# -*- coding: utf-8 -*-
import logging
LOG_PATH = '/data/logs/proj'
PID_FILE = 'proj.pid'
bind = '%s:%s' % ('0.0.0.0', 80)
workers = 4
worker_connections = 100
preload_app = True
timeout = 600
daemon = False
debug = False
loglevel = 'info'
pidfile = '%s/%s' % (LOG_PATH, PID_FILE)
accesslog = '-'
| 1.640625 | 2 |
ionoscloud/models/user_properties_put.py | ionos-cloud/ionos-cloud-sdk-python | 0 | 12767206 | # coding: utf-8
"""
CLOUD API
IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501
The version of the OpenAPI document: 6.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class UserPropertiesPut(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'firstname': 'str',
'lastname': 'str',
'email': 'str',
'password': 'str',
'administrator': 'bool',
'force_sec_auth': 'bool',
'sec_auth_active': 'bool',
'active': 'bool',
}
attribute_map = {
'firstname': 'firstname',
'lastname': 'lastname',
'email': 'email',
'password': 'password',
'administrator': 'administrator',
'force_sec_auth': 'forceSecAuth',
'sec_auth_active': 'secAuthActive',
'active': 'active',
}
def __init__(self, firstname=None, lastname=None, email=None, password=None, administrator=None, force_sec_auth=None, sec_auth_active=None, active=None, local_vars_configuration=None): # noqa: E501
"""UserPropertiesPut - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._firstname = None
self._lastname = None
self._email = None
self._password = None
self._administrator = None
self._force_sec_auth = None
self._sec_auth_active = None
self._active = None
self.discriminator = None
if firstname is not None:
self.firstname = firstname
if lastname is not None:
self.lastname = lastname
if email is not None:
self.email = email
if password is not None:
self.password = password
if administrator is not None:
self.administrator = administrator
if force_sec_auth is not None:
self.force_sec_auth = force_sec_auth
if sec_auth_active is not None:
self.sec_auth_active = sec_auth_active
if active is not None:
self.active = active
@property
def firstname(self):
"""Gets the firstname of this UserPropertiesPut. # noqa: E501
The first name of the user. # noqa: E501
:return: The firstname of this UserPropertiesPut. # noqa: E501
:rtype: str
"""
return self._firstname
@firstname.setter
def firstname(self, firstname):
"""Sets the firstname of this UserPropertiesPut.
The first name of the user. # noqa: E501
:param firstname: The firstname of this UserPropertiesPut. # noqa: E501
:type firstname: str
"""
self._firstname = firstname
@property
def lastname(self):
"""Gets the lastname of this UserPropertiesPut. # noqa: E501
The last name of the user. # noqa: E501
:return: The lastname of this UserPropertiesPut. # noqa: E501
:rtype: str
"""
return self._lastname
@lastname.setter
def lastname(self, lastname):
"""Sets the lastname of this UserPropertiesPut.
The last name of the user. # noqa: E501
:param lastname: The lastname of this UserPropertiesPut. # noqa: E501
:type lastname: str
"""
self._lastname = lastname
@property
def email(self):
"""Gets the email of this UserPropertiesPut. # noqa: E501
The email address of the user. # noqa: E501
:return: The email of this UserPropertiesPut. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this UserPropertiesPut.
The email address of the user. # noqa: E501
:param email: The email of this UserPropertiesPut. # noqa: E501
:type email: str
"""
self._email = email
@property
def password(self):
"""Gets the password of this UserPropertiesPut. # noqa: E501
password of the user # noqa: E501
:return: The password of this UserPropertiesPut. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this UserPropertiesPut.
password of the user # noqa: E501
:param password: The password of this UserPropertiesPut. # noqa: E501
:type password: str
"""
self._password = password
@property
def administrator(self):
"""Gets the administrator of this UserPropertiesPut. # noqa: E501
Indicates if the user has admin rights. # noqa: E501
:return: The administrator of this UserPropertiesPut. # noqa: E501
:rtype: bool
"""
return self._administrator
@administrator.setter
def administrator(self, administrator):
"""Sets the administrator of this UserPropertiesPut.
Indicates if the user has admin rights. # noqa: E501
:param administrator: The administrator of this UserPropertiesPut. # noqa: E501
:type administrator: bool
"""
self._administrator = administrator
@property
def force_sec_auth(self):
"""Gets the force_sec_auth of this UserPropertiesPut. # noqa: E501
Indicates if secure authentication should be forced on the user. # noqa: E501
:return: The force_sec_auth of this UserPropertiesPut. # noqa: E501
:rtype: bool
"""
return self._force_sec_auth
@force_sec_auth.setter
def force_sec_auth(self, force_sec_auth):
"""Sets the force_sec_auth of this UserPropertiesPut.
Indicates if secure authentication should be forced on the user. # noqa: E501
:param force_sec_auth: The force_sec_auth of this UserPropertiesPut. # noqa: E501
:type force_sec_auth: bool
"""
self._force_sec_auth = force_sec_auth
@property
def sec_auth_active(self):
"""Gets the sec_auth_active of this UserPropertiesPut. # noqa: E501
Indicates if secure authentication is active for the user. # noqa: E501
:return: The sec_auth_active of this UserPropertiesPut. # noqa: E501
:rtype: bool
"""
return self._sec_auth_active
@sec_auth_active.setter
def sec_auth_active(self, sec_auth_active):
"""Sets the sec_auth_active of this UserPropertiesPut.
Indicates if secure authentication is active for the user. # noqa: E501
:param sec_auth_active: The sec_auth_active of this UserPropertiesPut. # noqa: E501
:type sec_auth_active: bool
"""
self._sec_auth_active = sec_auth_active
@property
def active(self):
"""Gets the active of this UserPropertiesPut. # noqa: E501
Indicates if the user is active. # noqa: E501
:return: The active of this UserPropertiesPut. # noqa: E501
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this UserPropertiesPut.
Indicates if the user is active. # noqa: E501
:param active: The active of this UserPropertiesPut. # noqa: E501
:type active: bool
"""
self._active = active
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserPropertiesPut):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UserPropertiesPut):
return True
return self.to_dict() != other.to_dict()
| 1.585938 | 2 |
Database/publisher_methods.py | RomaOkorosso/fes-test-task | 0 | 12767207 | # created by RomaOkorosso at 21.03.2021
# publisher_methods.py
from datetime import datetime, timedelta, date
from typing import Optional
from Models.models import Publisher
from Models import schemas
from sqlalchemy.orm import Session
from Database.exceptions import *
from pydantic import ValidationError
class PublisherMethods:
@staticmethod
def create_publisher(db: Session, publisher: schemas.AddPublisher):
try:
publisher = Publisher(**publisher.dict())
except Exception as err:
print(err)
else:
db.add(publisher)
db.commit()
@staticmethod
def get_publisher(db: Session, publisher_id: int):
try:
publisher = db.query(Publisher).filter(Publisher.id == publisher_id).first()
except Exception as err:
print(err)
else:
if publisher is None:
raise ItemNotFound(f"No such publisher with id: {publisher_id} in database")
return publisher
| 2.625 | 3 |
Products/CMFCore/CachingPolicyManager.py | fulv/Products.CMFCore | 0 | 12767208 | ##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Caching tool implementation.
"""
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from App.Common import rfc1123_date
from App.special_dtml import DTMLFile
from DateTime.DateTime import DateTime
from OFS.Cache import ZCM_MANAGERS
from OFS.Cache import Cache
from OFS.Cache import CacheManager
from OFS.Cache import getVerifiedManagerIds
from OFS.interfaces import IObjectWillBeMovedEvent
from OFS.SimpleItem import SimpleItem
from Persistence import PersistentMapping
from Products.PageTemplates.Expressions import SecureModuleImporter
from Products.PageTemplates.Expressions import getEngine
from zope.component import getUtility
from zope.container.interfaces import IObjectMovedEvent
from zope.interface import implementer
from .Expression import Expression
from .interfaces import ICachingPolicy
from .interfaces import ICachingPolicyManager
from .interfaces import IMembershipTool
from .permissions import ManagePortal
from .permissions import View
from .utils import _dtmldir
from .utils import _setCacheHeaders
from .utils import _ViewEmulator
from .utils import registerToolInterface
# This is lame :(
# This listing is used to decide whether to wrap an object inside a "fake view"
# for the OFS.Cache caching. If it is a view type, no fake view wrap is needed.
VIEW_METATYPES = ('Page Template', 'DTML Method', 'DTML Document',
'Filesystem DTML Method', 'Filesystem Page Template')
def createCPContext(content, view_method, keywords, time=None):
"""
Construct an expression context for TALES expressions,
for use by CachingPolicy objects.
"""
mtool = getUtility(IMembershipTool)
if mtool.isAnonymousUser():
member = None
else:
member = mtool.getAuthenticatedMember()
if time is None:
time = DateTime()
# The name "content" is deprecated and will go away in CMF 2.0,
# please use "object" in your policy
data = {'content': content,
'object': content,
'view': view_method,
'keywords': keywords,
'request': getattr(content, 'REQUEST', {}),
'member': member,
'modules': SecureModuleImporter,
'nothing': None,
'time': time}
return getEngine().getContext(data)
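# Example (editorial sketch): a CachingPolicy predicate evaluated against this
# context could be the TALES expression
#   python: member is None and view == 'index_html'
# i.e. "anonymous requests rendered through the index_html view"; the names
# used ('member', 'view', ...) all come from the `data` mapping above.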
class CPMCache(Cache):
""" Simple OFS.Cache-implementation
"""
security = ClassSecurityInfo()
@security.private
def ZCache_invalidate(self, ob):
""" An object is forced out of the cache
This implementation stores nothing and does not attempt to
communicate with cache servers, so this is a no-op.
"""
pass
@security.private
def ZCache_get(self, ob, view_name, keywords, mtime_func, default):
""" An object is retrieved from the cache
This implementation stores nothing - a no-op.
"""
pass
@security.private
def ZCache_set(self, ob, data, view_name, keywords, mtime_func):
""" An object is pushed into the cache
Even though this cache implementation does not cache anything per se,
this method is used as a suitable hook to activate the real heavy
lifting done by the CachePolicyManager.
"""
if ob.meta_type not in VIEW_METATYPES:
ob = _ViewEmulator().__of__(ob)
return _setCacheHeaders(ob, extra_context={})
InitializeClass(CPMCache)
@implementer(ICachingPolicy)
class CachingPolicy:
"""
Represent a single class of cachable objects:
- class membership is defined by 'predicate', a TALES expression
with access to the following top-level names:
'object' -- the object itself
'view' -- the name of the view method
'keywords' -- keywords passed to the request
'request' -- the REQUEST object itself
'member' -- the authenticated member, or None if anonymous
'modules' -- usual TALES access-with-import
'nothing' -- None
'time' -- A DateTime object for the current date and time
- mtime_func is used to set the "Last-modified" HTTP response
header, which is another TALES expression evaluated
against the same namespace. If not specified explicitly,
uses 'object/modified'. mtime_func is also used in responding
to conditional GETs.
- The "Expires" HTTP response header and the "max-age" token of
the "Cache-control" header will be set using 'max_age_secs',
if passed; it should be an integer value in seconds.
- The "s-maxage" token of the "Cache-control" header will be
set using 's_max_age_secs', if passed; it should be an integer
value in seconds.
- The "Vary" HTTP response headers will be set if a value is
provided. The Vary header is described in RFC 2616. In essence,
it instructs caches that respect this header (such as Squid
after version 2.4) to distinguish between requests not just by
the request URL, but also by values found in the headers showing
in the Vary tag. "Vary: Cookie" would force Squid to also take
Cookie headers into account when deciding what cached object to
choose and serve in response to a request.
- The "ETag" HTTP response header will be set if a value is
provided. The value is a TALES expression and the result
after evaluation will be used as the ETag header value.
- Other tokens will be added to the "Cache-control" HTTP response
header as follows:
'no_cache=1' argument => "no-cache" token
'no_store=1' argument => "no-store" token
'must_revalidate=1' argument => "must-revalidate" token
'proxy_revalidate=1' argument => "proxy-revalidate" token
'public=1' argument => "public" token
'private=1' argument => "private" token
'no_transform=1' argument => "no-transform" token
- The last_modified argument is used to determine whether to add a
Last-Modified header. last_modified=1 by default. There appears
to be a bug in IE 6 (and possibly other versions) that uses the
Last-Modified header plus some heuristics rather than the other
explicit caching headers to determine whether to render content
from the cache. If you set, say, max-age=0, must-revalidate and
have a Last-Modified header some time in the past, IE will
recognize that the page in cache is stale and will request an
update from the server BUT if you have a Last-Modified header
with an older date, will then ignore the update and render from
the cache, so you may want to disable the Last-Modified header
when controlling caching using Cache-Control headers.
- The pre-check and post-check Cache-Control tokens are Microsoft
proprietary tokens added to IE 5+. Documentation can be found
here: http://msdn.microsoft.com/workshop/author/perf/perftips.asp
Unfortunately these are needed to make IE behave correctly.
"""
def __init__(self,
policy_id,
predicate='',
mtime_func='',
max_age_secs=None,
no_cache=0,
no_store=0,
must_revalidate=0,
vary='',
etag_func='',
s_max_age_secs=None,
proxy_revalidate=0,
public=0,
private=0,
no_transform=0,
enable_304s=0,
last_modified=1,
pre_check=None,
post_check=None):
if not predicate:
predicate = 'python:1'
if not mtime_func:
mtime_func = 'object/modified'
if max_age_secs is not None:
if str(max_age_secs).strip() == '':
max_age_secs = None
else:
max_age_secs = int(max_age_secs)
if s_max_age_secs is not None:
if str(s_max_age_secs).strip() == '':
s_max_age_secs = None
else:
s_max_age_secs = int(s_max_age_secs)
if pre_check is not None:
if str(pre_check).strip() == '':
pre_check = None
else:
pre_check = int(pre_check)
if post_check is not None:
if str(post_check).strip() == '':
post_check = None
else:
post_check = int(post_check)
self._policy_id = policy_id
self._predicate = Expression(text=predicate)
self._mtime_func = Expression(text=mtime_func)
self._max_age_secs = max_age_secs
self._s_max_age_secs = s_max_age_secs
self._no_cache = int(no_cache)
self._no_store = int(no_store)
self._must_revalidate = int(must_revalidate)
self._proxy_revalidate = int(proxy_revalidate)
self._public = int(public)
self._private = int(private)
self._no_transform = int(no_transform)
self._vary = vary
self._etag_func = Expression(text=etag_func)
self._enable_304s = int(enable_304s)
self._last_modified = int(last_modified)
self._pre_check = pre_check
self._post_check = post_check
def getPolicyId(self):
"""
"""
return self._policy_id
def getPredicate(self):
"""
"""
return self._predicate.text
def getMTimeFunc(self):
"""
"""
return self._mtime_func.text
def getMaxAgeSecs(self):
"""
"""
return self._max_age_secs
def getSMaxAgeSecs(self):
"""
"""
return getattr(self, '_s_max_age_secs', None)
def getNoCache(self):
"""
"""
return self._no_cache
def getNoStore(self):
"""
"""
return self._no_store
def getMustRevalidate(self):
"""
"""
return self._must_revalidate
def getProxyRevalidate(self):
"""
"""
return getattr(self, '_proxy_revalidate', 0)
def getPublic(self):
"""
"""
return getattr(self, '_public', 0)
def getPrivate(self):
"""
"""
return getattr(self, '_private', 0)
def getNoTransform(self):
"""
"""
return getattr(self, '_no_transform', 0)
def getVary(self):
"""
"""
return getattr(self, '_vary', '')
def getETagFunc(self):
"""
"""
etag_func_text = ''
etag_func = getattr(self, '_etag_func', None)
if etag_func is not None:
etag_func_text = etag_func.text
return etag_func_text
def getEnable304s(self):
"""
"""
return getattr(self, '_enable_304s', 0)
def getLastModified(self):
"""Should we set the last modified header?"""
return getattr(self, '_last_modified', 1)
def getPreCheck(self):
"""
"""
return getattr(self, '_pre_check', None)
def getPostCheck(self):
"""
"""
return getattr(self, '_post_check', None)
def testPredicate(self, expr_context):
""" Does this request match our predicate?"""
return self._predicate(expr_context)
def getHeaders(self, expr_context):
"""
Does this request match our predicate? If so, return a
sequence of caching headers as ( key, value ) tuples.
Otherwise, return an empty sequence.
"""
headers = []
if self.testPredicate(expr_context):
if self.getLastModified():
mtime = self._mtime_func(expr_context)
if isinstance(mtime, str):
mtime = DateTime(mtime)
if mtime is not None:
mtime_str = rfc1123_date(mtime.timeTime())
headers.append(('Last-modified', mtime_str))
control = []
if self.getMaxAgeSecs() is not None:
now = expr_context.vars['time']
exp_time_str = rfc1123_date(now.timeTime()
+ self._max_age_secs)
headers.append(('Expires', exp_time_str))
control.append('max-age=%d' % self._max_age_secs)
if self.getSMaxAgeSecs() is not None:
control.append('s-maxage=%d' % self._s_max_age_secs)
if self.getNoCache():
control.append('no-cache')
# The following is for HTTP 1.0 clients
headers.append(('Pragma', 'no-cache'))
if self.getNoStore():
control.append('no-store')
if self.getPublic():
control.append('public')
if self.getPrivate():
control.append('private')
if self.getMustRevalidate():
control.append('must-revalidate')
if self.getProxyRevalidate():
control.append('proxy-revalidate')
if self.getNoTransform():
control.append('no-transform')
pre_check = self.getPreCheck()
if pre_check is not None:
control.append('pre-check=%d' % pre_check)
post_check = self.getPostCheck()
if post_check is not None:
control.append('post-check=%d' % post_check)
if control:
headers.append(('Cache-control', ', '.join(control)))
if self.getVary():
headers.append(('Vary', self._vary))
if self.getETagFunc():
headers.append(('ETag', self._etag_func(expr_context)))
return headers
@implementer(ICachingPolicyManager)
class CachingPolicyManager(SimpleItem, CacheManager):
"""
Manage the set of CachingPolicy objects for the site; dispatch
to them from skin methods.
"""
id = 'caching_policy_manager'
meta_type = 'CMF Caching Policy Manager'
zmi_icon = 'fa fa-rocket'
_isCacheManager = 1 # Dead chicken. Yum.
security = ClassSecurityInfo()
def __init__(self):
self._policy_ids = ()
self._policies = PersistentMapping()
#
# ZMI
#
manage_options = (
({'label': 'Policies', 'action': 'manage_cachingPolicies',
'help': ('CMFCore', 'CPMPolicies.stx')},) +
CacheManager.manage_options +
SimpleItem.manage_options)
security.declareProtected(ManagePortal, # NOQA: flake8: D001
'manage_cachingPolicies')
manage_cachingPolicies = DTMLFile('cachingPolicies', _dtmldir)
@security.public
def listPolicies(self):
"""List '(id, (policy, typeObjectName))' tuples for all policies.
"""
return tuple([(id, self._policies[id]) for id in self._policy_ids])
@security.protected(ManagePortal)
def addPolicy(self,
policy_id,
predicate, # TALES expr (def. 'python:1')
mtime_func, # TALES expr (def. 'object/modified')
max_age_secs, # integer, seconds (def. 0)
no_cache, # boolean (def. 0)
no_store, # boolean (def. 0)
must_revalidate, # boolean (def. 0)
vary, # string value
etag_func, # TALES expr (def. '')
REQUEST=None,
s_max_age_secs=None, # integer, seconds (def. None)
proxy_revalidate=0, # boolean (def. 0)
public=0, # boolean (def. 0)
private=0, # boolean (def. 0)
no_transform=0, # boolean (def. 0)
enable_304s=0, # boolean (def. 0)
last_modified=1, # boolean (def. 1)
pre_check=None, # integer, default None
post_check=None): # integer, default None
"""
Add a caching policy.
"""
if max_age_secs is None or str(max_age_secs).strip() == '':
max_age_secs = None
else:
max_age_secs = int(max_age_secs)
if s_max_age_secs is None or str(s_max_age_secs).strip() == '':
s_max_age_secs = None
else:
s_max_age_secs = int(s_max_age_secs)
if pre_check is None or str(pre_check).strip() == '':
pre_check = None
else:
pre_check = int(pre_check)
if post_check is None or str(post_check).strip() == '':
post_check = None
else:
post_check = int(post_check)
self._addPolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message='
+ 'Policy+added.')
@security.protected(ManagePortal)
def updatePolicy(self,
policy_id,
predicate, # TALES expr (def. 'python:1')
mtime_func, # TALES expr (def. 'object/modified')
max_age_secs, # integer, seconds (def. 0)
no_cache, # boolean (def. 0)
no_store, # boolean (def. 0)
must_revalidate, # boolean (def. 0)
vary, # string value
etag_func, # TALES expr (def. '')
REQUEST=None,
s_max_age_secs=None, # integer, seconds (def. 0)
proxy_revalidate=0, # boolean (def. 0)
public=0, # boolean (def. 0)
private=0, # boolean (def. 0)
no_transform=0, # boolean (def. 0)
enable_304s=0, # boolean (def. 0)
last_modified=1, # boolean (def. 1)
pre_check=0, # integer, default=None
post_check=0): # integer, default=None
"""
Update a caching policy.
"""
if max_age_secs is None or str(max_age_secs).strip() == '':
max_age_secs = None
else:
max_age_secs = int(max_age_secs)
if s_max_age_secs is None or str(s_max_age_secs).strip() == '':
s_max_age_secs = None
else:
s_max_age_secs = int(s_max_age_secs)
if pre_check is None or str(pre_check).strip() == '':
pre_check = None
else:
pre_check = int(pre_check)
if post_check is None or str(post_check).strip() == '':
post_check = None
else:
post_check = int(post_check)
self._updatePolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message='
+ 'Policy+updated.')
@security.protected(ManagePortal)
def movePolicyUp(self, policy_id, REQUEST=None):
"""
Move a caching policy up in the list.
"""
policy_ids = list(self._policy_ids)
ndx = policy_ids.index(policy_id)
if ndx == 0:
msg = 'Policy+already+first.'
else:
self._reorderPolicy(policy_id, ndx - 1)
msg = 'Policy+moved.'
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message=%s' % msg)
@security.protected(ManagePortal)
def movePolicyDown(self, policy_id, REQUEST=None):
"""
Move a caching policy down in the list.
"""
policy_ids = list(self._policy_ids)
ndx = policy_ids.index(policy_id)
if ndx == len(policy_ids) - 1:
msg = 'Policy+already+last.'
else:
self._reorderPolicy(policy_id, ndx + 1)
msg = 'Policy+moved.'
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message=%s' % msg)
@security.protected(ManagePortal)
def removePolicy(self, policy_id, REQUEST=None):
"""
Remove a caching policy.
"""
self._removePolicy(policy_id)
if REQUEST is not None:
pth = '/manage_cachingPolicies?manage_tabs_message=Policy+removed.'
REQUEST['RESPONSE'].redirect('%s%s' % (self.absolute_url(), pth))
#
# Policy manipulation methods.
#
@security.private
def _addPolicy(self,
policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs=None,
proxy_revalidate=0,
public=0,
private=0,
no_transform=0,
enable_304s=0,
last_modified=1,
pre_check=None,
post_check=None):
"""
Add a policy to our registry.
"""
policy_id = str(policy_id).strip()
if not policy_id:
raise ValueError('Policy ID is required!')
if policy_id in self._policy_ids:
raise KeyError('Policy %s already exists!' % policy_id)
self._policies[policy_id] = CachingPolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
idlist = list(self._policy_ids)
idlist.append(policy_id)
self._policy_ids = tuple(idlist)
@security.private
def _updatePolicy(self,
policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs=None,
proxy_revalidate=0,
public=0,
private=0,
no_transform=0,
enable_304s=0,
last_modified=1,
pre_check=None,
post_check=None):
"""
Update a policy in our registry.
"""
if policy_id not in self._policy_ids:
raise KeyError('Policy %s does not exist!' % policy_id)
self._policies[policy_id] = CachingPolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
@security.private
def _reorderPolicy(self, policy_id, newIndex):
"""
Reorder a policy in our registry.
"""
if policy_id not in self._policy_ids:
raise KeyError('Policy %s does not exist!' % policy_id)
idlist = list(self._policy_ids)
ndx = idlist.index(policy_id)
pred = idlist[ndx]
idlist = idlist[:ndx] + idlist[ndx + 1:]
idlist.insert(newIndex, pred)
self._policy_ids = tuple(idlist)
@security.private
def _removePolicy(self, policy_id):
"""
Remove a policy from our registry.
"""
if policy_id not in self._policy_ids:
raise KeyError('Policy %s does not exist!' % policy_id)
del self._policies[policy_id]
idlist = list(self._policy_ids)
ndx = idlist.index(policy_id)
idlist = idlist[:ndx] + idlist[ndx + 1:]
self._policy_ids = tuple(idlist)
#
# 'portal_caching' interface methods
#
@security.protected(View)
def getHTTPCachingHeaders(self, content, view_method, keywords, time=None):
"""
Return a list of HTTP caching headers based on 'content',
'view_method', and 'keywords'.
"""
context = createCPContext(content, view_method, keywords, time=time)
for _policy_id, policy in self.listPolicies():
headers = policy.getHeaders(context)
if headers:
return headers
return ()
@security.protected(View)
def getModTimeAndETag(self, content, view_method, keywords, time=None):
""" Return the modification time and ETag for the content object,
view method, and keywords as the tuple (modification_time, etag,
set_last_modified_header), where modification_time is a DateTime,
or None.
"""
context = createCPContext(content, view_method, keywords, time=time)
for _policy_id, policy in self.listPolicies():
if policy.getEnable304s() and policy.testPredicate(context):
last_modified = policy._mtime_func(context)
if isinstance(last_modified, str):
last_modified = DateTime(last_modified)
content_etag = None
if policy.getETagFunc():
content_etag = policy._etag_func(context)
return (last_modified, content_etag, policy.getLastModified())
return None
#
# OFS.CacheManager API
#
@security.private
def ZCacheManager_getCache(self):
""" Retrieve a cache object
"""
cache = getattr(self, '_cache', None)
if cache is None:
self._cache = CPMCache()
cache = self._cache
return cache
InitializeClass(CachingPolicyManager)
registerToolInterface('caching_policy_manager', ICachingPolicyManager)
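# Illustrative sketch (added annotation; identifiers and values below are
# placeholder examples, not part of the original module). A site typically
# registers one or more policies and lets skin code ask the tool for headers,
# roughly:
#
#     cpm = CachingPolicyManager()
#     cpm.addPolicy('weak-caching', 'python:1', 'object/modified',
#                   max_age_secs=600, no_cache=0, no_store=0,
#                   must_revalidate=0, vary='', etag_func='')
#     headers = cpm.getHTTPCachingHeaders(content, 'view', {})
#
# In a real deployment the tool is added via manage_addCachingPolicyManager
# and policies are configured through the ZMI form defined above.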
def handleCachingPolicyManagerEvent(ob, event):
""" Event subscriber for (un)registering a CPM as CacheManager
"""
if not ICachingPolicyManager.providedBy(ob):
return
if IObjectMovedEvent.providedBy(event):
if event.newParent is not None:
ids = getVerifiedManagerIds(event.newParent)
id = ob.getId()
if id not in ids:
setattr(event.newParent, ZCM_MANAGERS, ids + (id,))
elif IObjectWillBeMovedEvent.providedBy(event):
if event.oldParent is not None:
ids = list(getVerifiedManagerIds(event.oldParent))
id = ob.getId()
if id in ids:
ids.remove(id)
setattr(event.oldParent, ZCM_MANAGERS, tuple(ids))
def manage_addCachingPolicyManager(self, REQUEST=None):
"""
Add a CPM to self.
"""
id = CachingPolicyManager.id
mgr = CachingPolicyManager()
self._setObject(id, mgr)
if REQUEST is not None:
pth = '/manage_main?manage_tabs_message=Caching+Policy+Manager+added.'
REQUEST['RESPONSE'].redirect('%s%s' % (self.absolute_url(), pth))
| 1.484375 | 1 |
utils/test_analysis.py | ShenLeixian/data2vis | 103 | 12767209 | import os
import json
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# t_stat, p_val = stats.ttest_ind(sample1, sample2, equal_var=False)
test_result_dir = "utils/testresults"
all_results = {}
aggregate_terms = [
"count", "valid", "missing", "distinct", "sum", "mean", "average",
"variance", "variancep", "stdev", "stdevp", "stderr", "median", "q1", "q3",
"ci0", "ci1", "min", "max", "argmin", "argmax"
]
file_paths = [
"/vizmodeluninat5.json", "/vizmodeluninat10.json",
"/vizmodeluninat15.json", "/vizmodeluninat20.json", "/vizmodeluni5.json",
"/vizmodeluni10.json", "/vizmodeluni15.json", "/vizmodeluni20.json",
"/vizmodelbi5.json", "/vizmodelbi10.json", "/vizmodelbi15.json",
"/vizmodelbi20.json"
]
def analyze_test_suite(test_dataset_directory):
# for subdir, dirs, files in os.walk(test_dataset_directory):
# for file in files:
# filepath = subdir + os.sep + file
# if filepath.endswith(
# "json") and not filepath.endswith("lsit.json"):
for filepath in file_paths:
filepath = test_result_dir + filepath
# data = json.load(open(filepath))
# print(filepath)
analyze_data(filepath)
def is_valid_aggregate(agg_val):
if (agg_val not in aggregate_terms):
# print("issh", agg_val)
return False
else:
return True
def computer_anova():
print("anova")
def analyze_data(filepath):
data = json.load(open(filepath))
beam_width = data["beamwidth"]
valid_json_array = []
valid_vega_array = []
phantom_count_array = []
x = list(range(0, 100))
for row in data["data"]:
valid_json_count = row["validjsoncount"] / beam_width
valid_json_array.append(valid_json_count)
valid_vega_count = row["validvegacount"]
vs_array = row["vegaspecarray"]
# mark specs with incorrect aggregation value as invalid vega
for vs_row in vs_array:
if ("aggregate" in vs_row["encoding"]["y"]):
if not is_valid_aggregate(
vs_row["encoding"]["y"]["aggregate"]):
valid_vega_count -= 1
else:
if ("aggregate" in vs_row["encoding"]["x"]):
if not is_valid_aggregate(
vs_row["encoding"]["x"]["aggregate"]):
valid_vega_count -= 1
# print(valid_vega_count, row["validjsoncount"])
valid_vegap_count = valid_vega_count
valid_vega_count = valid_vega_count / beam_width
valid_vega_array.append(valid_vega_count)
if (valid_vega_count == 0):
phantom_count = 0
else:
phantom_count = row["phantomcount"] / valid_vegap_count
phantom_count_array.append(phantom_count)
# print("Count", row["phantomcount"], valid_vegap_count)
# print(x, valid_json_array)
# plt.plot(x, valid_json_array)
# plt.plot(x, valid_vega_array)
# plt.plot(x, phantom_count_array)
# plt.show()
print(
filepath.split("vizmodel")[1], "Json:",
round(np.mean(valid_json_array), 3), "Vega",
round(np.mean(valid_vega_array), 3), "Mean % Phantom",
round(np.mean(phantom_count_array), 3))
result = {"json:": valid_json_array, "vega": valid_vega_array}
analyze_test_suite(test_result_dir)
# data = json.load(open("utils/testresults/vizmodelbi15.json"))
# print(len(data["data"]))
# analyze_data("utils/testresults/vizmodeluninat15.json")
| 2.3125 | 2 |
cgi-bin/pay_order.py | metallhead01/CREATE_ORDER_HTTP_SERVER | 0 | 12767210 | <filename>cgi-bin/pay_order.py<gh_stars>0
#!C:\Users\Rustam\AppData\Local\Programs\Python\Python36\python.exe
import time
import urllib3
import requests
import xml.etree.ElementTree as ET
print("Content-type: text/html;charset=utf8\r\n\r\n")
print("<html>")
#print("<head>Something</head>")
print("<body>")
def pay_order(i, p, user_name, pass_word):
result = ""
session = requests.session()
ip_string = 'https://' + i + ":" + p + '/rk7api/v0/xmlinterface.xml'
xml_request_string = ('<?xml version="1.0" encoding="UTF-8"?><RK7Query>'
'<RK7CMD CMD="GetOrderList" onlyOpened = "1"/></RK7Query>')
xml_unicode_request_string = xml_request_string.encode('utf-8')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
response_request_order = session.request(method='POST', url=ip_string,
data=xml_unicode_request_string,
auth=(user_name, pass_word), verify=False)
response_request_order.encoding = 'UTF-8'
    # Parse the received response in order to get the GUID of the order that was just created.
parsed_guid_nodes = ET.fromstring(response_request_order.content)
    '''Iterate over all "Item" nodes in the direct child node "Dishes"'''
parsed_guid_order = parsed_guid_nodes.attrib
    # Check whether the order can be created - if the status is anything other than "Ok", report the error.
if parsed_guid_order.get('Status') != "Ok":
result = parsed_guid_order.get('ErrorText')
guid = ''
to_pay = ''
for item in parsed_guid_nodes.findall("./Visit/Orders/Order"):
attr_of_item_node = (item.attrib)
guid = str(attr_of_item_node.get('guid'))
to_pay = str(attr_of_item_node.get('ToPaySum'))
    # waiting time before payment
#time.sleep(int(pay_time))
xml_pay_string = ('<RK7Query><RK7CMD CMD="PayOrder"><Order guid="' + guid + '"/>'
'<Cashier code="9999"/><Station code="1"/><Payment id="1" amount="' +
to_pay + '"/></RK7CMD></RK7Query>')
xml_pay_string = xml_pay_string.encode('utf-8')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
response_pay_order = session.request(method='POST', url=ip_string, data=xml_pay_string,
auth=(user_name, pass_word), verify=False)
parsed_pay_nodes = ET.fromstring(response_pay_order.content)
parsed_pay_ok = parsed_pay_nodes.attrib
response_pay_order.encoding = 'UTF-8'
if parsed_pay_ok.get('Status') != "Ok":
result = str(parsed_pay_ok.get('ErrorText'))
else:
result = "Order " + guid + " has been successfully paid."
return result
call_func = pay_order("127.0.0.1", "4545", "Admin", "1")
print("<h2>" + call_func + "</h2>")
print("</body>")
print("</html>")
| 2.75 | 3 |
xnat_dashboards/app/authentication/controllers.py | XNAT-Dashboards/XNAT-Dashboards | 0 | 12767211 | # Import flask dependencies
from flask import Blueprint, render_template, session, request, redirect,\
url_for
from xnat_dashboards.app.authentication import model
# Define the blueprint: 'auth', set its url prefix: app.url/auth
auth = Blueprint('auth', __name__, url_prefix='/auth')
# Set the route and accepted methods
@auth.route('/login/', methods=['GET', 'POST'])
def login():
"""
This is the login route. Uses authentication model for checking
the user details.
User Exist function checks whether user exist on the XNAT instance.
If user exist we proceed further.
Then this checks user roles whether user role is assigned, If user
roles isn't assigned we set it as guest.
Returns:
route: It routes to dashboard if user details are correct
else reloads the page
"""
servers_list = model.login_urls()
if request.method == 'GET':
# Checks if there is a error key in session
if 'error' in session:
if session['error'] == -1:
display_error = "Logged out"
del session['error']
else:
display_error = session['error']
del session['error']
return render_template(
'authentication/login.html',
error=display_error,
servers_list=servers_list)
else:
# If there is no error meaning the user is called login
# page using browser instead of a redirect
return render_template(
'authentication/login.html', servers_list=servers_list)
else:
# Fetch details from form
user_details = request.form
username = user_details['username']
        password = user_details['password']
server_name = user_details['server']
for server in servers_list:
if server_name == server['name']:
server_url = server['url']
ssl = server['verify']
break
# Check from API whether user exist in the XNAT instance
exists = model.user_exists(username, password, server_url, ssl)
if type(exists) == int:
# If exist check whether the XNAT instance is same
config = model.user_role_config(username)
# If same xnat instance check role assign to user
if config:
# If no role assigned to a user then guest is set
# as default role
if username in config['user roles']:
session['role_exist'] = config['user roles'][username]
else:
session['role_exist'] = 'guest'
# Add data to session
session['username'] = username
session['server'] = server_url
session['project_visible'] = config['project_visible']
# Redirect to dashboard
return redirect(url_for('dashboard.stats'))
else:
                # User is forbidden to log in
                session['error'] = ("User role assigned is forbidden, "
                                    "login not allowed")
return redirect(url_for('auth.login'))
else:
# Wrong password or username
session['error'] = "Wrong Password or Username"
return redirect(url_for('auth.login'))
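# Illustrative sketch (added annotation; example values are hypothetical).
# The dashboard configuration consumed above via model.user_role_config()
# is expected to expose at least the two keys read in this view, e.g.:
#
#     {
#         "user roles": {"alice": "admin"},   # users missing here fall back to "guest"
#         "project_visible": ["project_a", "project_b"],
#     }
#
# If user_role_config() returns nothing, the view refuses the login (the
# "forbidden" branch above).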
| 2.828125 | 3 |
suddendev/game/game.py | SuddenDevs/SuddenDev | 2 | 12767212 | <filename>suddendev/game/game.py
#!/usr/bin/python3.5
from .vector import Vector
from .color import Color3, random_color3, from_hex
from .player import Player
from .enemy import Enemy
from .pickup import Pickup, PickupType
from .enemy_type import EnemyType
from .wall import Wall
from .core import Core
from .event import Event, EventType, GameOverType
from .game_config import GameConfig
import time
import random
import math
def random_pos_edge(size, width, height):
n = random.getrandbits(2)
pos = Vector(0, 0)
r = random.random()
if n == 0:
pos = Vector(-size, r * height)
elif n == 1:
pos = Vector(width + size, r * height)
elif n == 2:
pos = Vector(r * width, -size)
else:
pos = Vector(r * width, height + size)
return pos
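# Illustrative note (added annotation): random_pos_edge() picks one of the four
# map borders and returns a point just outside it, so spawned entities walk in
# from off-screen. For example (hypothetical values), with size=10, width=800,
# height=600 and n == 0, the result lies on the vertical line x == -10 with
# 0 <= y < 600.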
class Map:
def __init__(self, width, height):
random.seed(time.time())
self.width = width
self.height = height
class Game:
def __init__(self, wave, player_names, scripts, player_ids, colors):
self.walls = []
self.events = []
self.enemies = []
self.pickups = []
self.stats = {}
self.wave = wave
self.gc = GameConfig(wave)
self.enemy_spawn_timer = self.gc.ENEMY_SPAWN_DELAY
self.pickup_spawn_timer = self.gc.POW_SPAWN_DELAY
self.pickup_count = 0
self.time = 0
self.active = True
#Map
self.map = Map(self.gc.MAP_WIDTH, self.gc.MAP_HEIGHT)
#Core
self.core = Core()
self.core.pos = Vector(self.map.width/2, self.map.height/2)
self.core.health_max = self.gc.CORE_HEALTH
self.core.health = self.core.health_max
#Players
self.init_players(player_names, scripts, player_ids, colors)
self.events_add(Event(EventType.GAME_START))
def init_players(self, player_names, scripts, player_ids, colors):
player_count = len(player_names)
self.players = []
for i in range(player_count):
name = player_names[i]
script = scripts[i]
player_id = player_ids[i]
color = from_hex(colors[i])
angle = i * 2 * math.pi / player_count - math.pi/2
player = Player(name, color, self, script, player_id)
player.pos = self.get_random_spawn(player.size)
player.pos = self.core.pos\
+ Vector(math.cos(angle), math.sin(angle))\
* (self.core.size + player.size * 2)
self.init_player_stats(player_id)
self.players.append(player)
def init_player_stats(self, player_id):
self.stats[player_id] = dict()
self.stats[player_id]['kills'] = 0
def events_add(self, event):
self.events.append(event)
def events_flush(self):
del self.events[:]
#### Main Loop ####
def tick(self, delta):
#Timekeeping
self.time += delta
self.enemy_spawn_timer -= delta
self.pickup_spawn_timer -= delta
# Update entities
self.update_players(delta)
self.update_enemies(delta)
self.spawn_pickups()
self.spawn_enemies()
#Ending Conditions / Wave Conditions
result = self.check_if_game_over()
if result is not None:
self.active = False
self.events_add(Event(EventType.GAME_END, result))
# TODO: nicer way of seeing if the wave was cleared
self.cleared = result == GameOverType.WIN
def check_if_game_over(self):
if len(self.enemies) == 0 and not self.gc.enemy_types:
return GameOverType.WIN
elif self.core.health <= 0:
return GameOverType.LOSE_CORE
elif len(self.players) == 0:
return GameOverType.LOSE_PLAYERS
elif self.time >= self.gc.TIME_LIMIT:
return GameOverType.LOSE_TIMEOUT
else:
return None
def update_players(self, delta):
#Update Players
for p in self.players:
if p.health <= 0:
self.players.remove(p)
self.events_add(Event(EventType.PLAYER_DEATH, p))
break
pos = self.clamp_pos(p.update(delta))
if not self.collides_with_walls(pos, p.size):
p.pos = pos
# Pickup pickups
for pu in self.pickups:
if pu.intersects(p):
self.events_add(Event(EventType.PICKUP_USED, pu))
pu.pickup(p)
self.pickups.remove(pu)
def update_enemies(self, delta):
#Update Enemies
for e in self.enemies:
if e.health <= 0:
self.enemies.remove(e)
self.events_add(Event(EventType.ENEMY_DEATH, e))
if e.is_boss:
# TODO make less ugly
# Temporarily fake that we're in a lower wave to spawn non-boss enemies
temp_gc = self.gc
self.gc = GameConfig(self.wave - 1)
self.wave -= 1
types = [EnemyType.CORE_KILLER, EnemyType.PLAYER_KILLER]
for i in range(self.gc.BOSS_MINION_NUM):
position = Vector(e.pos.x, e.pos.y)
position.x += random.randint(-e.size * 2, e.size * 2)
position.y += random.randint(-e.size * 2, e.size * 2)
self.gc.enemy_types.append(random.choice(types))
e = self.spawn_enemy(position)
e.speed_max = 40
e.damage = 1
e.attack_delay = 15
e.size = 5
e.health_max = 20
e.health = 20
self.wave += 1
self.gc = temp_gc
else:
pos = self.clamp_pos(e.update(delta))
if not self.collides_with_walls(pos, e.size):
e.pos = pos
def spawn_pickups(self):
# pickupTypes = [PickupType.AMMO, PickupType.HEALTH]
pickupTypes = [pickup for _, pickup in PickupType.__members__.items()]
#Pickup Spawning
if (self.pickup_spawn_timer <= 0
and self.pickup_count < self.gc.POW_LIMIT
and random.random() < self.gc.POW_SPAWN_PROBABILITY):
pu = Pickup(self.get_random_spawn(self.gc.POW_SIZE), random.choice(pickupTypes))
self.pickups.append(pu)
self.pickup_count += 1
self.events_add(Event(EventType.PICKUP_SPAWN, pu))
def spawn_enemy(self, position=None):
enemy_type=random.choice(self.gc.enemy_types)
self.gc.enemy_types.remove(enemy_type)
enemy = Enemy(self, enemy_type=enemy_type)
if position is None:
enemy.pos = random_pos_edge(enemy.size,
self.map.width, self.map.height)
else:
enemy.pos = position
self.enemies.append(enemy)
self.events_add(Event(EventType.ENEMY_SPAWN, enemy))
return enemy
def spawn_enemies(self):
#Enemy Spawning
if (self.enemy_spawn_timer <= 0
and self.gc.enemy_types
and random.random() < self.gc.ENEMY_SPAWN_PROBABILITY):
self.spawn_enemy()
def clamp_pos(self, pos):
if pos.x < 0:
pos.x = 0
if pos.y < 0:
pos.y = 0
if pos.x > self.map.width:
pos.x = self.map.width
if pos.y > self.map.height:
pos.y = self.map.height
return pos
def collides_with_walls(self, center, size):
for w in self.walls:
if w.intersects(center, size):
return True
return False
def find_by_tag(self, tag):
for e in self.enemies:
if e.tag == tag:
return e
for e in self.players:
if e.tag == tag:
return e
for e in self.pickups:
if e.tag == tag:
return e
if self.core.tag == tag:
return self.core
return None
def get_random_spawn(self, size):
""" Generates a random position that does not collide with any walls. """
cond = True
while cond:
pos = Vector(random.random()*self.map.width,
random.random()*self.map.height)
cond = self.collides_with_walls(pos, size)
return pos
def was_cleared(self):
return self.cleared
def get_map_width(self):
return self.map.width
def get_map_height(self):
return self.map.height
| 2.484375 | 2 |
safekit/models/tiered_lm.py | duebukua/safekit | 117 | 12767213 | <filename>safekit/models/tiered_lm.py<gh_stars>100-1000
#!/usr/bin/env python
"""
This is a two tiered language model for anomaly detection, where the second tier LSTM (log line level)
takes the concatenation of the average sentence vector and final hidden state
from the lower tier (token level) LSTM as input, creating a new context vector and hidden state
for the given user.
Example Command for running a model configuration
-------------------------------------------------
**Raw (character token) tiered model** (The jagged parameter lets the model know there are variable length sequences) ::
python safekit/models/tiered_lm.py results/ safekit/features/specs/lm/lanl_char_config.json data_examples/lanl/lm_feats/raw_day_split/ -test -skipsos -jagged
.. Note ::
The output results will be printed to /tmp/lanl_result/ and then moved to results/ upon completion
to avoid experiment slowdown of constant network traffic.
File name convention:
---------------------
- em: embedding size for token embedding
- ns: number of loglines per user per mini-batch for truncated back propagation through time
- mb: Minibatch size (mini-batch over users)
- lr: learnrate (step size for gradient descent)
- cl: context layers (number of hidden layers for top level (log line level) context rnn)
- lml: language model layers (number of hidden layers for the bottom level, token level, rnn)
- rs: random seed for reproducible results
stdout
------
For each mini-batch the following is printed to standard output ::
batchsize line_number second status filename index current_loss
Where:
- batchsize: The size of the mini-batch
- line_number: Line number from original auth.txt file (may be off by 1)
- second: The second of the first event in the mini-batch
- status: Whether the model is updating or merely forward propagating
- filename: The current file being processed
- index: The number of samples processed to date
- current_loss: The average loss over the mini-batch
File output
-----------
::
batch_num line second day user red loss
Where:
- batch_num: The mini-batch this event was a part of
- line: Line number from original auth.txt file (may be off by 1)
- second: The second which the event occurred on
- day: The day the event occurred on
- user: The user who performed the event
- red: Whether this event was a labeled red team activity (1 for red team activity 0 otherwise)
- loss: The anomaly score for this event
.. Note ::
The runtime of the experiment is also printed to a file called runtimes.txt at the end of training
Input Data
----------
The format of the input makes the following assumptions:
- Input files are together in datafolder, one file for each day.
- Input files are plain text files with one line of integers per log line representing meta data and the tokens from log text.
- Input format for fixed length sequences ::
line_nums second day user red logtokenid1 .... logtokenid_SentenceLen
- Zero padded Input format for jagged sequences ::
line_nums second day user red SentenceLen logtokenid1 .... logtokenid_SentenceLen 0 0 .... 0
"""
import os
import sys
# So we can run this code on arbitrary environment which has tensorflow but not safekit installed
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import tensorflow as tf
import numpy as np
import time
from safekit.batch import OnlineLMBatcher
from simple_lm import write_results, CELL
from safekit.tf_ops import lm_rnn, bidir_lm_rnn
from safekit.graph_training_utils import ModelRunner
from safekit.util import get_mask, Parser
import json
import math
def return_parser():
parser = Parser()
parser.add_argument('results_folder', type=str,
help='The folder to print results to.')
parser.add_argument('config', type=str,
help='The data spec.')
parser.add_argument("datafolder", type=str,
help="File with token features")
parser.add_argument('-encoding', type=str, default=None,
help='Can be "oct", "raw" or "word"')
parser.add_argument("-em", type=int, default=5,
help="Dimension of token embeddings")
parser.add_argument("-numsteps", type=int, default=3,
help="length of unrolled context_rnn, number of log lines per user per train step")
parser.add_argument('-mb', type=int, default=64,
help='Number of users in mini-batch.')
parser.add_argument('-learnrate', type=float, default=0.001,
help='Step size for gradient descent.')
parser.add_argument("-context_layers", type=int, nargs='+', default=[10],
help='List of hidden layer sizes for context lstm.')
parser.add_argument('-lm_layers', type=int, nargs='+', default=[5],
help='List of hidden layer sizes for token lstm.')
parser.add_argument('-debug', action='store_true',
help='Use this flag to print feed dictionary contents and dimensions.')
parser.add_argument('-random_seed', type=int, default=5,
help='Random seed for reproducible experiments.')
parser.add_argument('-jagged', action='store_true',
                        help='Whether using sequences of variable length (input should '
                             'be zero-padded to max_sequence_length).')
parser.add_argument('-skipsos', action='store_true',
help='Whether to skip a start of sentence token.')
parser.add_argument('-bidir', action='store_true',
help='Whether to use bidirectional lstm for lower tier.')
parser.add_argument('-test', action='store_true',
help='Whether to run on a subset of the data (5000 lines from days 1,2,3) or the entire set.')
parser.add_argument('-verbose', type=int, default=1,
help='Whether to print loss during training.')
parser.add_argument('-delimiter', type=str, default=',',
help="Delimiter for input text file")
parser.add_argument('-cell_type', type=str, default='lstm',
help='Can be either "lstm", "ident_ran", or "ran"')
parser.add_argument('-upper_cell_type', type=str, default='lstm',
help='Can be either "lstm", "ident_ran", or "ran"')
return parser
class ContextRNN:
"""
Log line level LSTM cell that keeps track of it's last lstm state tuple
"""
def __init__(self, layers, initial_state,
cell=tf.nn.rnn_cell.LSTMCell):
"""
:param layers: List of hidden layer sizes.
:param initial_state: List of numlayers lists of tensors (cell_state, hidden_state),
or List of lstm state tuples (which are named tuples of tensors (c=cell_state, h=hidden_state)
:param cell: Type of rnn cell to use.
"""
self.cell_type = cell
self.cell_stack = tf.nn.rnn_cell.MultiRNNCell([self.cell_type(cell_size) for cell_size in layers])
self.layers = layers
self.state = initial_state
def __call__(self, lower_outputs, final_hidden, seq_len):
"""
:param line_input: The input for current time step.
:param state: The cell state output by ContextRnn from previous time step.
:param seq_len: A 1D tensor of of size mb giving lengths of sequences in mb for this time step
:return: (tensor, LSTMStateTuple) output, state
"""
ctxt_input = ContextRNN._create_input(lower_outputs, final_hidden, seq_len)
output, self.state = self.cell_stack(ctxt_input, self.state)
return output, self.state
@staticmethod
def _create_input(lower_outputs, final_hidden, seq_len):
"""
:param lower_outputs: The list of output Tensors from the token level rnn
:param final_hidden: The final hidden state from the token level rnn
:param seq_len: A 1D tensor of of size mb giving lengths of token level sequences in mb for this time step
:return: A tensor which is the concatenation of the hidden state averages and final hidden state from lower
tier model. Used as input to context rnn
"""
if seq_len is not None:
mean_hidden = tf.reduce_sum(tf.stack(lower_outputs, axis=0), axis=0)/seq_len
else:
mean_hidden = tf.reduce_mean(tf.stack(lower_outputs, axis=0), axis=0)
return tf.concat([mean_hidden, final_hidden], 1)
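# Illustrative shape sketch (added annotation): with a lower-tier hidden size
# of H and a mini-batch of B users, `lower_outputs` is a list of per-token
# hidden-state tensors, each of shape [B, H], and `final_hidden` is [B, H],
# so the tensor fed to the context cell by _create_input is the concatenation
# [mean_hidden | final_hidden] of shape [B, 2*H].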
def tiered_lm(token_set_size, embedding_size, ph_dict, context_layers, lm_layers,
numsteps, bidir=False, jagged=False):
"""
:param token_set_size: (int) Number of unique tokens in token set
:param embedding_size: (int) Dimensionality of token embeddings
:param ph_dict: dictionary of tensorflow placeholders and lists of tensorflow placeholders
:param context_layers: List of hidden layer sizes for stacked context LSTM
:param lm_layers: list of hidden layer sizes for stacked sentence LSTM
:param numsteps: How many steps (log lines) to unroll the upper tier RNN
:param bidir: Whether to use bidirectional LSTM for lower tier model
:param jagged: Whether or not variable length sequences are used
:return: total_loss (scalar tensor),
context_vector (tensor),
line_loss_matrix (tensor), Losses for each line in mini-batch
context_state (LSTMStateTuple) Final state of upper tier model
"""
if bidir:
language_model = bidir_lm_rnn
else:
language_model = lm_rnn
# =========================================================
# ========== initialize token level lstm variables ========
# =========================================================
if jagged:
ph_dict['lens'] = []
ph_dict['masks'] = []
context_vector = tf.placeholder(tf.float32, [None, ctxt_size], name="context_vector")
ph_dict['context_vector'] = context_vector
tf.add_to_collection('context_vector', ph_dict['context_vector'])
token_embed = tf.Variable(tf.truncated_normal([token_set_size, embedding_size])) # Initial embeddings vocab X embedding size
total_loss = 0.0
# =========================================================
# ======= initialize log line level (context) lstm ========
# =========================================================
ph_dict['c_state_init'] = [tf.placeholder(tf.float32, [None, c_size]) for c_size in context_layers]
ph_dict['h_state_init'] = [tf.placeholder(tf.float32, [None, h_size]) for h_size in context_layers]
context_init = [tf.nn.rnn_cell.LSTMStateTuple(ph_dict['c_state_init'][i],
ph_dict['h_state_init'][i])
for i in range(len(context_layers))]
ctxt_rnn = ContextRNN(context_layers, context_init, cell=CELL[args.upper_cell_type])
# =========================================================
# ======= initiate loop that ties together tiered lstm ====
# =========================================================
with tf.variable_scope("reuse_scope") as vscope:
for i in range(numsteps):
x = tf.placeholder(tf.int64, [None, sentence_length])
t = tf.placeholder(tf.int64, [None, sentence_length-2*bidir])
ph_dict['x'].append(x)
ph_dict['t'].append(t)
if jagged:
seq_len = tf.placeholder(tf.int32, [None])
ph_dict['lens'].append(seq_len)
else:
seq_len = None
token_losses, hidden_states, final_hidden = language_model(x, t, token_embed, lm_layers,
seq_len=seq_len,
context_vector=context_vector,
cell=CELL[args.cell_type])
if jagged:
ph_dict['masks'].append(tf.placeholder(tf.float32, [None, sentence_length-2*bidir]))
token_losses *= ph_dict['masks'][-1]
line_losses = tf.reduce_sum(token_losses, axis=1) # batch_size X 1
sequence_lengths = tf.reshape(tf.cast(ph_dict['lens'][-1], tf.float32), (-1, 1))
else:
line_losses = tf.reduce_mean(token_losses, axis=1) # batch_size X 1
sequence_lengths = None
avgloss = tf.reduce_mean(line_losses) # scalar
total_loss += avgloss
if i == 0:
line_loss_matrix = tf.reshape(line_losses, [1, -1])
tf.add_to_collection('first_line_loss_matrix', line_loss_matrix)
else:
line_loss_matrix = tf.concat((line_loss_matrix, tf.reshape(line_losses, [1, -1])), 0)
context_vector, context_state = ctxt_rnn(hidden_states,
final_hidden,
sequence_lengths)
tf.add_to_collection('context_vector', context_vector)
tf.add_to_collection('context_state', context_state)
tf.get_variable_scope().reuse_variables()
total_loss /= float(numsteps)
return total_loss, context_vector, line_loss_matrix, context_state
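# Illustrative sketch (added annotation) of the placeholder dictionary that
# tiered_lm() fills in and that the training loop below feeds each step:
#
#   ph_dict = {
#       'x': [...],              # numsteps int64 token placeholders, one per log line
#       't': [...],              # numsteps target placeholders
#       'lens': [...],           # only when -jagged: per-line sequence lengths
#       'masks': [...],          # only when -jagged: float masks over token positions
#       'context_vector': ...,   # [batch, ctxt_size] warm-start context
#       'c_state_init': [...],   # per-layer context-LSTM cell states
#       'h_state_init': [...],   # per-layer context-LSTM hidden states
#   }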
if __name__ == "__main__":
# ===========================================================================
# =========================PARSE ARGUMENTS===================================
# ===========================================================================
args = return_parser().parse_args()
conf = json.load(open(args.config, 'r'))
assert all(x == args.context_layers[0] for x in args.context_layers), 'Different sized context layers not supported.'
assert args.numsteps > 1, 'Must have at least two upper tier time steps to build graph for tiered lstm.'
if not args.results_folder.endswith('/'):
args.results_folder += '/'
tf.set_random_seed(args.random_seed)
np.random.seed(args.random_seed)
sentence_length = (conf['sentence_length'] - 1) - int(args.skipsos) + int(args.bidir)
token_set_size = conf['token_set_size']
ctxt_size = args.context_layers[0]
direction = ('fwd', 'bidir')[args.bidir]
results_file = 'tier_%s_%s_%s_%s__em_%s__ns_%s__mb_%s__lr_%s__cl_%s__lml_%s__rs_%s' % (direction,
args.encoding,
args.cell_type,
time.ctime(time.time()).replace(' ', '-'),
args.em,
args.numsteps,
args.mb,
args.learnrate,
args.context_layers[0],
args.lm_layers[0],
args.random_seed)
# if the -test flag passed, store predictions in a temporary file
if "lanl_results" not in os.listdir("/tmp"):
os.system("mkdir /tmp/lanl_results; chmod g+rwx /tmp/lanl_results")
outfile = open("/tmp/lanl_results/" + results_file, 'w')
outfile.write("batch line second day user red loss\n")
mode = ('fixed', 'update')
jag = int(args.jagged)
skipsos = int(args.skipsos)
# ===========================================================================
# =========================BUILD GRAPH=======================================
# ===========================================================================
ph_dict = {'x': [], 't': []}
dummy_loss = tf.constant(1)
total_loss, context_vector, line_loss_matrix, context_state = tiered_lm(token_set_size, args.em,
ph_dict,
args.context_layers,
args.lm_layers,
args.numsteps,
bidir=args.bidir,
jagged=args.jagged)
tiered_network_model = ModelRunner(total_loss, ph_dict, learnrate=args.learnrate,
debug=args.debug, decay=True,
decay_rate=0.99, decay_steps=20)
# ===========================================================================
# =========================TRAINING LOOP=====================================
# ===========================================================================
init_triple = (np.zeros([1, ctxt_size], np.float32), # context
[np.zeros([1, c_size], np.float32) for c_size in args.context_layers], # state
[np.zeros([1, h_size], np.float32) for h_size in args.context_layers]) # hidden
start_time = time.time()
def trainday(is_training, f, states, logs):
num_processed = 0
data = OnlineLMBatcher(args.datafolder + f, init_triple,
batch_size=args.mb, num_steps=args.numsteps, skiprows=0)
do_update = is_training
if states is not None:
data.state_triples = states
batch, state_triple = data.next_batch()
batch_num = 0
stragglers = False
while batch is not None:
if data.flush:
do_update = False
if len(batch.shape) == 2: # Straggler log lines that don't fit into num_steps by end of day are run in large batches one step at a time
stragglers = True
batch = batch.reshape((1, batch.shape[0], batch.shape[1]))
endx = batch.shape[2] - int(not args.bidir)
endt = batch.shape[2] - int(args.bidir)
datadict = {'line': batch[:, :, 0],
'second': batch[:, :, 1],
'day': batch[:, :, 2],
'user': batch[:, :, 3],
'red': batch[:, :, 4],
'x': [batch[0, :, 5 + jag + skipsos:endx]] * args.numsteps,
't': [batch[0, :, 6 + jag + skipsos:endt]] * args.numsteps,
'context_vector': state_triple['context_vector'],
'c_state_init': state_triple['c_state_init'],
'h_state_init': state_triple['h_state_init']}
if args.jagged:
datadict['lens'] = [batch[0, :, 5] - skipsos] * args.numsteps
datadict['masks'] = [get_mask(seq_length - 2 * args.bidir, sentence_length - 2 * args.bidir) for
seq_length in datadict['lens']]
for i in range(len(datadict['x'])):
assert np.all(datadict['lens'][i] <= datadict['x'][i].shape[1]), \
'Sequence found greater than num_tokens_predicted'
assert np.nonzero(datadict['lens'][i])[0].shape[0] == datadict['lens'][i].shape[0], \
'Sequence lengths must be greater than zero.' \
'Found zero length sequence in datadict["lengths"]: %s' % datadict['lens']
first_output_context_state = tf.get_collection('context_state')[0]
eval_tensors = ([total_loss,
tf.get_collection('context_vector')[1],
tf.get_collection('first_line_loss_matrix')[0]] +
[state_tuple.c for state_tuple in first_output_context_state] +
[state_tuple.h for state_tuple in first_output_context_state])
else: # Ordinary batching and matrix flush batching
batch = np.transpose(batch, axes=(1, 0, 2))
endx = batch.shape[2] - int(not args.bidir)
endt = batch.shape[2] - int(args.bidir)
datadict = {'line': batch[:, :, 0],
'second': batch[:, :, 1],
'day': batch[:, :, 2],
'user': batch[:, :, 3],
'red': batch[:, :, 4],
'x': [batch[i, :, 5 + jag + skipsos:endx] for i in range(args.numsteps)],
't': [batch[i, :, 6 + jag + skipsos:endt] for i in range(args.numsteps)],
'context_vector': state_triple['context_vector'],
'c_state_init': state_triple['c_state_init'],
'h_state_init': state_triple['h_state_init']}
if args.jagged:
datadict['lens'] = [batch[i, :, 5] - skipsos for i in range(args.numsteps)]
datadict['masks'] = [get_mask(seq_length-args.bidir-args.skipsos,
sentence_length-2*args.bidir) for seq_length in datadict['lens']]
for i in range(len(datadict['x'])):
assert np.all(datadict['lens'][i] <= datadict['x'][i].shape[1]), \
'Sequence found greater than num_tokens_predicted'
assert np.nonzero(datadict['lens'][i])[0].shape[0] == datadict['lens'][i].shape[0], \
'Sequence lengths must be greater than zero.' \
'Found zero length sequence in datadict["lengths"]: %s' % datadict['lens']
eval_tensors = ([total_loss, context_vector, line_loss_matrix] +
[state_tuple.c for state_tuple in context_state] +
[state_tuple.h for state_tuple in context_state])
# output dims: 0: Nothing, 1 (total_loss): scalar, 2 (context_vector): num_users X hidden_size,
# 3 (line_loss_matrix): num_users X num_steps
output = tiered_network_model.train_step(datadict, eval_tensors=eval_tensors,
update=do_update)
loss, context, loss_matrix = output[1], output[2], output[3]
current_context_state = output[4:4 + len(args.context_layers)]
current_context_hidden = output[4 + len(args.context_layers):4 + 2*len(args.context_layers)]
data.update_state_triples([context, current_context_state, current_context_hidden])
if args.verbose:
print('%s %s %s %s %s %s %r' % (datadict['day'].shape[1],
datadict['line'][0][0],
datadict['second'][0][0],
mode[do_update],
f,
data.line_num, loss))
if math.isnan(loss) or math.isinf(loss):
print('Exiting due to divergence!')
exit(1)
if not is_training:
num_processed += batch.shape[0] * batch.shape[1]
if not stragglers:
assert loss_matrix.shape[0] * loss_matrix.shape[1] == batch.shape[0] * batch.shape[1], 'Batch size %s is different from output size %s. May be losing datapoints.' % (batch.shape, loss_matrix.shape)
write_results(datadict, loss_matrix, outfile, batch_num)
else:
assert loss_matrix[0, :].shape[0] == batch.shape[0] * batch.shape[1], 'Batch size is different from output size. May be losing datapoints.'
write_results(datadict, loss_matrix[0, :], outfile, batch_num)
batch, state_triple = data.next_batch()
batch_num += 1
return data.state_triples, data.user_logs, num_processed
weekend_days = conf["weekend_days"]
if args.test:
files = conf["test_files"] # 5000 lines from each of day 0, day 1 and day 2
else:
files = [str(i) + '.txt' for i in range(conf["num_days"]) if i not in weekend_days]
states1 = None
logs1 = None
number_processed = 0
for idx, f in enumerate(files[:-1]):
states1, logs1, num_processed = trainday(True, f, states1, logs1)
states2, logs2, num_processed = trainday(False, files[idx + 1], states1, logs1)
number_processed += num_processed
outfile.close()
total_time = time.time() - start_time
print('elapsed time: %s' % total_time)
os.system("mv /tmp/lanl_results/%s %s" % (results_file, args.results_folder + results_file))
print('number processed', number_processed)
| 2.65625 | 3 |
src/nearby/delaunaymain.py | ghjwp7/nearby | 0 | 12767214 | <filename>src/nearby/delaunaymain.py
#!/usr/bin/python3
# -*- mode: python; coding: utf-8 -*-
# <NAME> - November 2020
'''Some tests for circumcircle routines in autoAdder2d.py'''
from pypevue import Point
from nearby.delaunay import CircumCircle2, CircumCircle3, Face
from nearby import delaunayCCvis
from random import seed, random
import time, sys
def makeCCTestData(npoints, nfaces, style='xy', salt=123457, scale=10):
'''Make npoints points of specified style, xy or xyz; make nfaces
faces, and for each face a point in & out of it.'''
# nfaces cannot exceed (npoints choose 3)
npC3 = npoints*(npoints-1)*(npoints-2)//6
if nfaces > npC3:
print (f'nfaces = {nfaces} > {npoints} C 3 = {npC3}, exiting')
sys.exit(1)
seed(a=salt+npoints+nfaces)
zf = random if style=='xyz' else lambda:0
verts = [Point(scale*random(), scale*random(),
scale*zf()) for i in range(npoints)]
faces, pIn, pOut, cache = [], [], [], {}
p2 = p3 = int(npoints*random())
while p2==p3:
p3 = int(npoints*random())
for i in range(nfaces):
while 1:
p1, p2 = p2, p3
while p1==p3 or p2==p3:
p3 = int(npoints*random())
f = Face(p1,p2,p3)
if f.canon not in cache:
faces.append(f)
cache[f.canon] = 1
break
# Create point at barycenter of current face. Every point
# within the triangle is also within the circumcircle (CC).
A,B,C = verts[p1], verts[p2], verts[p3]
pI = 1/3*(A+B+C)
# Create a point outside current face's CC: Because A,B,C are
# on the CC, and pI is inside the CC, any extrapolation beyond
# A,B,C on a line from pI is outside the CC.
xl = 0.3; pO = (1+xl)*B - xl*pI
#print (f'Face {i:<2} : {f}\n in:{pI}\n o:{pO}')
pIn.append(pI); pOut.append(pO)
return verts, faces, pIn, pOut
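# Illustrative note (added annotation): for each face the barycenter
# pI = (A + B + C) / 3 is strictly inside triangle ABC, hence inside its
# circumcircle, while pO = (1 + xl)*B - xl*pI with xl = 0.3 extrapolates past
# the vertex B on the line from pI through B and therefore lands outside the
# circumcircle. CircumCircle2/CircumCircle3 are expected to classify pI as
# inside and pO as outside in the tests below.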
def timeTest1(CircumCircle, verts, faces, pIn, pOut, tCache, note, bmul):
baseTime = time.time()
cache = {}
nface = len(faces)
print ()
for tn, f, pI, pO in zip(range(nface), faces, pIn, pOut):
tnum = tn*2 + bmul*nface
threep = [verts[p] for p in f.get123]
if tCache:
canon = f.canon
else: canon = None; cache = {}
zin, c, rr, dd = CircumCircle(pI, threep, canon, cache)
zot, c, rr, dd = CircumCircle(pO, threep, canon, cache)
if not zin:
print (f'F{tn}: Error, point {pI} showed as outside {f} = {threep}')
if zot:
print (f'F{tn}: Error, point {pO} showed as inside {f} = {[str(v) for v in threep]}')
lapsTime = time.time() - baseTime
print (f'Test time for {note}: {lapsTime:3.6f} seconds')
#--------------------------------------------------------------
if __name__ == '__main__':
from sys import argv
arn = 0
arn+=1; tcode = argv[arn] if len(argv)>arn else 'val'
arn+=1; nverts = int(argv[arn]) if len(argv)>arn else 5
arn+=1; nfaces = int(argv[arn]) if len(argv)>arn else 5
arn+=1; tcache = int(argv[arn]) if len(argv)>arn else 1
arn+=1; haveZ = int(argv[arn]) if len(argv)>arn else 0
style = 'xyz' if haveZ else 'xy'
points, faces, pIn, pOut = makeCCTestData(nverts, nfaces, style=style)
def timeTest(tCache, note):
timeTest1(CircumCircle2, points, faces, pIn, pOut, tCache, 'CC2'+note, 0)
timeTest1(CircumCircle3, points, faces, pIn, pOut, tCache, 'CC3'+note, 2)
def valTest(tCache):
pass
if tcode.startswith('tim'): # Timing test, no cache
timeTest(False, ' time test without cache')
elif tcode.startswith('cac'): # Timing test, with cache
timeTest(True, ' time test using cache')
elif tcode.startswith('val'): # Values accuracy test
valTest(True)
if tcode.endswith('vis'): # Visualization (generates SCAD code)
delaunayCCvis.visCCData(points, faces, pIn, pOut)
| 2.546875 | 3 |
init.py | reckonsys/jatayu | 0 | 12767215 | # file: app.py
import asyncio
import asyncpg
from pgorm.postgresql import PostgreSQL
from myapp.tables import User
async def run():
connection = await asyncpg.connect(
user='reckonsys', password='<PASSWORD>',
database='demo', host='db.reckonsys.com')
pg = PostgreSQL(connection)
# Lets create UUID OSSP extention (because UUIDField)
await pg.create_extension_uuid_ossp()
await pg.create(User) # Create Table
user = await pg.insert(User(name='dhilipsiva', age=30))
print(user)
# User(pk=UUID('f46863...'), name='dhilipsiva', age=30)
await connection.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
| 2.625 | 3 |
adw_test/hyopt_gen.py | clinfo/DeepKF | 5 | 12767216 | <reponame>clinfo/DeepKF<gh_stars>1-10
import hyopt as hy
import os
import sys
import itertools
import random
process_num=2
if len(sys.argv)>1 and sys.argv[1]=="rm":
cnt=1
idx="%05d"%(cnt)
model_path= "hyopt/model"+idx+""
result_path="hyopt/result"+idx+""
try:
os.removedirs(model_path)
except OSError:
pass
try:
os.removedirs(result_path)
except OSError:
pass
os.remove("hyopt/hyparam"+idx+".result.json")
os.remove("hyopt/hyparam"+idx+".json")
quit()
###
param_set={}
param_set["dim"]=[2,4,8]
param_set["emission_internal_layers"]=[
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_bn"},
],
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_bn"},
],
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_res_start"},
{"name":"fc_bn"},
{"name":"fc_res"},
{"name":"do"},
{"name":"fc_bn"},
],
]
param_set["transition_internal_layers"]=[
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_bn"},
],
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_bn"},
],
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_res_start"},
{"name":"fc_bn"},
{"name":"fc_res"},
{"name":"do"},
{"name":"fc_bn"},
],
]
param_set["variational_internal_layers"]=[
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"fc_bn"},
],
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"lstm"}
],
[
{"name":"fc_bn"},
{"name":"do"},
{"name":"lstm"},
{"name":"do"},
{"name":"lstm"},
],
]
keys=param_set.keys()
xs=[param_set[k] for k in keys]
#x1=param_set["emssion_internal_layers"]
#x2=param_set["transition_internal_layers"]
#x3=param_set["variational_internal_layers"]
cnt=0
scripts=[]
for l in itertools.product(*xs):
cnt+=1
idx="%05d"%(cnt)
model_path= "hyopt/model"+idx+""
result_path="hyopt/result"+idx+""
hy.initialize_hyperparameter(load_filename="hyopt_hyparam_template.json")
param=hy.get_hyperparameter()
param["evaluation_output"]="hyopt/hyparam"+idx+".result.json"
param["hyperparameter_input"]="hyopt/hyparam"+idx+".json"
os.makedirs(model_path,exist_ok=True)
os.makedirs(result_path,exist_ok=True)
param["save_model_path"]= model_path
param["load_model"]= ""
param["save_result_train"]= result_path+"/train.jbl"
param["save_result_test"]= result_path+"/test.jbl"
param["save_result_filter"]=result_path+"/filter.jbl"
param["plot_path"]=result_path+"/plot"
param["simulation_path"]=result_path+"/sim"
#param["emssion_internal_layers"] =l1
#param["transition_internal_layers"] =l2
#param["variational_internal_layers"]=l3
for el,k in zip(l,keys):
param[k]=el
###
hy.save_hyperparameter(param["hyperparameter_input"])
cmd="python dkf.py --config hyopt/config_train.json --hyperparam "+param["hyperparameter_input"]+" train > "+result_path+"log.txt 2>&1"+"\n"
scripts.append(cmd)
random.shuffle(scripts)
fps=[]
for pid in range(process_num):
fp=open("hyopt/run"+str(pid)+".sh","w")
fp.write("export CUDA_VISIBLE_DEVICES="+str(pid)+"\n")
fps.append(fp)
for i,line in enumerate(scripts):
j=i%process_num
fps[j].write(line)
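# Illustrative sketch (added annotation; index 00007 is a made-up example and
# the command is wrapped here for readability). Each generated hyopt/run<pid>.sh
# pins one GPU and runs its share of the shuffled configurations, e.g.:
#
#   export CUDA_VISIBLE_DEVICES=0
#   python dkf.py --config hyopt/config_train.json \
#       --hyperparam hyopt/hyparam00007.json train > hyopt/result00007log.txt 2>&1
#
# Note the redirect target: the cmd string above concatenates result_path and
# "log.txt" without a separator, so logs land next to (not inside) the result
# directory.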
| 2.015625 | 2 |
bgen_reader/_environment.py | sbaker-dev/bgen-reader-py | 0 | 12767217 | import os
from pathlib import Path
from appdirs import user_cache_dir
from ._file import make_sure_dir_exist
CUSTOM_CACHE = None
BGEN_READER_CACHE_HOME = Path(
os.environ.get(
"BGEN_READER_CACHE_HOME",
default=Path(user_cache_dir("bgen-reader", "limix")) / "bgen-reader",
)
)
def custom_meta_path(custom_path: Path = None):
"""
    Allow the end user to override the default path behavior and store files
    in a set location. Potentially useful when working on a Linux cluster
    where permissions issues are more prevalent.
:param custom_path: Path to a directory to store meta data
"""
global CUSTOM_CACHE
CUSTOM_CACHE = custom_path
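# Illustrative usage (added annotation; the directory below is hypothetical):
#
#     from pathlib import Path
#     from bgen_reader._environment import custom_meta_path
#
#     custom_meta_path(Path("/scratch/shared/bgen-metadata"))
#
# After this call, code that consults CUSTOM_CACHE can store metafiles under
# the supplied directory instead of the per-user BGEN_READER_CACHE_HOME.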
__all__ = ["BGEN_READER_CACHE_HOME", "custom_meta_path", "CUSTOM_CACHE"]
make_sure_dir_exist(BGEN_READER_CACHE_HOME)
make_sure_dir_exist(BGEN_READER_CACHE_HOME / "test_data")
make_sure_dir_exist(BGEN_READER_CACHE_HOME / "metafile")
| 2.359375 | 2 |
app/grandchallenge/reader_studies/templatetags/get_ground_truth.py | njmhendrix/grand-challenge.org | 101 | 12767218 | <filename>app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
from django import template
register = template.Library()
@register.simple_tag
def get_ground_truth(obj, image, question):
"""
Get the ground truth value for the image/question combination in reader
study obj.
"""
ground_truths = obj.statistics["ground_truths"]
return ground_truths[image][question]
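# Illustrative template usage (added annotation; variable names are examples):
#
#     {% load get_ground_truth %}
#     {% get_ground_truth reader_study image question as gt %}
#     {{ gt }}
#
# where reader_study is the reader-study object whose `statistics` property
# exposes the "ground_truths" mapping read above.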
| 2.140625 | 2 |
curvepy/extension/tangent.py | diatche/curvepy | 0 | 12767219 | <gh_stars>0
from .extension import Extension
from ..line import Line
from intervalpy import Interval
class TangentExtension(Extension):
"""
Extends an end of a function with a line using its edge tangents.
"""
name = "tangent"
def __init__(self, func, regression_degree=None, regression_period=None, **kwargs):
self.regression_degree = None
self.regression_period = None
if regression_degree is not None:
self.regression_degree = int(regression_degree)
assert self.regression_degree > 0
if regression_period is not None:
self.regression_period = float(regression_period)
assert self.regression_period > 0
assert self.regression_degree is None or self.regression_period is None
super().__init__(func, **kwargs)
def update_extension(self):
if self.start:
x = self.curve.domain.start
if x is not None and self.curve.domain.start_open and self.curve.domain.contains(x + self.min_step):
x += self.min_step
y = None
d_y = None
if self.regression_degree is not None:
x1 = x
for i in range(self.regression_degree):
x1 = self.curve.x_next(x1, min_step=self.min_step)
domain = Interval(x, x1)
tangent = self.curve.regression(domain, min_step=self.min_step)
if tangent is not None:
y = tangent.y(x)
d_y = tangent.slope
elif self.regression_period is not None:
domain = Interval(x, x + self.regression_period)
tangent = self.curve.regression(domain, min_step=self.min_step)
if tangent is not None:
y = tangent.y(x)
d_y = tangent.slope
else:
y = self.curve.y(x)
d_y = self.curve.d_y(x, forward=True)
self.start_valid = y is not None and d_y is not None
if self.start_valid:
self.update_extension_func(self.start_func, x, y, d_y)
if self.end:
x = self.curve.domain.end
if x is not None and self.curve.domain.end_open and self.curve.domain.contains(x - self.min_step):
x -= self.min_step
y = None
d_y = None
if self.regression_degree is not None:
x0 = x
for i in range(self.regression_degree):
x0 = self.curve.x_previous(x0, min_step=self.min_step)
domain = Interval(x0, x)
tangent = self.curve.regression(domain, min_step=self.min_step)
if tangent is not None:
y = tangent.y(x)
d_y = tangent.slope
elif self.regression_period is not None:
domain = Interval(x - self.regression_period, x)
tangent = self.curve.regression(domain, min_step=self.min_step)
if tangent is not None:
y = tangent.y(x)
d_y = tangent.slope
else:
y = self.curve.y(x)
d_y = self.curve.d_y(x, forward=False)
self.end_valid = y is not None and d_y is not None
if self.end_valid:
self.update_extension_func(self.end_func, x, y, d_y)
def create_extension_func(self, start=False):
return Line(const=0, slope=0)
def update_extension_func(self, func, x, y, dy):
func.set(p1=(x, y), slope=dy)
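# Illustrative note (added annotation): the extension is the tangent line
# through the sampled edge point, i.e. for an end point (x0, y0) with slope m
# the extended curve evaluates to y(t) = y0 + m * (t - x0). Line(const=0,
# slope=0) above is just a placeholder that update_extension_func overwrites
# via func.set(p1=(x0, y0), slope=m).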
| 2.796875 | 3 |
tests/mock_client.py | roman-kachanovsky/cmd.fm-python | 0 | 12767220 | from __future__ import unicode_literals
from client.client import DirbleClient
class MockClient(DirbleClient):
def get_genres(self):
return [
{
"id": 1,
"title": "Trance",
"description": "stations that plays commercial and other things in trance-music genre.",
"slug": "trance",
"ancestry": "14"
},
{
"id": 2,
"title": "Rock",
"description": "simple rock. from elvis to metallica and like hardrock as iron maiden.",
"slug": "rock",
"ancestry": None
},
{
"id": 3,
"title": "Dance",
"description": "dance music, the new from 80's and 90's, like bubblegum and more.",
"slug": "dance",
"ancestry": "14"
},
{
"id": 4,
"title": "Dancehall",
"description": "dancehall music.",
"slug": "dancehall",
"ancestry": "14"
},
]
def get_stations(self, category_id):
# TODO: Return fake data
return []
| 2.5 | 2 |
setup.py | yangzhl/trash | 3 | 12767221 | """`trash` lives on `GitHub <http://github.com/halst/trash/>`_."""
from distutils.core import setup
setup(name='trash',
version='0.1.0',
description='Safe `rm` substitute for OS X',
long_description=__doc__,
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/halst/trash/',
classifiers=['Intended Audience :: Developers',
'Environment :: Console',
'Programming Language :: Python :: 2',
'Operating System :: MacOS',
'License :: OSI Approved :: MIT License'],
keywords='rm, rmtrash, trash',
py_modules=['trash'],
scripts=['trash'])
| 1.523438 | 2 |
src/models/jaxgp/mean.py | jejjohnson/uncertain_gps | 9 | 12767222 | <filename>src/models/jaxgp/mean.py
import jax.numpy as jnp
def zero_mean(x):
return jnp.zeros(x.shape[0])
| 2.0625 | 2 |
anywhere/test_base_types.py | mlga/anywhere | 0 | 12767223 | # -*- coding:utf-8 -*-
from copy import deepcopy
from itertools import combinations_with_replacement
import pytest
from anywhere import testsets
@pytest.mark.parametrize(
'where_obj,string',
zip(
testsets.WHERES,
testsets.STRINGS,
),
ids=[str(i) for i in range(1, len(testsets.WHERES) + 1)],
)
def test_str(where_obj, string):
assert str(where_obj) == string
@pytest.mark.parametrize(
'where_obj,representation',
zip(
testsets.WHERES,
testsets.REPRESENTATIONS,
),
ids=[str(i) for i in range(1, len(testsets.WHERES) + 1)],
)
def test_repr(where_obj, representation):
assert repr(where_obj) == representation
@pytest.mark.parametrize(
'where_obj1,where_obj2',
combinations_with_replacement(testsets.WHERES, 2),
ids=lambda _: '',
)
def test_comparison(where_obj1, where_obj2):
if id(where_obj1) == id(where_obj2):
# We are comparing the same object, should be equal
assert where_obj1 == where_obj2
else:
assert where_obj1 != where_obj2
@pytest.mark.parametrize(
'where_obj',
testsets.WHERES,
ids=[str(i) for i in range(1, len(testsets.WHERES) + 1)],
)
def test_comparison_deepcopy(where_obj):
where_obj2 = deepcopy(where_obj)
assert id(where_obj) != id(where_obj2)
assert where_obj == where_obj2
| 2.578125 | 3 |
myparser/config.py | kbondar17/declarations-parser | 0 | 12767224 | <gh_stars>0
import configparser
from dotenv import dotenv_values
config = dotenv_values(".env")
| 1.273438 | 1 |
export_standard_full.py | MuckRock/API-examples | 42 | 12767225 | <reponame>MuckRock/API-examples<filename>export_standard_full.py
#!/usr/bin/env python2
# -- coding: utf-8 --
# Standard Library
import datetime
import json
import os
import time
# Third Party
import requests
from simplejson.scanner import JSONDecodeError
import utils
url = utils.API_URL
token = utils.get_api_key()
headers = utils.get_headers(token)
page = 599
next_ = url + 'foia/?embargo=3&page=%d' % page
agencies = {}
jurisdictions = {}
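# Cache jurisdiction slugs by id so each jurisdiction is fetched from the API at most once.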
def get_jurisdiction(jurisdiction_id):
global jurisdictions
if jurisdiction_id in jurisdictions:
return jurisdictions[jurisdiction_id]
else:
print 'getting jurisdiction', jurisdiction_id
r = requests.get(url + 'jurisdiction/' + str(jurisdiction_id), headers=headers)
jurisdiction_json = r.json()
jurisdiction = '%s_%s' % (jurisdiction_id, jurisdiction_json['slug'])
jurisdictions[jurisdiction_id] = jurisdiction
return jurisdiction
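# Walk every page of the FOIA request list, mirroring each request's files and communication text into jurisdiction/agency folders.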
while next_ is not None: # Handling at the page level
try:
r = requests.get(next_, headers=headers)
json_data = r.json()
print 'Page %d of %d (%d total)' % (page, json_data['count'] / 50 + 1, json_data['count'])
page += 1
next_ = json_data['next']
print next_
for request in json_data['results']:
print 'Working on request ' + str(request['id'])
if request['status'] == 'started':
continue
if request['agency'] in agencies:
agency, jurisdiction = agencies[request['agency']]
else:
if request['agency'] is None:
agency = 'None'
jurisdiction = 'None'
else:
print 'getting agency', request['agency']
r = requests.get(url + 'agency/' + str(request['agency']), headers=headers)
agency_json = r.json()
agency = '%s_%s' % (request['agency'], agency_json['slug'])
jurisdiction = get_jurisdiction(agency_json['jurisdiction'])
agencies[request['agency']] = (agency, jurisdiction)
communications = request['communications']
dir_name = '%s/%s/%s_%s' % (jurisdiction, agency, request['id'], request['slug'])
if not os.path.exists(dir_name):
os.makedirs(dir_name)
for i, communication in enumerate(communications):
for file_ in communication['files']:
fileurl = file_['ffile']
file_name = '%s_%s' % (file_['datetime'], fileurl.split('/')[-1])
file_name = '%s/%s' % (dir_name, file_name)
if not os.path.exists(file_name):
with open(file_name, 'wb') as f:
f.write(requests.get(fileurl).content)
communication_text = communication['communication'].encode('ascii', 'ignore')
date = communication['datetime'].split('T')[0]
with open('%s/%s_%s_communication.txt' % (dir_name, date, i), 'w') as f:
f.write(communication_text)
except JSONDecodeError:
print r.status_code
print r.content
raise
except KeyboardInterrupt:
import ipdb
ipdb.set_trace()
| 2.390625 | 2 |
python/kudu/tests/common.py | AnupamaGupta01/kudu-1 | 2 | 12767226 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
import json
import fnmatch
import os
import shutil
import subprocess
import tempfile
import time
import kudu
from kudu.client import Partitioning
class KuduTestBase(object):
"""
Base test class that will start a configurable number of master and
tablet servers.
"""
BASE_PORT = 37000
NUM_TABLET_SERVERS = 3
@classmethod
def start_cluster(cls):
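        # Start one kudu-master on ephemeral ports, wait for its config dump, then launch NUM_TABLET_SERVERS tablet servers pointed at it.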
local_path = tempfile.mkdtemp(dir=os.getenv("TEST_TMPDIR"))
kudu_build = os.getenv("KUDU_BUILD")
if not kudu_build:
kudu_build = os.path.join(os.getenv("KUDU_HOME"), "build", "latest")
bin_path = "{0}/bin".format(kudu_build)
os.makedirs("{0}/master/".format(local_path))
os.makedirs("{0}/master/data".format(local_path))
os.makedirs("{0}/master/logs".format(local_path))
path = [
"{0}/kudu-master".format(bin_path),
"-rpc_server_allow_ephemeral_ports",
"-rpc_bind_addresses=0.0.0.0:0",
"-fs_wal_dir={0}/master/data".format(local_path),
"-fs_data_dirs={0}/master/data".format(local_path),
"-log_dir={0}/master/logs".format(local_path),
"-logtostderr",
"-webserver_port=0",
# Only make one replica so that our tests don't need to worry about
# setting consistency modes.
"-default_num_replicas=1",
"-server_dump_info_path={0}/master/config.json".format(local_path)
]
p = subprocess.Popen(path, shell=False)
fid = open("{0}/master/kudu-master.pid".format(local_path), "w+")
fid.write("{0}".format(p.pid))
fid.close()
# We have to wait for the master to settle before the config file
# appears
config_file = "{0}/master/config.json".format(local_path)
for i in range(30):
if os.path.exists(config_file):
break
time.sleep(0.1 * (i + 1))
else:
raise Exception("Could not find kudu-master config file")
# If the server was started get the bind port from the config dump
master_config = json.load(open("{0}/master/config.json"
.format(local_path), "r"))
# One master bound on local host
master_port = master_config["bound_rpc_addresses"][0]["port"]
for m in range(cls.NUM_TABLET_SERVERS):
os.makedirs("{0}/ts/{1}".format(local_path, m))
os.makedirs("{0}/ts/{1}/logs".format(local_path, m))
path = [
"{0}/kudu-tserver".format(bin_path),
"-rpc_server_allow_ephemeral_ports",
"-rpc_bind_addresses=0.0.0.0:0",
"-tserver_master_addrs=127.0.0.1:{0}".format(master_port),
"-webserver_port=0",
"-log_dir={0}/master/logs".format(local_path),
"-logtostderr",
"-fs_data_dirs={0}/ts/{1}/data".format(local_path, m),
"-fs_wal_dir={0}/ts/{1}/data".format(local_path, m),
]
p = subprocess.Popen(path, shell=False)
tserver_pid = "{0}/ts/{1}/kudu-tserver.pid".format(local_path, m)
fid = open(tserver_pid, "w+")
fid.write("{0}".format(p.pid))
fid.close()
return local_path, master_port
@classmethod
def stop_cluster(cls, path):
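        # Kill every daemon recorded in a .pid file under the cluster path, then remove the directory tree.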
for root, dirnames, filenames in os.walk('{0}/..'.format(path)):
for filename in fnmatch.filter(filenames, '*.pid'):
with open(os.path.join(root, filename)) as fid:
a = fid.read()
r = subprocess.Popen(["kill", "{0}".format(a)])
r.wait()
os.remove(os.path.join(root, filename))
shutil.rmtree(path, True)
@classmethod
def setUpClass(cls):
cls.cluster_path, master_port = cls.start_cluster()
time.sleep(1)
cls.master_host = '127.0.0.1'
cls.master_port = master_port
cls.client = kudu.connect(cls.master_host, cls.master_port)
cls.schema = cls.example_schema()
cls.partitioning = cls.example_partitioning()
cls.ex_table = 'example-table'
if cls.client.table_exists(cls.ex_table):
cls.client.delete_table(cls.ex_table)
cls.client.create_table(cls.ex_table, cls.schema, cls.partitioning)
@classmethod
def tearDownClass(cls):
cls.stop_cluster(cls.cluster_path)
@classmethod
def example_schema(cls):
builder = kudu.schema_builder()
builder.add_column('key', kudu.int32, nullable=False)
builder.add_column('int_val', kudu.int32)
builder.add_column('string_val', kudu.string)
builder.set_primary_keys(['key'])
return builder.build()
@classmethod
def example_partitioning(cls):
return Partitioning().set_range_partition_columns(['key'])
| 1.773438 | 2 |
tickets.py | DrumSergio/router-express | 0 | 12767227 | # -*- coding: utf-8 -*-
# system('python tickets.py "Sergio" "Fernandez" "<EMAIL>" "950001" "968868968"')
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
from selenium.webdriver.common.action_chains import ActionChains
import sys
TLFN = sys.argv.pop()
CC = sys.argv.pop()
EMAIL = sys.argv.pop()
SURN = sys.argv.pop()
NAME = sys.argv.pop()
driver = webdriver.PhantomJS()
driver.get("http://averias.emartinez.es:8081/helpdesk/WebObjects/Helpdesk.woa/wa")
driver.find_element_by_id("userName").send_keys("admin")
driver.find_element_by_id("password").send_keys("PASSWORD")
driver.find_element_by_name("1172.16.31.10.7.4.1.11.0.1.0").click()
#driver.save_screenshot("/home/tecnico/WebApp/data/prueba.png")
# New client
driver.find_element_by_xpath("//img[@alt='Clientes']").click()
driver.find_element_by_css_selector("div.squareButtonMiddle").click()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.13.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.13.0.0.1").send_keys(NAME) #NOMBRE
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.17.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.17.0.0.1").send_keys(SURN) #APELLIDO
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.21.1.1.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.21.1.1.0.0.1").send_keys(EMAIL) #EMAIL
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.33.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.33.0.0.1").send_keys(CC) #CÓDIGO CLIENTE
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.41.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.41.0.0.1").send_keys(TLFN) #TELÉFONO
driver.find_element_by_css_selector("div.aquaMiddleSel").click()
#Nuevo ticket
driver.find_element_by_xpath("//img[@alt='Configuración']").click()
element = driver.find_element_by_xpath("//div[@id='preferences-menu']/div/div[23]")
hover = ActionChains(driver).move_to_element(element)
hover.perform()
time.sleep(1)
driver.find_element_by_xpath("//div[@id='preferences-menu']/div/div[24]/ul/li[5]/a/div/div[2]").click()
driver.find_element_by_xpath("//input[@name='Field Separator' and @value='1']").click()
driver.find_element_by_xpath("//input[@type='file']").send_keys("ticketFIN.csv")
driver.find_element_by_css_selector("div.aquaMiddleSel").click()
time.sleep(4)
driver.find_element_by_id("logoutLink").click()
driver.close()
driver.quit()
| 2.53125 | 3 |
tests/integration/test_cookies_and_contact.py | ONSdigital/ras-frontstage | 8 | 12767228 | <reponame>ONSdigital/ras-frontstage
import unittest
import requests_mock
from frontstage import app
from tests.integration.mocked_services import url_banner_api
class TestCookiesContact(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
@requests_mock.mock()
def test_cookies_success(self, mock_request):
mock_request.get(url_banner_api, status_code=404)
response = self.app.get("/cookies")
self.assertEqual(response.status_code, 200)
self.assertTrue("Cookies on surveys.ons.gov.uk".encode() in response.data)
self.assertTrue(
"Cookies are small files saved on your phone, tablet or computer when you visit a website".encode()
in response.data
)
@requests_mock.mock()
def test_privacy_success(self, mock_request):
mock_request.get(url_banner_api, status_code=404)
response = self.app.get("/privacy-and-data-protection")
self.assertEqual(response.status_code, 200)
self.assertTrue("We will keep your information secure and confidential".encode() in response.data)
self.assertTrue("Where can I find out more about how my information will be treated?".encode() in response.data)
@requests_mock.mock()
def test_contact_success(self, mock_request):
mock_request.get(url_banner_api, status_code=404)
response = self.app.get("/contact-us")
self.assertEqual(response.status_code, 200)
self.assertTrue("Contact us".encode() in response.data)
self.assertTrue("Opening hours:".encode() in response.data)
| 2.578125 | 3 |
tests/policy/test_offline.py | usaito/zr-obp | 0 | 12767229 | <reponame>usaito/zr-obp
import pytest
import numpy as np
from sklearn.linear_model import LogisticRegression
import torch
from obp.policy.offline import IPWLearner
from obp.policy.offline import NNPolicyLearner
from obp.policy.policy_type import PolicyType
from obp.ope.estimators import InverseProbabilityWeighting
def test_base_opl_init():
# n_actions
with pytest.raises(ValueError):
IPWLearner(n_actions=1)
with pytest.raises(ValueError):
IPWLearner(n_actions="3")
# len_list
with pytest.raises(ValueError):
IPWLearner(n_actions=2, len_list=0)
with pytest.raises(ValueError):
IPWLearner(n_actions=2, len_list="3")
# policy_type
assert IPWLearner(n_actions=2).policy_type == PolicyType.OFFLINE
# invalid relationship between n_actions and len_list
with pytest.raises(ValueError):
IPWLearner(n_actions=5, len_list=10)
with pytest.raises(ValueError):
IPWLearner(n_actions=2, len_list=3)
def test_ipw_learner_init():
# base classifier
len_list = 2
learner1 = IPWLearner(n_actions=2, len_list=len_list)
assert isinstance(learner1.base_classifier, LogisticRegression)
for i in range(len_list):
assert isinstance(learner1.base_classifier_list[i], LogisticRegression)
with pytest.raises(ValueError):
from sklearn.linear_model import LinearRegression
IPWLearner(n_actions=2, base_classifier=LinearRegression())
from sklearn.naive_bayes import GaussianNB
learner2 = IPWLearner(n_actions=2, len_list=len_list, base_classifier=GaussianNB())
assert isinstance(learner2.base_classifier, GaussianNB)
for i in range(len_list):
assert isinstance(learner2.base_classifier_list[i], GaussianNB)
def test_ipw_learner_create_train_data_for_opl():
context = np.array([1.0, 1.0]).reshape(1, -1)
learner = IPWLearner(n_actions=2)
action = np.array([0])
reward = np.array([1.0])
pscore = np.array([0.5])
X, sample_weight, y = learner._create_train_data_for_opl(
context=context, action=action, reward=reward, pscore=pscore
)
assert np.allclose(X, np.array([1.0, 1.0]).reshape(1, -1))
assert np.allclose(sample_weight, np.array([2.0]))
assert np.allclose(y, np.array([0]))
def test_ipw_learner_fit():
context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)
action = np.array([0, 1])
reward = np.array([1.0, 0.0])
position = np.array([0, 0])
learner = IPWLearner(n_actions=2, len_list=1)
learner.fit(context=context, action=action, reward=reward, position=position)
# inconsistency with the shape
with pytest.raises(ValueError):
learner = IPWLearner(n_actions=2, len_list=2)
variant_context = np.array([1.0, 1.0, 1.0, 1.0])
learner.fit(
context=variant_context, action=action, reward=reward, position=position
)
# len_list > 2, but position is not set
with pytest.raises(ValueError):
learner = IPWLearner(n_actions=2, len_list=2)
learner.fit(context=context, action=action, reward=reward)
def test_ipw_learner_predict():
n_actions = 2
len_list = 1
# shape error
with pytest.raises(ValueError):
context = np.array([1.0, 1.0])
learner = IPWLearner(n_actions=n_actions, len_list=len_list)
learner.predict(context=context)
# shape consistency of action_dist
# n_rounds is 5, dim_context is 2
context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)
action = np.array([0, 1])
reward = np.array([1.0, 0.0])
position = np.array([0, 0])
learner = IPWLearner(n_actions=2, len_list=1)
learner.fit(context=context, action=action, reward=reward, position=position)
context_test = np.array([i for i in range(10)]).reshape(5, 2)
action_dist = learner.predict(context=context_test)
assert np.allclose(
action_dist.sum(1), np.ones_like((context_test.shape[0], len_list))
)
assert action_dist.shape[0] == 5
assert action_dist.shape[1] == n_actions
assert action_dist.shape[2] == len_list
def test_ipw_learner_sample_action():
n_actions = 2
len_list = 1
context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)
action = np.array([0, 1])
reward = np.array([1.0, 0.0])
position = np.array([0, 0])
learner = IPWLearner(n_actions=n_actions, len_list=len_list)
learner.fit(context=context, action=action, reward=reward, position=position)
with pytest.raises(ValueError):
invalid_type_context = [1.0, 2.0]
learner.sample_action(context=invalid_type_context)
with pytest.raises(ValueError):
invalid_ndim_context = np.array([1.0, 2.0, 3.0, 4.0])
learner.sample_action(context=invalid_ndim_context)
context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)
n_rounds = context.shape[0]
sampled_action = learner.sample_action(context=context)
assert sampled_action.shape[0] == n_rounds
assert sampled_action.shape[1] == n_actions
assert sampled_action.shape[2] == len_list
ipw = InverseProbabilityWeighting()
invalid_input_of_nn_policy_learner_init = [
(
0,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"n_actions must be an integer larger than 1",
),
(
10,
-1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"len_list must be a positive integer",
),
(
10,
1,
-1,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"dim_context must be a positive integer",
),
(
10,
1,
2,
None,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"off_policy_objective must be callable",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, ""),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"hidden_layer_size must be tuple of positive integers",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"sigmoid",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"activation must be one of 'identity', 'logistic', 'tanh', or 'relu'",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adagrad",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"solver must be one of 'adam', 'lbfgs', or 'sgd'",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
-1,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"alpha must be a nonnegative float",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
0,
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"batch_size must be a positive integer or 'auto'",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"learning_rate_init must be a positive float",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
0,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"max_iter must be a positive integer",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
None,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"shuffle must be a bool",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
"",
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"random_state must be None or an integer",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
-1,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"tol must be a positive float",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
2,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"momentum must be a float in [0., 1.]",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
"",
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"nestrovs_momentum must be a bool",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
None,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"early_stopping must be a bool",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"lbfgs",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"if early_stopping is True, solver must be one of 'sgd' or 'adam'",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
2,
0.9,
0.999,
1e-8,
10,
15000,
"validation_fraction must be a float in",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
2,
0.999,
1e-8,
10,
15000,
"beta_1 must be a float in [0. 1.]",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
2,
1e-8,
10,
15000,
"beta_2 must be a float in [0., 1.]",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
-1,
10,
15000,
"epsilon must be a nonnegative float",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
0,
15000,
"n_iter_no_change must be a positive integer",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
0,
"max_fun must be a positive integer",
),
]
valid_input_of_nn_policy_learner_init = [
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"valid input",
),
(
10,
1,
2,
ipw.estimate_policy_value_tensor,
(100, 50, 100),
"logistic",
"sgd",
0.001,
50,
0.0001,
200,
True,
None,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
15000,
"valid input",
),
]
@pytest.mark.parametrize(
"n_actions, len_list, dim_context, off_policy_objective, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, max_fun, description",
invalid_input_of_nn_policy_learner_init,
)
def test_nn_policy_learner_init_using_invalid_inputs(
n_actions,
len_list,
dim_context,
off_policy_objective,
hidden_layer_size,
activation,
solver,
alpha,
batch_size,
learning_rate_init,
max_iter,
shuffle,
random_state,
tol,
momentum,
nesterovs_momentum,
early_stopping,
validation_fraction,
beta_1,
beta_2,
epsilon,
n_iter_no_change,
max_fun,
description,
):
with pytest.raises(ValueError, match=f"{description}*"):
_ = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=dim_context,
off_policy_objective=off_policy_objective,
hidden_layer_size=hidden_layer_size,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate_init=learning_rate_init,
max_iter=max_iter,
shuffle=shuffle,
random_state=random_state,
tol=tol,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
max_fun=max_fun,
)
@pytest.mark.parametrize(
"n_actions, len_list, dim_context, off_policy_objective, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, max_fun, description",
valid_input_of_nn_policy_learner_init,
)
def test_nn_policy_learner_init_using_valid_inputs(
n_actions,
len_list,
dim_context,
off_policy_objective,
hidden_layer_size,
activation,
solver,
alpha,
batch_size,
learning_rate_init,
max_iter,
shuffle,
random_state,
tol,
momentum,
nesterovs_momentum,
early_stopping,
validation_fraction,
beta_1,
beta_2,
epsilon,
n_iter_no_change,
max_fun,
description,
):
nn_policy_learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=dim_context,
off_policy_objective=off_policy_objective,
hidden_layer_size=hidden_layer_size,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate_init=learning_rate_init,
max_iter=max_iter,
shuffle=shuffle,
random_state=random_state,
tol=tol,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
max_fun=max_fun,
)
assert isinstance(nn_policy_learner, NNPolicyLearner)
def test_nn_policy_learner_create_train_data_for_opl():
context = np.ones((100, 2), dtype=np.int32)
action = np.zeros((100,), dtype=np.int32)
reward = np.ones((100,), dtype=np.float32)
pscore = np.array([0.5] * 100, dtype=np.float32)
estimated_rewards_by_reg_model = np.ones((100, 2), dtype=np.float32)
position = np.zeros((100,), dtype=np.int32)
ipw = InverseProbabilityWeighting()
learner1 = NNPolicyLearner(
n_actions=2,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
training_loader, validation_loader = learner1._create_train_data_for_opl(
context=context,
action=action,
reward=reward,
pscore=pscore,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
position=position,
)
assert isinstance(training_loader, torch.utils.data.DataLoader)
assert validation_loader is None
learner2 = NNPolicyLearner(
n_actions=2,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
early_stopping=True,
)
training_loader, validation_loader = learner2._create_train_data_for_opl(
context=context,
action=action,
reward=reward,
pscore=pscore,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
position=position,
)
assert isinstance(training_loader, torch.utils.data.DataLoader)
assert isinstance(validation_loader, torch.utils.data.DataLoader)
def test_nn_policy_learner_fit():
context = np.ones((100, 2), dtype=np.float32)
action = np.zeros((100,), dtype=int)
reward = np.ones((100,), dtype=np.float32)
pscore = np.array([0.5] * 100, dtype=np.float32)
ipw = InverseProbabilityWeighting()
# inconsistency with the shape
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=2,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
variant_context = np.ones((101, 2), dtype=np.float32)
learner.fit(
context=variant_context, action=action, reward=reward, pscore=pscore
)
# inconsistency between dim_context and context
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=2,
dim_context=3,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
def test_nn_policy_learner_predict():
n_actions = 2
len_list = 1
context = np.ones((100, 2), dtype=np.float32)
context_test = np.array([i for i in range(10)], dtype=np.float32).reshape(5, 2)
action = np.zeros((100,), dtype=int)
reward = np.ones((100,), dtype=np.float32)
pscore = np.array([0.5] * 100, dtype=np.float32)
ipw = InverseProbabilityWeighting()
# shape error
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([1.0, 1.0], dtype=np.float32)
learner.predict(context=invalid_context)
# inconsistency between dim_context and context
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([[1.0, 1.0, 1.0]], dtype=np.float32)
learner.predict(context=invalid_context)
# shape consistency of action_dist
# n_rounds is 5, dim_context is 2
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
action_dist = learner.predict(context=context_test)
assert np.allclose(
action_dist.sum(1), np.ones_like((context_test.shape[0], len_list))
)
assert action_dist.shape[0] == 5
assert action_dist.shape[1] == n_actions
assert action_dist.shape[2] == len_list
def test_nn_policy_learner_sample_action():
n_actions = 2
len_list = 1
context = np.ones((100, 2), dtype=np.float32)
context_test = np.array([i for i in range(10)], dtype=np.float32).reshape(5, 2)
action = np.zeros((100,), dtype=int)
reward = np.ones((100,), dtype=np.float32)
pscore = np.array([0.5] * 100, dtype=np.float32)
ipw = InverseProbabilityWeighting()
# shape error
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([1.0, 1.0], dtype=np.float32)
learner.sample_action(context=invalid_context)
# inconsistency between dim_context and context
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([[1.0, 1.0, 1.0]], dtype=np.float32)
learner.sample_action(context=invalid_context)
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
action_dist = learner.sample_action(context=context_test)
assert np.allclose(
action_dist.sum(1), np.ones_like((context_test.shape[0], len_list))
)
assert action_dist.shape[0] == context_test.shape[0]
assert action_dist.shape[1] == n_actions
assert action_dist.shape[2] == len_list
def test_nn_policy_learner_predict_proba():
n_actions = 2
len_list = 1
context = np.ones((100, 2), dtype=np.float32)
context_test = np.array([i for i in range(10)], dtype=np.float32).reshape(5, 2)
action = np.zeros((100,), dtype=int)
reward = np.ones((100,), dtype=np.float32)
pscore = np.array([0.5] * 100, dtype=np.float32)
ipw = InverseProbabilityWeighting()
# shape error
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([1.0, 1.0], dtype=np.float32)
learner.predict_proba(context=invalid_context)
# inconsistency between dim_context and context
with pytest.raises(ValueError):
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([[1.0, 1.0, 1.0]], dtype=np.float32)
learner.predict_proba(context=invalid_context)
learner = NNPolicyLearner(
n_actions=n_actions,
len_list=len_list,
dim_context=2,
off_policy_objective=ipw.estimate_policy_value_tensor,
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
action_dist = learner.predict_proba(context=context_test)
assert np.allclose(
action_dist.sum(1), np.ones_like((context_test.shape[0], len_list))
)
assert action_dist.shape[0] == context_test.shape[0]
assert action_dist.shape[1] == n_actions
assert action_dist.shape[2] == len_list
| 2.046875 | 2 |
shop/models.py | lbacon17/lb-fitness | 0 | 12767230 | <reponame>lbacon17/lb-fitness
from decimal import Decimal
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django_countries.fields import CountryField
class Category(models.Model):
class Meta:
verbose_name_plural = 'Categories'
name = models.CharField(max_length=254)
friendly_name = models.CharField(max_length=254, null=True, blank=True)
def __str__(self):
return self.name
def get_friendly_name(self):
return self.friendly_name
class Product(models.Model):
category = models.ForeignKey('Category', null=True, blank=True, on_delete=models.SET_NULL)
sku = models.CharField(max_length=254, null=True, blank=True)
name = models.CharField(max_length=254)
friendly_name = models.CharField(max_length=254, null=False, blank=False, default=name)
description = models.TextField(null=True, blank=True)
has_sizes = models.BooleanField(default=False, null=True, blank=True)
price = models.DecimalField(max_digits=6, decimal_places=2)
vip_discount_percentage = models.IntegerField(default=50)
rating = models.DecimalField(max_digits=3, decimal_places=2, default=0.00, null=True, blank=True)
image_url = models.URLField(max_length=1054, null=True, blank=True)
image = models.ImageField(null=True, blank=True)
def set_vip_discount(self, user_id):
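        # When the buyer is on the VIP subscription package (id 3), rescale the stored price by vip_discount_percentage / 100.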
user = User.objects.get(id=user_id)
if user.member.subscription_package.id == 3:
self.price = self.price * Decimal(self.vip_discount_percentage / 100)
self.save()
def __str__(self):
return self.name
def get_friendly_name(self):
return self.friendly_name
def set_discount_price(self):
discount_price = (float(self.price) * (self.vip_discount_percentage / 100))
return discount_price
| 2.140625 | 2 |
inference/modulated_detection.py | kylevedder/mvits_for_class_agnostic_od | 114 | 12767231 | import numpy as np
import torch
from PIL import Image
import torchvision.transforms as T
from infer import Inference
from utils.nms import nms
torch.set_grad_enabled(False)
def class_agnostic_nms(boxes, scores, iou=0.5):
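    # Suppress overlapping boxes regardless of class; a single box is returned unchanged.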
if len(boxes) > 1:
boxes, scores = nms(np.array(boxes), np.array(scores), iou)
return list(boxes), list(scores)
else:
return boxes, scores
def generate_image_crops(img, num_crops=8):
"""
    Note: num_crops must be greater than 2 and a multiple of 2
"""
assert num_crops > 2
assert num_crops % 2 == 0
# Get the image width and height
img_w, img_h = img.size
crops = []
coordinates = []
crops.append(img)
coordinates.append((0, 0, img_w, img_h))
crop_chunks_x = int(num_crops / 2)
crop_chunks_y = int(num_crops / crop_chunks_x)
x_inc = int(img_w / crop_chunks_y)
y_inc = int(img_h / crop_chunks_y)
x_space = np.linspace(0, img_w - x_inc, crop_chunks_y)
    y_space = np.linspace(0, img_h - y_inc, int(num_crops / crop_chunks_y))
    if num_crops > 1:
        for x in x_space:
            for y in y_space:
x1, y1 = x, y
x2, y2 = x1 + x_inc, y1 + y_inc
crops.append((img.crop((x1, y1, x2, y2))).resize((img_w, img_h)))
coordinates.append((x1, y1, x2, y2))
return crops, coordinates, (img_w, img_h)
def scale_boxes(boxes, coordinates, img_dims):
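    # Map boxes predicted on a crop (resized to full-image size) back into original-image coordinates.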
x1, y1, x2, y2 = coordinates
img_w, img_h = img_dims
w = x2 - x1
h = y2 - y1
for b in boxes:
b[0], b[1], b[2], b[3] = int((b[0] / img_w) * w) + x1, int((b[1] / img_h) * h) + y1, \
int((b[2] / img_w) * w) + x1, int((b[3] / img_h) * h) + y1
return boxes
class ModulatedDetection(Inference):
"""
    This class supports inference using both the MDETR and MDef-DETR models.
"""
def __init__(self, model, confidence_thresh=0.0):
Inference.__init__(self, model)
self.conf_thresh = confidence_thresh
self.transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
@staticmethod
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(self, out_bbox, size):
img_w, img_h = size
b = self.box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
def infer_image(self, image_path, **kwargs):
caption = kwargs["caption"]
# Read the image
im = Image.open(image_path)
imq = np.array(im)
if len(imq.shape) != 3:
im = im.convert('RGB')
img = self.transform(im).unsqueeze(0).cuda()
# propagate through the models
memory_cache = self.model(img, [caption], encode_and_save=True)
outputs = self.model(img, [caption], encode_and_save=False, memory_cache=memory_cache)
# keep only predictions with self.conf_thresh+ confidence
probas = 1 - outputs['pred_logits'].softmax(-1)[0, :, -1].cpu()
keep = (probas > self.conf_thresh).cpu()
# convert boxes from [0; 1] to image scales
bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[0, keep], im.size)
kept_probs = probas[keep]
# Convert outputs to the required format
bboxes = list(bboxes_scaled.numpy())
probs = list(kept_probs.numpy())
boxes, scores = [], []
for b, conf in zip(bboxes, probs):
boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
scores.append(conf)
# Read image, perform inference, parse results, append the predicted boxes to detections
return boxes, scores
def infer_image_multi_crop(self, image_path, **kwargs):
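        # Detect on the full image plus a grid of crops, rescale crop boxes to image coordinates, and merge everything with class-agnostic NMS.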
caption = kwargs["caption"]
# Read the image
im = Image.open(image_path)
crops, coordinates, img_dims = generate_image_crops(im)
imgs = [self.transform(crop).unsqueeze(0).cuda() for crop in crops]
imgs = torch.cat(imgs)
# propagate through the models
memory_cache = self.model(imgs, [caption for i in range(imgs.shape[0])], encode_and_save=True)
outputs = self.model(imgs, [caption], encode_and_save=False, memory_cache=memory_cache)
all_boxes = []
all_scores = []
for i in range(len(crops)):
# keep only predictions with self.conf_thresh+ confidence
probas = 1 - outputs['pred_logits'].softmax(-1)[i, :, -1].cpu()
keep = (probas > self.conf_thresh).cpu()
# convert boxes from [0; 1] to image scales
bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[i, keep], im.size)
kept_probs = probas[keep]
# Convert outputs to the required format
bboxes = list(bboxes_scaled.numpy())
probs = list(kept_probs.numpy())
boxes, scores = [], []
for b, conf in zip(bboxes, probs):
boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
scores.append(conf)
# Read image, perform inference, parse results, append the predicted boxes to detections
boxes = scale_boxes(boxes, coordinates[i], img_dims)
all_boxes += boxes
all_scores += scores
        all_boxes, all_scores = class_agnostic_nms(all_boxes, all_scores)
return all_boxes, all_scores
| 2.234375 | 2 |
Desafios/desafio014.py | MaxGabrielima/Python-Codes | 3 | 12767232 | c = float(input('Digite a temperatura em C°: '))
f = c * 1.8 + 32
print('{:.0f} C° is equal to {:.0f}° Fahrenheit'.format(c, f))
actions/aux_function.py | NathanDezan/chatbot-pytube | 0 | 12767233 | from pytube import YouTube
import re
import os
#Action
class AuxFunctions():
def get_resolutions(self, url):
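        # Probe the video's available streams and return only the resolutions that are actually offered.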
res_exist = {'144p': False, '240p': False, '360p': False, '480p': False, '720p': False, '1080p': False, '1440p': False, '2160p': False, '4320p': False}
resolutions_list = list()
function_name = self.get_resolutions.__name__
try:
video = YouTube(url)
describe_tube = video.streams.all()
for i in describe_tube:
for j in res_exist:
res_string = str(i.resolution)
if res_string == j:
res_exist[j] = True
for i in res_exist:
if res_exist[i] == True:
resolutions_list.append(i)
print('Success in ' + function_name)
except:
print('Error in ' + function_name)
return None
return resolutions_list
def removeEmoji(self, text):
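        # Strip emoji and filesystem-unsafe characters from the text and collapse repeated spaces.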
        # Variables
name_function = self.removeEmoji.__name__
try:
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
text_format = emoji_pattern.sub(r'', text)
text_format = re.sub(' +', ' ', text_format)
text_format = text_format.replace('"', '')
text_format = text_format.replace('\\', '')
text_format = text_format.replace('/', '')
text_format = text_format.replace(':', '')
text_format = text_format.replace('*', '')
text_format = text_format.replace('?', '')
text_format = text_format.replace('<', '')
text_format = text_format.replace('>', '')
text_format = text_format.replace('|', '')
print('Success in ' + name_function)
except:
print('Error in ' + name_function)
return None
return text_format
def removeFiles(self, video_title):
        # Variables
path_video = './download/mp4/' + video_title + '.mp4'
path_audio = './download/mp3/' + video_title
path_last_file = './download/' + video_title + '.mp4'
function_name = self.removeFiles.__name__
try:
if os.path.exists(path_video):
os.remove(path_video)
if os.path.exists(path_audio + '.mp3'):
os.remove(path_audio + '.mp3')
if os.path.exists(path_audio + '.mp4'):
os.remove(path_audio + '.mp4')
if os.path.exists(path_last_file):
os.remove(path_last_file)
print('Success in ' + function_name)
except:
print('Error in ' + function_name)
def information_Audio(self, stream, title):
info_Audio = {'Nome': '', 'Bitrate': '', 'Codec': '', 'Formato': ''}
function_name = self.information_Audio.__name__
try:
info_Audio['Nome'] = title
info_Audio['Bitrate'] = stream.abr
info_Audio['Codec'] = stream.codecs[1]
info_Audio['Formato'] = 'mp3'
print('Success in ' + function_name)
except:
print('Error in ' + function_name)
return None
return info_Audio
def information_Video(self, stream, title):
function_name = self.information_Video.__name__
info_Video = {'Nome': '', 'Resolução': '', 'Fps': '', 'VCodec': '', 'ACodec': '', 'Formato': ''}
try:
info_Video['Nome'] = stream.title
info_Video['Resolução'] = stream.resolution
info_Video['Fps'] = stream.fps
info_Video['VCodec'] = stream.codecs[0]
info_Video['ACodec'] = stream.codecs[1]
info_Video['Formato'] = 'mp4'
            print('Success in ' + function_name)
except:
print('Error in ' + function_name)
return None
return info_Video | 2.765625 | 3 |
src/moeflow/util.py | rachmadaniHaryono/MoeFlow | 0 | 12767234 | # -*- coding: utf-8 -*-
import hashlib
import logging
import os
import tempfile
import time
import cv2
import numpy as np
WIDTH_HEIGHT_LIMIT = 1600 # in pixel
def resize_large_image(image_data):
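    # Decode raw image bytes and shrink whichever side exceeds WIDTH_HEIGHT_LIMIT, preserving the aspect ratio.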
img_array = np.fromstring(image_data, dtype=np.uint8)
image = cv2.imdecode(img_array, 1)
height, width = image.shape[:2]
logging.info("Height: {}, Width: {}".format(height, width))
if height > width and height > WIDTH_HEIGHT_LIMIT:
ratio = float(WIDTH_HEIGHT_LIMIT) / float(height)
new_width = int((width * ratio) + 0.5)
return cv2.resize(
image,
(new_width, WIDTH_HEIGHT_LIMIT),
interpolation=cv2.INTER_AREA
)
elif width > WIDTH_HEIGHT_LIMIT:
ratio = float(WIDTH_HEIGHT_LIMIT) / float(width)
new_height = int((height * ratio) + 0.5)
return cv2.resize(
image,
(WIDTH_HEIGHT_LIMIT, new_height),
interpolation=cv2.INTER_AREA
)
else:
return image
def resize_faces(image_files, width=96, height=96):
for image_file in image_files:
image = cv2.imread(image_file)
resized_image = cv2.resize(
image,
(width, height),
interpolation=cv2.INTER_AREA
)
cv2.imwrite(image_file, resized_image)
def cleanup_image_cache(image_dir, expire=3600): # Expire in 1 hour
now = time.time()
for f in os.listdir(image_dir):
f = os.path.join(image_dir, f)
if os.stat(f).st_mtime < now - expire:
if os.path.isfile(f):
os.remove(f)
def sha256_checksum(filename, block_size=65536):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def get_hex_value(r, g, b):
def clamp(x):
return max(0, min(x, 255))
return "#{0:02x}{1:02x}{2:02x}".format(clamp(r), clamp(g), clamp(b))
def get_resized_face_temp_file(face_dict, cv2_img):
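    # Crop the detected face region, resize it to 96x96 and write it to a temporary JPEG, returning the temp file path.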
width, height = 96, 96
pos = face_dict['pos']
crop_img = cv2_img[pos.y:pos.y+pos.height, pos.x:pos.x+pos.width]
resized_img = cv2.resize(
crop_img,
(width, height),
interpolation=cv2.INTER_AREA
)
resized_path = None
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_ff:
resized_path = temp_ff.name
cv2.imwrite(temp_ff.name, resized_img)
return resized_path
| 2.53125 | 3 |
Calibration/EcalAlCaRecoProducers/python/alcastreamEcalEtaCalib_cff.py | ckamtsikis/cmssw | 852 | 12767235 | import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
ecaletaCalibHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
# HLTPaths = ['AlCa_EcalEta'],
eventSetupPathsKey='EcalCalEtaCalib',
throw = False
)
| 0.957031 | 1 |
nose2/tests/functional/support/scenario/module_import_err/pkg/test_attribute_err.py | benj-pml/nose2 | 0 | 12767236 | from nose2.compat import unittest
def test_foo():
pass
class TestFoo(unittest.TestCase):
def test_foo(self):
pass
| 1.671875 | 2 |
tally.py | ishabalu/trial2 | 0 | 12767237 | <reponame>ishabalu/trial2
import calc
veg = 120
fruits = 45
cash = 200
tot = calc.add(veg, fruits)
cashback = calc.sub(cash,tot)
print(" cash returned is",cashback)
| 3.328125 | 3 |
python_programs/to_base.py | PatrickShaw/QuixBugs | 22 | 12767238 | <gh_stars>10-100
import string
def to_base(num, b):
result = ''
alphabet = string.digits + string.ascii_uppercase
while num > 0:
i = num % b
num = num // b
        result = alphabet[i] + result  # prepend so the most significant digit ends up first
return result
"""
Integer Base Conversion
base-conversion
Input:
num: A base-10 integer to convert.
b: The target base to convert it to.
Precondition:
num > 0, 2 <= b <= 36.
Output:
A string representing the value of num in base b.
Example:
>>> to_base(31, 16)
'1F'
"""
| 4.03125 | 4 |
core/processors/UNKNOWN_TASKProcessor.py | LoriSun/petp | 4 | 12767239 | <filename>core/processors/UNKNOWN_TASKProcessor.py<gh_stars>1-10
import logging
from core.processor import Processor
class UNKNOWN_TASKProcessor(Processor):
TPL: str = '{"msg":""}'
DESC: str = f'''
    This is an empty task; it should only be used when converting a Selenium IDE recording to a PETP execution.
{TPL}
'''
def process(self):
logging.warning('UNKNOWN_TASK!!! ' + self.get_param("msg"))
| 2.5625 | 3 |
Singleton (__new__ version).py | joe513/Miscellaneous | 0 | 12767240 | <filename>Singleton (__new__ version).py
class Person:
IS = None
def __init__(self, name, hours, rate):
self.name = name
self.hours = hours
self.rate = rate
def __new__(cls, *args, **kwargs):
        if cls.IS is None:
cls.IS = object.__new__(Person)
return cls.IS
def pay(self):
return self.hours * self.rate
z = Person('Bob', 14, 4)
x = Person('Jack', 44, 4)
print(z, x)
print(z.hours, x.hours)
| 3.5 | 4 |
src/hotels/utils.py | bee-travels/data-generator | 2 | 12767241 | import json
def load_json(file_name):
with open(file_name) as json_data:
return json.load(json_data) | 2.421875 | 2 |
app/waterQual/legacy/silica/silica_overfit.py | fkwai/geolearn | 0 | 12767242 | import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality, wqLinear
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# test
outName = 'Silica64-Y8090-00955-opt1'
wqData = waterQuality.DataModelWQ('Silica64')
code = '00955'
trainset = 'Y8090'
testset = 'Y0010'
# trainset = 'Y0010'
# testset = 'Y8090'
optT = trainset
master = basins.loadMaster(outName)
# seq test
siteNoLst = wqData.info['siteNo'].unique().tolist()
basins.testModelSeq(outName, siteNoLst, wqData=wqData)
ns = len(siteNoLst)
# calculate error from sequence
rmseMat = np.ndarray([ns, 2])
corrMat = np.ndarray([ns, 2])
for k, siteNo in enumerate(siteNoLst):
print(k, siteNo)
dfPred, dfObs = basins.loadSeq(outName, siteNo)
rmseLSTM, corrLSTM = waterQuality.calErrSeq(dfPred[code], dfObs[code])
rmseMat[k, :] = rmseLSTM
corrMat[k, :] = corrLSTM
# time series map
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
codePdf = usgs.codePdf
def funcMap():
figM, axM = plt.subplots(2, 1, figsize=(8, 6))
axplot.mapPoint(axM[0], lat, lon, corrMat[:, 0]-corrMat[:, 1], s=12)
axplot.mapPoint(axM[1], lat, lon, corrMat[:, 1], s=12)
figP, axP = plt.subplots(1, 1, figsize=(8, 6))
return figM, axM, figP, axP, lon, lat
def funcPoint(iP, axP):
siteNo = siteNoLst[iP]
dfP1, dfObs = basins.loadSeq(outName, siteNo)
rmse1, corr1 = waterQuality.calErrSeq(dfP1[code], dfObs[code])
t = dfObs.index.values
tBar = np.datetime64('2000-01-01')
axplot.plotTS(axP, t, [dfP1[code], dfObs[code]], tBar=tBar,
legLst=['LSTM', 'obs'], styLst='-*', cLst='br')
tStr = '{}, rmse [{:.2f} {:.2f}], corr [{:.2f} {:.2f}]'.format(
siteNo, rmse1[0], rmse1[1], corr1[0], corr1[1])
axP.set_title(tStr)
importlib.reload(figplot)
figM, figP = figplot.clickMap(funcMap, funcPoint)
for ax in figP.axes:
ax.set_xlim(np.datetime64('2010-01-01'), np.datetime64('2015-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_xlim(np.datetime64('1990-01-01'), np.datetime64('1995-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_xlim(np.datetime64('1980-01-01'), np.datetime64('2020-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_ylim(5, 30)
figP.canvas.draw()
| 2.21875 | 2 |
gpytorch/priors/_compatibility.py | bdecost/gpytorch | 0 | 12767243 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import torch
from gpytorch.priors import SmoothedBoxPrior
logger = logging.getLogger()
def _bounds_to_prior(prior, bounds, batch_size=None, log_transform=True):
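    # Backwards-compatibility shim: convert legacy (lower, upper) bounds into a SmoothedBoxPrior when no explicit prior is given.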
if prior is not None:
return prior
elif bounds is not None:
logger.warning(
"Parameter bounds have been deprecated and will be removed in a future release. "
"Please either remove them or use a SmoothedBoxPrior instead!"
)
a = torch.full((batch_size or 1,), float(bounds[0]))
b = torch.full((batch_size or 1,), float(bounds[1]))
return SmoothedBoxPrior(a, b, log_transform=log_transform)
else:
return None
| 2.015625 | 2 |
format.py | my-personal-forks/dart-sublime-bundle | 182 | 12767244 | <gh_stars>100-1000
# Copyright (c) 2014, <NAME>. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.)
from subprocess import PIPE
from subprocess import Popen
import sublime
import sublime_plugin
from Dart.sublime_plugin_lib import PluginLogger
from Dart.sublime_plugin_lib.plat import supress_window
from Dart import analyzer
from Dart.lib.sdk import DartFormat
_logger = PluginLogger(__name__)
class DartFormatCommand(sublime_plugin.WindowCommand):
'''Formats the selected text in Sublime Text using `dartfmt`.
Notes:
- Can be used as a build system.
'''
def run(self, **kwargs):
view = self.window.active_view()
if not view:
return
analyzer.g_server.send_format_file(view)
class DartReplaceRegion(sublime_plugin.TextCommand):
def run(self, edit, region, text):
reg = sublime.Region(*region)
self.view.replace(edit, reg, text)
self.view.run_command('reindent')
| 2.390625 | 2 |
ircawp-learn.py | Fortyseven/ircawp2020-markov | 0 | 12767245 | #!/bin/python3
import sys
import os
import Markov
if __name__ == "__main__":
if (len(sys.argv) < 2 or len(sys.argv) > 3):
print("usage: {} sourcefile.txt [existingbrain.json]".format(
sys.argv[0]))
sys.exit(-1)
if (not os.path.exists(sys.argv[1])):
print("Can't find source corpus {}".format(sys.argv[1]))
sys.exit(-1)
brain = Markov.Brain()
# If specified, load the existing brain and get it ready to merge
if (len(sys.argv) == 3):
if (not os.path.exists(sys.argv[2])):
print("Can't find brain '{}' to merge with".format(sys.argv[2]))
sys.exit(-1)
brain.loadExistingBrain(sys.argv[2])
brain.compileCorupus(sys.argv[1])
print(brain.toJSON())
| 2.671875 | 3 |
src/player.py | Flowshu/gym-mtg | 0 | 12767246 | <reponame>Flowshu/gym-mtg
from creature import Creature
from random import choice
class Player():
def __init__(self):
self.life = 5
self.creatures = []
#self.agent = 0
#self.battlefield = 0
#self.deck = 0
#self.hand = []
#self.lands = []
#self.graveyard = []
#self.exile = []
def play_creature(self, creature: Creature):
self.creatures.append(creature)
def declare_attacks(self):
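        # Randomly decide, for each creature, whether it attacks (1) or stays back (0).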
attack_vector = []
for _ in range(len(self.creatures)):
attack_vector.append(choice(range(2)))
return attack_vector
def declare_blocks(self, attack_vector):
blockables = [-1]
for attacker in range(len(attack_vector)):
if attack_vector[attacker] == 1:
blockables.append(attacker)
block_vector = []
for _ in range(len(self.creatures)):
block_vector.append(choice(blockables))
return block_vector
| 3.015625 | 3 |
mailing/mail/views.py | todd-sudo/mailing | 0 | 12767247 | <reponame>todd-sudo/mailing
import datetime
import uuid
from rest_framework import generics, status
from django.db.models import Q
from rest_framework.response import Response
from .models import Client, Message, MailingList
from .serializers import (
ClientCreateUpdateSerializer,
MailingCreateUpdateSerializer,
ClientSerializer,
MailingDetailSerializer,
)
from .utils import create_task_message_send, utc_to_local
from logger.logger import logger
# Client
class CreateClientView(generics.CreateAPIView):
""" Создание клиента
"""
queryset = Client.objects.all()
serializer_class = ClientCreateUpdateSerializer
class UpdateClientView(generics.UpdateAPIView):
""" Обновление данных клинта
"""
queryset = Client.objects.all()
serializer_class = ClientCreateUpdateSerializer
lookup_field = "id"
class DestroyClientView(generics.DestroyAPIView):
""" Удаление данных клиента
"""
queryset = Client.objects.all()
lookup_field = "id"
# MailingList
class MailingCreateView(generics.CreateAPIView):
""" Создание рассылки
"""
serializer_class = MailingCreateUpdateSerializer
queryset = MailingList.objects.all()
def create(self, request, *args, **kwargs):
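        # Validate the mailing, select clients matching its tag or operator code, create a Message for them, and schedule one send task per client within the start/stop window.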
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
mail_external_id = dict(serializer.data).get("external_id")
ml = MailingList.objects.get(external_id=mail_external_id)
date_start = utc_to_local(ml.date_start)
date_stop = utc_to_local(ml.date_stop)
clients = Client.objects.filter(
Q(tag=ml.tag) | Q(operator_code=ml.operator_code)
)
logger.info(f"Рассылка: {ml.external_id}|Клиентов: {len(clients)}")
new_message = Message.objects.create(
external_id=uuid.uuid4(),
create_at=datetime.datetime.now(),
status=True,
mailing_list=ml,
)
logger.info(f"Сообщение: {new_message} создано")
new_message.clients.add(*clients)
tasks = []
for client in clients:
task = create_task_message_send(
name_task=f"task_{uuid.uuid4().hex}",
text_message=ml.text,
message_id=new_message.pk,
phone=int(client.phone),
start_time=date_start,
expires=date_stop,
)
logger.info(f"Задача: {task} создана")
tasks.append(task)
logger.info(f"Всего задач: {len(tasks)}")
if tasks:
return Response(
serializer.data,
status=status.HTTP_201_CREATED,
headers=headers
)
logger.error(
"Рассылка не запущена, нет клиентов, попадающих под фильтры"
)
return Response(
{
"detail": "рассылка не запущена, нет клиентов, "
"попадающих под фильтры"
},
status=status.HTTP_400_BAD_REQUEST,
headers=headers
)
class MailingUpdateView(generics.UpdateAPIView):
""" Обновление рассылки
"""
queryset = MailingList.objects.all()
serializer_class = MailingCreateUpdateSerializer
lookup_field = "id"
class MailingDestroyView(generics.DestroyAPIView):
""" Удаление рассылки
"""
queryset = MailingList.objects.all()
lookup_field = "id"
class MailingListClientsListView(generics.ListAPIView):
""" Список клиентов одной рассылки
"""
lookup_field = "id"
serializer_class = ClientSerializer
def get_queryset(self):
ml = MailingList.objects.get(id=self.kwargs[self.lookup_field])
clients = Client.objects.filter(messages__mailing_list=ml)
return clients
class MailingListView(generics.ListAPIView):
""" Просмотр списка рассылок
"""
serializer_class = MailingCreateUpdateSerializer
def get_queryset(self):
queryset = MailingList.objects.all()
return queryset
class MailingDetailView(generics.RetrieveAPIView):
""" Просмотр детальной информации о рассылке
"""
serializer_class = MailingDetailSerializer
lookup_field = "id"
queryset = MailingList.objects.all()
| 1.851563 | 2 |
attacks/fgsm.py | Jackie-LJQ/efficientdet | 0 | 12767248 | <reponame>Jackie-LJQ/efficientdet
from torch.autograd import Variable
import torch
def linf_clamp(x, _min, _max):
'''
Inplace linf clamping on Tensor x.
Args:
x: Tensor. shape=(N,C,W,H)
_min: Tensor with same shape as x.
_max: Tensor with same shape as x.
'''
idx = x.data < _min
x.data[idx] = _min[idx]
idx = x.data > _max
x.data[idx] = _max[idx]
return x
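# Illustrative helper (not part of the original attack code): project a
# perturbed batch x_adv back into the eps-ball around the clean batch x and
# into the valid image range [0, 1]. The eps default of 8/255 is an assumed
# example value, not taken from this repository.
def example_project(x, x_adv, eps=8.0 / 255.0):
    x_adv = linf_clamp(x_adv, _min=x - eps, _max=x + eps)  # clamp to linf ball centered at x
    return torch.clamp(x_adv, 0, 1)                        # clamp to RGB range [0,1]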
class FGSM():
def __init__(self, eps, alpha=1, targeted=True):
'''
Args:
eps: float. noise bound.
            targeted: bool. If True, do targeted attack.
'''
self.eps = eps
self.targeted = targeted
self.alpha = alpha
def attack(self, model, x, gtlabels, targets=None):
'''
Args:
x: Tensor. Original images. size=(N,C,W,H)
model: nn.Module. The model to be attacked.
gtlabels: Tensor. ground truth labels for x. size=(N,). Useful only under untargeted attack.
targets: Tensor. target attack class for x. size=(N,). Useful only under targeted attack.
Return:
x_adv: Tensor. Adversarial images. size=(N,C,W,H)
'''
model.eval()
x_adv = Variable(x.detach().cuda(), requires_grad=True)
dummy_x = torch.cat([x_adv, torch.zeros_like(x_adv), torch.zeros_like(x_adv)], dim=0)
if self.targeted:
            # total_loss, cls_loss and box_loss of the clean sample
total_loss, cls_loss, box_loss, _ = model(dummy_x, targets)[0].values()
else:
total_loss, cls_loss, box_loss, _ = model(dummy_x, gtlabels)[0].values()
cls_grad_adv = torch.autograd.grad(cls_loss, x_adv, only_inputs=True, retain_graph=True)[0]
box_grad_adv = torch.autograd.grad(box_loss, x_adv, only_inputs=True)[0]
        x_cls_adv = x_adv.data.add_(self.alpha * torch.sign(cls_grad_adv.data)) # gradient ascent by Sign-SGD
x_cls_adv = linf_clamp(x_cls_adv, _min=x-self.eps, _max=x+self.eps) # clamp to linf ball centered by x
x_cls_adv = torch.clamp(x_cls_adv, 0, 1) # clamp to RGB range [0,1]
        x_box_adv = x_adv.data.add_(self.alpha * torch.sign(box_grad_adv.data)) # gradient ascent by Sign-SGD
x_box_adv = linf_clamp(x_box_adv, _min=x-self.eps, _max=x+self.eps) # clamp to linf ball centered by x
x_box_adv = torch.clamp(x_box_adv, 0, 1) # clamp to RGB range [0,1]
cat_input = torch.cat([x, x_cls_adv, x_box_adv], dim=0)
# total_loss of cls_adv sample and box_adv sample
cat_loss = model(cat_input, gtlabels)
cls_loss = cat_loss[1]['loss']
box_loss = cat_loss[2]['loss']
model.train()
if box_loss > cls_loss:
return x_box_adv, 'box'
return x_cls_adv, 'cls' | 2.296875 | 2 |
lib/models/Transformer.py | fivosts/pinkySpeaker | 1 | 12767249 | <gh_stars>1-10
#!/usr/bin/env python
import numpy as np
import gensim
from os import path, makedirs
from keras import backend as K
from keras.models import Sequential, load_model
from keras.callbacks import LambdaCallback
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from keras.layers import Dense, Activation, TimeDistributed, Dropout
from keras_transformer import get_model, decode
from eupy.native import logger as l
class Transformer:
## Model is based on
## "https://www.tensorflow.org/tutorials/text/transformer"
_logger = None
def __init__(self, data = None, model = None, LSTM_Depth = 3, sequence_length = 30):
self._logger = l.getLogger()
self._logger.debug("pinkySpeaker.lib.model.Transformer.__init__()")
## _dataset and _model are the two member variables of the class
self._raw_data = data
self._model = model
self._dataset = None
runTransformer(data)
self._lyric_sequence_length = sequence_length
self._startToken = "</START>"
self._endToken = "<PASSWORD>" ## TODO
self._padToken = "</PAD>"
if data:
self._initArchitecture(data)
elif model:
self._model = self._loadNNModel(model)
self._logger.info("Transformer model")
return
## Booting function of NN Model + dataset initialization
def _initArchitecture(self, raw_data):
self._logger.debug("pinkySpeaker.lib.model.Transformer._initArchitecture()")
vocab_size, max_title_length, all_titles_length, inp_sentences = self._initNNModel(raw_data)
self._initDataset(raw_data, vocab_size, max_title_length, all_titles_length, inp_sentences)
return
## Booting function of NN Model initialization
def _initNNModel(self, raw_data):
self._logger.debug("pinkySpeaker.lib.model.Transformer._initNNModel()")
self._logger.info("Initialize NN Model")
inp_sent, max_title_length, all_titles_length = self._constructSentences(raw_data)
word_model = self._initWordModel(inp_sent)
pretrained_weights = word_model.wv.vectors
vocab_size, _ = pretrained_weights.shape
## Any new sub-model should be registered here
## The according function should be written to initialize it
self._model = { 'word_model' : None,
'lyric_model' : None
}
## The order matters because of word2idx usage, therefore manual initialization here
self._model['word_model'] = word_model
self._model['Transformer'] = self._initLyricModel(pretrained_weights)
self._logger.info("Transformer Compiled successfully")
return vocab_size, max_title_length, all_titles_length, inp_sent
## Loads a model from file.
def _loadNNModel(self, modelpath):
return { 'word_model' : gensim.models.Word2Vec.load(path.join(modelpath, "word_model.h5")),
'lyric_model' : load_model(path.join(modelpath, "lyric_model.h5"))
}
## Booting function of dataset creation.
## Assigns the dataset to self._dataset
def _initDataset(self, raw_data, vocab_size, mx_t_l, all_t_l, inp_sent):
self._logger.debug("pinkySpeaker.lib.model.Transformer._initDataset()")
lyric_set = self._constructTLSet(raw_data, vocab_size, mx_t_l, all_t_l)
if len(lyric_set['encoder_input']) != len(lyric_set['output']) or len(lyric_set['decoder_input']) != len(lyric_set['output']):
raise ValueError("Wrong lyric set dimensions!")
self._dataset = { 'word_model' : inp_sent,
'lyric_model' : lyric_set
}
self._logger.info("Dataset constructed successfully")
return
## Initialize and return word model
def _initWordModel(self, inp_sentences):
self._logger.debug("pinkySpeaker.lib.model.Transformer._initWordModel()")
        inp_sentences.append([self._padToken]) # Token that represents masking of training weights. Used to pad sequence length
        inp_sentences.append([self._startToken]) # Token that represents the start of a sequence
wm = gensim.models.Word2Vec(inp_sentences, size = 300, min_count = 1, window = 4, iter = 200)
self._logger.info("Word2Vec word model initialized")
return wm
## Function to initialize and return title model
## Needs to be fixed
def _initTitleModel(self, weights, LSTM_Depth):
self._logger.debug("pinkySpeaker.lib.model.Transformer._initTitleModel()")
vocab_size, embedding_size = weights.shape
tm = Sequential()
tm.add(Embedding(input_dim=vocab_size, output_dim=embedding_size, weights=[weights]))
tm.add(Dropout(0.2))
for _ in range(LSTM_Depth - 1):
tm.add(LSTM(units=embedding_size, return_sequences=True))
tm.add(Dropout(0.2))
tm.add(LSTM(units=2 * embedding_size, return_sequences=False))
tm.add(Dropout(0.2))
tm.add(Dense(units=vocab_size))
tm.add(Activation('softmax'))
tm.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
self._logger.info("Title model initialized")
self._logger.info(tm.summary())
return tm
## Function to initialize and return lyric model
def _initLyricModel(self, weights):
self._logger.debug("pinkySpeaker.lib.model.Transformer._initLyricModel()")
vocab_size, embedding_size = weights.shape
lm = get_model(
token_num = vocab_size,
embed_dim = embedding_size,
encoder_num = 2,
decoder_num = 2,
head_num = 2,
hidden_dim = 128,
attention_activation = 'relu',
feed_forward_activation = 'relu',
dropout_rate = 0.05,
embed_weights = weights,
embed_trainable = False
)
lm.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
self._logger.info("Transformer model initialized")
self._logger.info(lm.summary())
return lm
## Set class weights for pad token and start token to 0.
def _setClassWeight(self, vocab_size):
self._logger.debug("pinkySpeaker.lib.model.Transformer._setClassWeight()")
clw = {}
for i in range(vocab_size):
clw[i] = 1
clw[self.word2idx(self._padToken)] = 0
clw[self.word2idx(self._startToken)] = 0
return clw
## Precompute raw data information to construct word model
## Returns a list of chunks of sentences, the max length of titles
## and the total length of titles
def _constructSentences(self, raw_data):
self._logger.debug("pinkySpeaker.lib.model.Transformer._constructSentences()")
self._logger.info("Sentence preprocessing for word model")
sentence_size = 10
max_title_length = 0
all_titles_length = 0
words = []
for song in raw_data:
curr_title_length = len(song['title'])
all_titles_length += curr_title_length - 1
if curr_title_length > max_title_length:
max_title_length = curr_title_length
for word in song['title']:
words.append(word)
for sent in song['lyrics']:
for word in sent:
words.append(word)
return self._listToChunksList(words, sentence_size), max_title_length, all_titles_length
## Converts a sentence-list of words-list to a list of chunks of sentences
def _listToChunksList(self, lst, n):
self._logger.debug("pinkySpeaker.lib.model.Transformer._listToChunksList()")
chunk_list = []
for i in range(0, len(lst), n):
chunk_list.append(lst[i: i + n])
return chunk_list
def _constructTLSet(self, raw_data, vocab_size, max_title_length, all_titles_length):
self._logger.debug("pinkySpeaker.lib.model.Transformer._constructTLSet()")
# lyric_set = {
# 'input' : np.zeros([2, len(raw_data), self._lyric_sequence_length], dtype = np.int32),
# 'sample_weight' : np.zeros([len(raw_data), self._lyric_sequence_length], dtype = np.int32),
# # lyric target will be the output of a softmax, i.e. a float and should be considered as such.
# 'output' : np.zeros([len(raw_data), self._lyric_sequence_length, 1], dtype = np.float64)
# }
encoder_input = []
decoder_input = []
decoder_output = []
sample_weight = []
## Iterate over each song. Keep index
for song in raw_data:
## We need a fixed sequence length. The next function will grab a song and will return NN inputs and targets, sized _lyric_sequence_length
## If the song is bigger than this, multiple pairs of inputs/target will be returned.
encoder_in, decoder_in, decoder_target, song_sample_weight = self._splitSongtoSentence(" ".join([" ".join(x) for x in ([song['title']] + song['lyrics'])]).split())
## For each input/target pair...
for enc_in, dec_in, dec_out, weight in zip(encoder_in, decoder_in, decoder_target, song_sample_weight):
## Convert str array to embed index tensor
## And convert target str tokens to indices. Indices to one hot vecs vocab_size sized. Pass one-hot vecs through softmax to construct final target
encoder_input.append(np.asarray([self.word2idx(x) for x in enc_in]))
decoder_input.append(np.asarray([self.word2idx(x) for x in dec_in]))
decoder_output.append(np.asarray([[self.word2idx(x)] for x in dec_out]))
sample_weight.append(np.asarray(weight))
lyric_set = {
'encoder_input' : np.asarray(encoder_input, dtype = np.int32),
'decoder_input' : np.asarray(decoder_input, dtype = np.int32),
'output' : np.asarray(decoder_output, dtype = np.int32),
'sample_weight' : np.asarray(sample_weight, dtype = np.int32)
}
self._logger.info("Lyric encoder input tensor dimensions: {}".format(lyric_set['encoder_input'].shape))
self._logger.info("Lyric decoder input tensor dimensions: {}".format(lyric_set['decoder_input'].shape))
self._logger.info("Lyric Target tensor dimensions: {}".format(lyric_set['output'].shape))
return lyric_set
## Receives an input tensor and returns an elem-by-elem softmax computed vector of the same dims
def _softmax(self, inp_tensor):
self._logger.debug("pinkySpeaker.lib.model.Transformer._softmax()")
m = np.max(inp_tensor)
e = np.exp(inp_tensor - m)
return e / np.sum(e)
def _splitSongtoSentence(self, curr_song):
self._logger.debug("pinkySpeaker.lib.model.Transformer._splitSongtoSentence()")
step = self._lyric_sequence_length - 1
encoder_input = [ [self._startToken] + curr_song[x : min(len(curr_song), x + step)] for x in range(0, len(curr_song), step)]
decoder_input = [ curr_song[x : min(len(curr_song), x + step)] + [self._endToken] for x in range(0, len(curr_song), step)]
decoder_output = decoder_input
## Pad input and output sequence to match the batch sequence length
encoder_input[-1] += [self._padToken] * (step + 1 - len(encoder_input[-1]))
decoder_input[-1] += [self._padToken] * (step + 1 - len(decoder_input[-1]))
decoder_output[-1] += [self._padToken] * (step + 1 - len(decoder_output[-1]))
song_sample_weight = [[ 0 if x == self._padToken
else 0 if x == self._startToken
else 50 if x == "endfile"
else 10 if x == "<ENDLINE>"
else 1 for x in inp]
for inp in encoder_input]
return encoder_input, decoder_input, decoder_output, song_sample_weight
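    ## Illustrative example (assuming _lyric_sequence_length == 5, i.e. step == 4):
    ## the 7-word song ["a","b","c","d","e","f","g"] is split into
    ##   encoder_input = [[start, "a", "b", "c", "d"], [start, "e", "f", "g", pad]]
    ##   decoder_input = [["a", "b", "c", "d", end],   ["e", "f", "g", end, pad]]
    ## where start/end/pad denote _startToken/_endToken/_padToken respectively.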
def _setClassWeight(self, vocab_size):
self._logger.debug("pinkySpeaker.lib.model.Transformer._setClassWeight()")
clw = {}
for i in range(vocab_size):
clw[i] = 1
clw[self.word2idx("<ENDLINE>")] = 50
return clw
## Receive a word, return the index in the vocabulary
def word2idx(self, word):
self._logger.debug("pinkySpeaker.lib.model.Transformer.word2idx()")
return self._model['word_model'].wv.vocab[word].index
    ## Receive a vocab index, return the word
def idx2word(self, idx):
self._logger.debug("pinkySpeaker.lib.model.Transformer.idx2word()")
return self._model['word_model'].wv.index2word[idx]
    ## Receive a vocab index, return its one-hot vector (scaled by 1000)
def idx2onehot(self, idx, size):
self._logger.debug("pinkySpeaker.lib.model.Transformer.idx2onehot()")
ret = np.zeros(size)
ret[idx] = 1000
return ret
## Converts "<ENDLINE>" to '\n' for pretty printing
## Also masks meta-tokens but throws a warning
def _prettyPrint(self, text):
self._logger.debug("pinkySpeaker.lib.model.Transformer._prettyPrint()")
if self._startToken in text:
self._logger.warning("</START> has been found to generated text!")
if self._padToken in text:
self._logger.warning("</PAD> has been found to generated text!")
if "<ENDLINE>" in text:
self._logger.warning("Endline found in text!")
return text.replace("<ENDLINE> ", "\n").replace("endfile", "\nEND")
## Just fit it!
def fit(self, epochs = 50, save_model = None):
self._logger.debug("pinkySpeaker.lib.model.Transformer.fit()")
# Build a small toy token dictionary
tokens = 'all work and no play makes jack a dull boy'.split(' ')
token_dict = {
'<PAD>': 0,
'<START>': 1,
'<END>': 2,
}
for token in tokens:
if token not in token_dict:
token_dict[token] = len(token_dict)
# Generate toy data
encoder_inputs_no_padding = []
encoder_inputs, decoder_inputs, decoder_outputs = [], [], []
for i in range(1, len(tokens) - 1):
encode_tokens, decode_tokens = tokens[:i], tokens[i:]
encode_tokens = ['<START>'] + encode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(encode_tokens))
output_tokens = decode_tokens + ['<END>', '<PAD>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
decode_tokens = ['<START>'] + decode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
encode_tokens = list(map(lambda x: token_dict[x], encode_tokens))
decode_tokens = list(map(lambda x: token_dict[x], decode_tokens))
output_tokens = list(map(lambda x: [token_dict[x]], output_tokens))
encoder_inputs_no_padding.append(encode_tokens[:i + 2])
encoder_inputs.append(encode_tokens)
decoder_inputs.append(decode_tokens)
decoder_outputs.append(output_tokens)
## TODO: You are here. Check input dimensions.
## Fork example to see how it works
hist = self._model['Transformer'].fit( x = [self._dataset['lyric_model']['encoder_input'], self._dataset['lyric_model']['decoder_input']],
y = self._dataset['lyric_model']['output'],
# sample_weight = self._dataset['lyric_model']['sample_weight'],
batch_size = 4,
epochs = 50,
callbacks = [LambdaCallback(on_epoch_end=self._lyrics_per_epoch)] )
if save_model:
save_model = path.join(save_model, "Transformer")
makedirs(save_model, exist_ok = True)
self._model['Transformer'].save(path.join(save_model, "Transformer.h5"))
return hist.history['loss']
## Run a model prediction based on sample input
def predict(self, seed, load_model = None):
self._logger.debug("pinkySpeaker.lib.model.Transformer.predict()")
if not self._model and not load_model:
self._logger.critical("Load model path has not been provided! Predict failed!")
raise ValueError("Model is not cached. Model load path has not been provided. Predict failed!")
else:
if load_model:
if self._model:
## TODO not necessarily
self._logger.info("New model has been provided. Overriding cached model...")
self._model = self._loadNNModel(load_model)
title = self._generate_next(seed, self._model['title_model'], True, num_generated = 10)
lyrics = self._generate_next(title, self._model['Transformer'], False, num_generated = 540)
lyrics = " ".join(lyrics.split()[len(title.split()):])
self._logger.info("\nSeed: {}\nSong Title\n{}\nLyrics\n{}".format(seed, self._prettyPrint(title), self._prettyPrint(lyrics)))
return
## Booting callback on title generation between epochs
def _title_per_epoch(self, epoch, _):
self._logger.debug("pinkySpeaker.lib.model.Transformer._title_per_epoch()")
self._logger.info('\nGenerating text after epoch: %d' % epoch)
texts = [
'dark',
'dark side',
'another',
'echoes',
'high',
'shine',
'on',
'have',
'comfortably'
]
for text in texts:
_sample = self._generate_next(text, self._model['title_model'], title = True, num_generated = 20)
self._logger.info('%s... -> \n%s\n' % (text, self._prettyPrint(_sample)))
return
## Booting callback on lyric generation between epochs
def _lyrics_per_epoch(self, epoch, _):
self._logger.debug("pinkySpeaker.lib.model.Transformer._lyrics_per_epoch()")
self._logger.info('\nGenerating text after epoch: %d' % epoch)
texts = [
'dark side',
'another brick in the wall',
'echoes',
'high hopes',
'shine on you crazy diamond',
'breathe',
'have a cigar',
'comfortably numb'
]
for text in texts:
_sample = self._generate_next(text, self._model['Transformer'], title = False)
self._logger.info('\n%s... -> \n%s\n' % (text, self._prettyPrint(_sample)))
return
## Model sampling setup function
def _generate_next(self, text, model, title, num_generated = 320):
self._logger.debug("pinkySpeaker.lib.model.Transformer._generate_next()")
word_idxs = [self.word2idx(word) for word in text.lower().split()]
print(word_idxs)
prediction = decode(
model,
word_idxs,
start_token = self.word2idx(self._startToken),
end_token = self.word2idx(self._endToken),
pad_token = self.word2idx(self._padToken),
max_len = num_generated,
top_k = 10,
temperature = 1.0
)
# for i in range(num_generated):
# prediction = decode(
# model,
# np.array(word_idxs),
# start_token = self.word2idx(self._startToken),
# end_token = self.word2idx(self._endToken),
# pad_token = self.word2idx(self._padToken),
# max_len = num_generated,
# top_k = 10,
# temperature = 1.0
# )
# prediction = model.predict(x=np.array(word_idxs))
# max_cl = 0
# max_indx = 0
# samples = prediction[-1] if title else prediction[-1][0]
# for ind, item in enumerate(samples): ## TODO plz fix this for title model
# if item > max_cl:
# max_cl = item
# max_indx = ind
# idx = self._sample(samples, temperature=0.7)
# word_idxs.append(idx)
# if self.idx2word(idx) == "endfile" or (title and self.idx2word(idx) == "<ENDLINE>"):
# break
return ' '.join(self.idx2word(idx) for idx in word_idxs + prediction)
## Take prediction vector, return the index of most likely class
def _sample(self, preds, temperature=1.0):
self._logger.debug("pinkySpeaker.lib.model.Transformer._sample()")
if temperature <= 0:
return np.argmax(preds)
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
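## Standalone sketch of the same temperature trick (illustrative only, not
## used by the class above): dividing the log-probabilities by the temperature
## before renormalising makes low temperatures near-greedy and high
## temperatures close to uniform sampling. The example distribution is assumed.
def _temperature_sampling_demo(preds=(0.1, 0.2, 0.7), temperature=0.5):
    preds = np.log(np.asarray(preds, dtype='float64')) / temperature
    preds = np.exp(preds) / np.sum(np.exp(preds))
    return np.argmax(np.random.multinomial(1, preds, 1))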
###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
import tensorflow_datasets as tfds
import tensorflow as tf
import time
import numpy as np
import matplotlib.pyplot as plt
import os
from random import randint
BUFFER_SIZE = 2000
BATCH_SIZE = 64
MAX_LENGTH = 40
tokenizer_pt = None
tokenizer_en = None
def runTransformer(raw_data):
path = "/home/fivosts/PhD/Code/pinkySpeaker/dataset/pink_floyd"
temp_dataset = []
src_dataset = []
random_lines = []
    for file in os.listdir(data_path):
        with open(os.path.join(data_path, file), 'r') as f:
lines = f.readlines()
lines = [x.replace("\n", "") for x in lines if x.replace("\n", "") != ""]
random_lines += lines
# random_lines = [x for sublist in random_lines for x in sublist]
def labeler(example):
return example, random_lines[randint(0, len(random_lines) - 1)]
    for file in os.listdir(data_path):
        line = tf.data.TextLineDataset(os.path.join(data_path, file))
labelled_line = line.map(lambda key: labeler(key))
temp_dataset.append(labelled_line)
global tokenizer_en
global tokenizer_pt
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(x.numpy() for sublist in temp_dataset for x, _ in sublist), target_vocab_size = 2**13)
tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(x.numpy() for sublist in temp_dataset for x, _ in sublist), target_vocab_size = 2**13)
    for file in os.listdir(data_path):
        line = tf.data.TextLineDataset(os.path.join(data_path, file))
for l in line:
if l.numpy().decode("utf-8") != "":
# src_dataset.append(([tokenizer_en.vocab_size] + tokenizer_en.encode(l.numpy()) + [tokenizer_en.vocab_size + 1], [tokenizer_en.vocab_size] + tokenizer_en.encode("".join(random_lines[randint(0, len(random_lines) - 1)])) + [tokenizer_en.vocab_size + 1]))
src_dataset.append(([tokenizer_en.vocab_size] + tokenizer_en.encode(l.numpy()) + [tokenizer_en.vocab_size + 1], [tokenizer_en.vocab_size] + tokenizer_en.encode(l.numpy()) + [tokenizer_en.vocab_size + 1]))
# examples, metadata = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True,
# as_supervised=True)
# train_examples1, val_examples1 = examples['train'], examples['validation']
# train_examples, val_examples = tf.data.Dataset.from_tensor_slices(song_dataset), tf.data.Dataset.from_tensor_slices(song_dataset)
train_examples = src_dataset
val_examples = src_dataset
# print(type(train_examples))
# print(type(train_examples1))
# for i in train_examples:
# print(type(i))
# print(i)
# for x in i:
# print(type(x))
# break
# break
# for i in train_examples1:
# print(type(i))
# print(i)
# for x in i:
# print(type(x))
# break
# break
sample_string = 'Hello? Is there anybody out there? Is there anyone at home?'
tokenized_string = tokenizer_en.encode(sample_string)
print ('Tokenized string is {}'.format(tokenized_string))
original_string = tokenizer_en.decode(tokenized_string)
print ('The original string: {}'.format(original_string))
assert original_string == sample_string
for ts in tokenized_string:
print ('{} ----> {}'.format(ts, tokenizer_en.decode([ts])))
# train_preprocessed = (
# tf.convert_to_tensor([x.map(tf_encode) for x in train_examples]))
#.filter(filter_max_length)
# cache the dataset to memory to get a speedup while reading from it.
#.cache()
# .shuffle(BUFFER_SIZE))
# val_preprocessed = (
# tf.convert_to_tensor([x.map(tf_encode) for x in train_examples]))
# val_preprocessed = (
# val_examples
# .map(tf_encode)
# .filter(filter_max_length))
# train_dataset = (train_preprocessed
# .padded_batch(BATCH_SIZE, padded_shapes=([None], [None]))
# .prefetch(tf.data.experimental.AUTOTUNE))
# val_dataset = (train_preprocessed
# .padded_batch(BATCH_SIZE, padded_shapes=([None], [None])))
# print(tokenizer_pt.vocab_size)
# print(tokenizer_en.vocab_size)
train_dataset = train_examples
val_dataset = train_examples
# for (batch, (inp, tar)) in enumerate(train_dataset):
# print(batch)
# print("\n################################\n")
# print(inp)
# for x, y in zip(inp, tar):
# print([t.numpy() for t in x])
# print(len([t.numpy() for t in x]))
# print(tokenizer_pt.decode(x))
# print([t.numpy() for t in y])
# print(len([t.numpy() for t in y]))
# print(tokenizer_en.decode(y))
# break
# for (batch, (inp, tar)) in enumerate(train_dataset):
# for x, y in zip(inp, tar):
# print(len([t.numpy() for t in x]))
# print(len([t.numpy() for t in y]))
# train_dataset = (train_preprocessed
# .padded_batch(BATCH_SIZE)
# .prefetch(tf.data.experimental.AUTOTUNE))
# val_dataset = (val_preprocessed
# .padded_batch(BATCH_SIZE))
print(val_dataset[1])
pt_batch, en_batch = val_dataset[1]
pt_batch, en_batch
pos_encoding = positional_encoding(50, 512)
print (pos_encoding.shape)
plt.pcolormesh(pos_encoding[0], cmap='RdBu')
plt.xlabel('Depth')
plt.xlim((0, 512))
plt.ylabel('Position')
plt.colorbar()
plt.show()
x = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
create_padding_mask(x)
x = tf.random.uniform((1, 3))
temp = create_look_ahead_mask(x.shape[1])
temp
np.set_printoptions(suppress=True)
temp_k = tf.constant([[10,0,0],
[0,10,0],
[0,0,10],
[0,0,10]], dtype=tf.float32) # (4, 3)
temp_v = tf.constant([[ 1,0],
[ 10,0],
[ 100,5],
[1000,6]], dtype=tf.float32) # (4, 2)
# This `query` aligns with the second `key`,
# so the second `value` is returned.
temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32) # (1, 3)
print_out(temp_q, temp_k, temp_v)
# This query aligns with a repeated key (third and fourth),
# so all associated values get averaged.
temp_q = tf.constant([[0, 0, 10]], dtype=tf.float32) # (1, 3)
print_out(temp_q, temp_k, temp_v)
# This query aligns equally with the first and second key,
# so their values get averaged.
temp_q = tf.constant([[10, 10, 0]], dtype=tf.float32) # (1, 3)
print_out(temp_q, temp_k, temp_v)
temp_q = tf.constant([[0, 0, 10], [0, 10, 0], [10, 10, 0]], dtype=tf.float32) # (3, 3)
print_out(temp_q, temp_k, temp_v)
temp_mha = MultiHeadAttention(d_model=512, num_heads=8)
y = tf.random.uniform((1, 60, 512)) # (batch_size, encoder_sequence, d_model)
out, attn = temp_mha(y, k=y, q=y, mask=None)
out.shape, attn.shape
sample_ffn = point_wise_feed_forward_network(512, 2048)
sample_ffn(tf.random.uniform((64, 50, 512))).shape
sample_encoder_layer = EncoderLayer(512, 8, 2048)
sample_encoder_layer_output = sample_encoder_layer(
tf.random.uniform((64, 43, 512)), False, None)
sample_encoder_layer_output.shape # (batch_size, input_seq_len, d_model)
sample_decoder_layer = DecoderLayer(512, 8, 2048)
sample_decoder_layer_output, _, _ = sample_decoder_layer(
tf.random.uniform((64, 50, 512)), sample_encoder_layer_output,
False, None, None)
sample_decoder_layer_output.shape # (batch_size, target_seq_len, d_model)
sample_encoder = Encoder(num_layers=2, d_model=512, num_heads=8,
dff=2048, input_vocab_size=8500,
maximum_position_encoding=10000)
temp_input = tf.random.uniform((64, 62), dtype=tf.int64, minval=0, maxval=200)
sample_encoder_output = sample_encoder(temp_input, training=False, mask=None)
print (sample_encoder_output.shape) # (batch_size, input_seq_len, d_model)
sample_decoder = Decoder(num_layers=2, d_model=512, num_heads=8,
dff=2048, target_vocab_size=8000,
maximum_position_encoding=5000)
temp_input = tf.random.uniform((64, 26), dtype=tf.int64, minval=0, maxval=200)
output, attn = sample_decoder(temp_input,
enc_output=sample_encoder_output,
training=False,
look_ahead_mask=None,
padding_mask=None)
output.shape, attn['decoder_layer2_block2'].shape
sample_transformer = TFTransformer(
num_layers=2, d_model=512, num_heads=8, dff=2048,
input_vocab_size=tokenizer_en.vocab_size, target_vocab_size=tokenizer_en.vocab_size,
pe_input=10000, pe_target=6000)
temp_input = tf.random.uniform((64, 38), dtype=tf.int64, minval=0, maxval=200)
temp_target = tf.random.uniform((64, 36), dtype=tf.int64, minval=0, maxval=200)
fn_out, _ = sample_transformer(temp_input, temp_target, training=False,
enc_padding_mask=None,
look_ahead_mask=None,
dec_padding_mask=None)
fn_out.shape # (batch_size, tar_seq_len, target_vocab_size)
num_layers = 4
d_model = 128
dff = 512
num_heads = 8
input_vocab_size = tokenizer_pt.vocab_size + 2
target_vocab_size = tokenizer_en.vocab_size + 2
dropout_rate = 0.1
learning_rate = CustomSchedule(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
temp_learning_rate_schedule = CustomSchedule(d_model)
plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='train_accuracy')
transformer = TFTransformer(num_layers, d_model, num_heads, dff,
input_vocab_size, target_vocab_size,
pe_input=input_vocab_size,
pe_target=target_vocab_size,
rate=dropout_rate)
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(transformer=transformer,
optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
# if ckpt_manager.latest_checkpoint:
# ckpt.restore(ckpt_manager.latest_checkpoint)
# print ('Latest checkpoint restored!!')
EPOCHS = 20
train_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int64),
tf.TensorSpec(shape=(None, None), dtype=tf.int64),
]
@tf.function(input_signature=train_step_signature)
def train_step(inp, tar):
tar_inp = tar[:, :-1]
tar_real = tar[:, 1:]
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
with tf.GradientTape() as tape:
predictions, _ = transformer(inp, tar_inp,
True,
enc_padding_mask,
combined_mask,
dec_padding_mask)
loss = loss_function(tar_real, predictions, loss_object)
gradients = tape.gradient(loss, transformer.trainable_variables)
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
train_loss(loss)
train_accuracy(tar_real, predictions)
for epoch in range(EPOCHS):
try:
start = time.time()
train_loss.reset_states()
train_accuracy.reset_states()
# inp -> portuguese, tar -> english
for (batch, (inp, tar)) in enumerate(train_dataset):
# print(len(inp))
# print(len(tar))
# print(batch)
new_inp = tf.convert_to_tensor([[x for x in inp]], dtype = tf.int64)
new_tar = tf.convert_to_tensor([[x for x in tar]], dtype = tf.int64)
# print(new_inp)
# print(new_tar)
train_step(new_inp, new_tar)
if batch % 50 == 0:
print ('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
epoch + 1, batch, train_loss.result(), train_accuracy.result()))
if (epoch + 1) % 5 == 0:
ckpt_save_path = ckpt_manager.save()
print ('Saving checkpoint for epoch {} at {}'.format(epoch+1, ckpt_save_path))
print ('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1,
train_loss.result(),
train_accuracy.result()))
print ('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))
except KeyboardInterrupt:
continue
## Run 20 times and see how it works....
for i in range(20):
line_index = randint(0, len(src_dataset) - 1)
seed_sentence, real_sentence = src_dataset[line_index]
seed_sentence = tokenizer_en.decode(seed_sentence[1:-1])
real_sentence = tokenizer_en.decode(real_sentence[1:-1])
predicted_sentence = translate(seed_sentence, transformer) ## START and END token
print("Seed sentence: {}\nReal target: {}\nModel prediction: {}\n".format(seed_sentence, real_sentence, predicted_sentence))
final_line = randint(0, len(src_dataset) - 1)
seed_sentence, real_sentence = src_dataset[final_line]
seed_sentence = tokenizer_en.decode(seed_sentence[1:-1])
real_sentence = tokenizer_en.decode(real_sentence[1:-1])
predicted_sentence = translate(seed_sentence, transformer, plot="decoder_layer4_block2") ## START and END token
print("Seed sentence: {}\nReal target: {}\nModel prediction: {}".format(seed_sentence, real_sentence, predicted_sentence))
exit(1)
# translate("este é o primeiro livro que eu fiz.", plot='decoder_layer4_block2')
# print ("Real translation: this is the first book i've ever done.")
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(DecoderLayer, self).__init__()
self.mha1 = MultiHeadAttention(d_model, num_heads)
self.mha2 = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
self.dropout3 = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training,
look_ahead_mask, padding_mask):
# enc_output.shape == (batch_size, input_seq_len, d_model)
attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask) # (batch_size, target_seq_len, d_model)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.mha2(
enc_output, enc_output, out1, padding_mask) # (batch_size, target_seq_len, d_model)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)
ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model)
return out3, attn_weights_block1, attn_weights_block2
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding, self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
seq_len = tf.shape(x)[1]
# adding embedding and position encoding.
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
maximum_position_encoding, rate=0.1):
super(Decoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training,
look_ahead_mask, padding_mask):
seq_len = tf.shape(x)[1]
attention_weights = {}
x = self.embedding(x) # (batch_size, target_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x, block1, block2 = self.dec_layers[i](x, enc_output, training, look_ahead_mask, padding_mask)
attention_weights['decoder_layer{}_block1'.format(i+1)] = block1
attention_weights['decoder_layer{}_block2'.format(i+1)] = block2
# x.shape == (batch_size, target_seq_len, d_model)
return x, attention_weights
class TFTransformer(tf.keras.Model):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
target_vocab_size, pe_input, pe_target, rate=0.1):
super(TFTransformer, self).__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff,
input_vocab_size, pe_input, rate)
self.decoder = Decoder(num_layers, d_model, num_heads, dff,
target_vocab_size, pe_target, rate)
self.final_layer = tf.keras.layers.Dense(target_vocab_size)
def call(self, inp, tar, training, enc_padding_mask,
look_ahead_mask, dec_padding_mask):
enc_output = self.encoder(inp, training, enc_padding_mask) # (batch_size, inp_seq_len, d_model)
# dec_output.shape == (batch_size, tar_seq_len, d_model)
dec_output, attention_weights = self.decoder(
tar, enc_output, training, look_ahead_mask, dec_padding_mask)
final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)
return final_output, attention_weights
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
def encode(lang1, lang2):
lang1 = [tokenizer_pt.vocab_size] + tokenizer_pt.encode(
lang1.numpy()) + [tokenizer_pt.vocab_size+1]
lang2 = [tokenizer_en.vocab_size] + tokenizer_en.encode(
lang2.numpy()) + [tokenizer_en.vocab_size+1]
return lang1, lang2
def tf_encode(pt, en):
result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
result_pt.set_shape([None])
result_en.set_shape([None])
return result_pt, result_en
def filter_max_length(x, y, max_length=MAX_LENGTH):
return tf.logical_and(tf.size(x) <= max_length,
tf.size(y) <= max_length)
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
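# Illustrative: create_look_ahead_mask(3) returns
#   [[0., 1., 1.],
#    [0., 0., 1.],
#    [0., 0., 0.]]
# so position i may only attend to positions <= i (a 1 marks a masked-out entry).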
def scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
def print_out(q, k, v):
temp_out, temp_attn = scaled_dot_product_attention(
q, k, v, None)
print ('Attention weights are:')
print (temp_attn)
print ('Output is:')
print (temp_out)
def point_wise_feed_forward_network(d_model, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
])
def loss_function(real, pred, loss_object):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
def create_masks(inp, tar):
# Encoder padding mask
enc_padding_mask = create_padding_mask(inp)
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
dec_padding_mask = create_padding_mask(inp)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by
# the decoder.
look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = create_padding_mask(tar)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, combined_mask, dec_padding_mask
def evaluate(inp_sentence, transformer):
start_token = [tokenizer_pt.vocab_size]
end_token = [tokenizer_pt.vocab_size + 1]
# inp sentence is portuguese, hence adding the start and end token
inp_sentence = start_token + tokenizer_pt.encode(inp_sentence) + end_token
encoder_input = tf.expand_dims(inp_sentence, 0)
# as the target is english, the first word to the transformer should be the
# english start token.
decoder_input = [tokenizer_en.vocab_size]
output = tf.expand_dims(decoder_input, 0)
for i in range(MAX_LENGTH):
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
encoder_input, output)
# predictions.shape == (batch_size, seq_len, vocab_size)
predictions, attention_weights = transformer(encoder_input,
output,
False,
enc_padding_mask,
combined_mask,
dec_padding_mask)
# select the last word from the seq_len dimension
predictions = predictions[: ,-1:, :] # (batch_size, 1, vocab_size)
predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
# return the result if the predicted_id is equal to the end token
if predicted_id == tokenizer_en.vocab_size+1:
return tf.squeeze(output, axis=0), attention_weights
        # concatenate the predicted_id to the output which is given to the decoder
# as its input.
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0), attention_weights
def plot_attention_weights(attention, sentence, result, layer):
fig = plt.figure(figsize=(16, 8))
sentence = tokenizer_pt.encode(sentence)
attention = tf.squeeze(attention[layer], axis=0)
for head in range(attention.shape[0]):
ax = fig.add_subplot(2, 4, head+1)
# plot the attention weights
ax.matshow(attention[head][:-1, :], cmap='viridis')
fontdict = {'fontsize': 10}
ax.set_xticks(range(len(sentence)+2))
ax.set_yticks(range(len(result)))
ax.set_ylim(len(result)-1.5, -0.5)
ax.set_xticklabels(
['<start>']+[tokenizer_pt.decode([i]) for i in sentence]+['<end>'],
fontdict=fontdict, rotation=90)
ax.set_yticklabels([tokenizer_en.decode([i]) for i in result
if i < tokenizer_en.vocab_size],
fontdict=fontdict)
ax.set_xlabel('Head {}'.format(head+1))
plt.tight_layout()
plt.show()
def translate(sentence, transformer, plot=''):
result, attention_weights = evaluate(sentence, transformer)
predicted_sentence = tokenizer_en.decode([i for i in result if i < tokenizer_en.vocab_size])
if plot:
plot_attention_weights(attention_weights, sentence, result, plot)
return predicted_sentence
| 2.46875 | 2 |
detector.py | Xia-Xinyu/X-Trace | 3 | 12767250 | <gh_stars>1-10
from deepsort import DeepSort
from model.embedding import Embedding
from sqlite import *
import numpy as np
import cv2, time
global pts, ROIs
class Detector(object):
def __init__(self,video_path = None,img_arr = None,track = None, ROI=None):
global pts, ROIs
self.deepsort = DeepSort('../model/detection', '../model/embedding', True)
self.video_path = video_path
self.img_arr = img_arr
self.track = track
self.object_dic = {}
self.frame_num = 0
self.capture = cv2.VideoCapture(video_path)
self.emb = Embedding('../model/embedding', use_gpu=True)
self.cap = cv2.VideoCapture(video_path)
ROIs = ROI
pts = []
def get_img(self,videos):
imgs = []
for video in videos:
cap = cv2.VideoCapture(video)
_, img = cap.read()
imgs.append(img)
return imgs
def img_detect(self,threshold):
results = []
imgs = np.array([]).astype(np.float)
for i, img in enumerate(self.img_arr):
confid = 0
temp_dic = {}
bboxes = []
img = cv2.imread(img)
result = self.deepsort.detector.predict(img)
for j in range(len(result)):
if result[j]['score'] < threshold:
continue
bboxes.append(result[j]['bbox'])
confid += result[j]['score']
temp_dic['person_num'] = len(bboxes)
if len(bboxes) == 0:
temp_dic['confid'] = 0
else:
temp_dic['confid'] = confid * 1.0 / len(bboxes)
results.append(temp_dic)
for k, bbox in enumerate(bboxes):
cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])),
(0, 69, 255), 2)
cv2.putText(img, 'id' + "-" + str(k + 1), (int(bbox[0]), int(bbox[1] - 10)), 0, 0.75, (0, 69, 255), 2)
imgs = np.append(imgs, img)
cv2.imwrite('./temp_imgs/' + f'{i + 1}.jpg', img)
return results
def video_detect(self, threshold, ID=None, video_cnt=None):
global pts
current_time = time.strftime('%Y-%m-%d %H.%M.%S', time.localtime(time.time()))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter("./temp_vdo/" + current_time + ".mp4", fourcc, 30, (768, 576), True)
if ROIs and video_cnt == 1:
_, temp = self.capture.read()
def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
xy = "%d,%d" % (x, y)
cv2.circle(temp, (x, y), 1, (0, 0, 255), thickness=-1)
cv2.putText(temp, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 0), thickness=1)
cv2.imshow("image", temp)
pts.append([x, y])
# if len(pts1) == 4:
# print(pts1)
cv2.namedWindow("image")
cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
while (1):
cv2.imshow('image', temp)
k = cv2.waitKey(1) & 0xFF
if k == ord('q'):
break
cv2.destroyAllWindows()
while True:
success, frame = self.capture.read()
self.frame_num += 1
# if not select and frame_cnt < frame_id:
# continue
if not success:
break
outputs, confid = self.deepsort.update(frame, threshold)
target = []
person = 0
id_bbox = {}
for output in outputs:
# if not "%d" % output[-1] in self.object_dic:
                # # Create a dict for the current id: key(ID) -> {trace, lost-frame counter}; delete the object once more than 10 frames are lost
# self.object_dic["%d" % output[-1]] = {"trace": [], 'traced_frames': 10}
# self.object_dic["%d" % output[-1]]["trace"].append(center)
# self.object_dic["%d" % output[-1]]["traced_frames"] += 1
                # If the id already exists, just append to it
# else:
# self.object_dic["%d" % output[-1]]["trace"].append(center)
# self.object_dic["%d" % output[-1]]["traced_frames"] += 1
if ID == None:
person = person + 1
cv2.rectangle(frame, (int(output[0]), int(output[1])), (int(output[2]), int(output[3])), (0, 69, 255),
2)
cv2.putText(frame, 'id' + "-" + str(int(output[-1])), (int(output[0]), int(output[1] - 10)), 0, 0.75,
(0, 69, 255), 2)
if ID == output[-1]:
person = person + 1
cv2.rectangle(frame, (int(output[0]), int(output[1])), (int(output[2]), int(output[3])),
(0, 69, 255),
2)
cv2.putText(frame, 'id' + "-" + str(int(output[-1])), (int(output[0]), int(output[1] - 10)), 0,
0.75,
(0, 69, 255), 2)
center = [int((output[0] + output[2]) / 2), int((output[1] + output[3]) / 2),
int(output[2] - output[0]),
int(output[3] - output[1])]
id_bbox[str(int(output[4]))] = [output[0], output[1], output[2], output[3]]
if not "%d" % output[-1] in self.object_dic:
                    # Create a dict for the current id: key(ID) -> {trace, lost-frame counter}; delete the object once more than 30 frames are lost
self.object_dic["%d" % output[-1]] = {"trace": [], 'traced_frames': 30}
self.object_dic["%d" % output[-1]]["trace"].append(center)
self.object_dic["%d" % output[-1]]["traced_frames"] += 1
                # If the id already exists, just append to it
else:
self.object_dic["%d" % output[-1]]["trace"].append(center)
self.object_dic["%d" % output[-1]]["traced_frames"] += 1
if ROIs:
pts1 = np.array(pts, np.int32).reshape((-1, 1, 2))
cv2.polylines(frame, [pts1], True, (0, 255, 0), thickness=2)
                    # Check whether the target is inside the ROI region
                    # using the PNPoly (point-in-polygon) algorithm
testp = [int(output[2]), int(output[3])]
n = len(pts)
j = n - 1
res = False
for i in range(n):
if (pts[i][1] > testp[1]) != (pts[j][1] > testp[1]) and \
testp[0] < (pts[j][0] - pts[i][0]) * (testp[1] - pts[i][1]) / (
pts[j][1] - pts[i][1]) + pts[i][0]:
res = not res
j = i
if res == True:
target.append(testp)
cv2.putText(frame, str('enter'), (int(output[2] - 65), int(output[3] - 5)),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (60, 20, 220), 2)
                # Draw the traces
if self.track:
track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 255, 255), (255, 0, 255), (255, 127, 255),
(127, 0, 255), (127, 0, 127), (193, 182, 255), (139, 0, 139)]
for s in self.object_dic:
i = int(s)
                        # Limit the maximum trace length
if len(self.object_dic["%d" % i]["trace"]) > 10:
for k in range(len(self.object_dic["%d" % i]["trace"]) - 10):
del self.object_dic["%d" % i]["trace"][k]
                        # # # Draw the trace
if len(self.object_dic["%d" % i]["trace"]) > 2:
for j in range(1, len(self.object_dic["%d" % i]["trace"]) - 1):
pot1_x = self.object_dic["%d" % i]["trace"][j][0]
pot1_y = self.object_dic["%d" % i]["trace"][j][1]
pot2_x = self.object_dic["%d" % i]["trace"][j + 1][0]
pot2_y = self.object_dic["%d" % i]["trace"][j + 1][1]
# if pot2_x == pot1_x and pot1_y == pot2_y:
# del self.object_dic["%d" % i]
                                clr = i % 10 # pick a trace colour based on the id
cv2.line(frame, (pot1_x, pot1_y), (pot2_x, pot2_y), track_colors[clr], 5)
            # Drop targets that have disappeared
for s in self.object_dic:
if self.object_dic["%d" % int(s)]["traced_frames"] > 0:
self.object_dic["%d" % int(s)]["traced_frames"] -= 1
for n in list(self.object_dic):
if self.object_dic["%d" % int(n)]["traced_frames"] == 0:
del self.object_dic["%d" % int(n)]
id_bbox["所有行人"] = [0, 0, 0, 0]
# get方法参数按顺序对应下表(从0开始编号,比如这里为了获取视频的总帧数,在下表是排第八个的 CV_CAP_PROP_FRAME_COUNT
minutes = int(self.frame_num / self.cap.get(5)) // 60
seconds = int((self.frame_num / self.cap.get(5))) % 60
video_lenth = str(minutes) + 'min ' + str(seconds) + 's'
frame = cv2.resize(frame, (768, 576))
video.write(frame)
if ROIs:
yield frame, person, len(target), confid, id_bbox, self.frame_num
else:
yield frame, person, confid, id_bbox, self.frame_num
self.data_base = Database("../videos.db")
self.data_base.insert_data("temp_vdo/" + current_time + ".mp4", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), video_lenth, threshold)
self.data_base.close_db()
def rlt_detect(self, threshold):
capture = cv2.VideoCapture(0)
retval = cv2.VideoCapture.isOpened(capture)
self.object_dic = {}
if retval:
while True:
success, frame = capture.read()
if not success:
break
outputs, confid = self.deepsort.update(frame, threshold)
person = 0
id_bbox = {}
for output in outputs:
person = person + 1
cv2.rectangle(frame, (int(output[0]), int(output[1])), (int(output[2]), int(output[3])),
(0, 69, 255),
2)
cv2.putText(frame, 'id' + "-" + str(int(output[-1])), (int(output[0]), int(output[1] - 10)), 0,
0.75,
(0, 69, 255), 2)
center = [int((output[0] + output[2]) / 2), int((output[1] + output[3]) / 2),
int(output[2] - output[0]),
int(output[3] - output[1])]
id_bbox[str(int(output[4]))] = [output[0], output[1], output[2], output[3]]
if not "%d" % output[-1] in self.object_dic:
                        # Create a dict for the current id: key(ID) -> {trace, lost-frame counter}; delete the object once more than 10 frames are lost
self.object_dic["%d" % output[-1]] = {"trace": [], 'traced_frames': 10}
self.object_dic["%d" % output[-1]]["trace"].append(center)
self.object_dic["%d" % output[-1]]["traced_frames"] += 1
                    # If the id already exists, just append to it
else:
self.object_dic["%d" % output[-1]]["trace"].append(center)
self.object_dic["%d" % output[-1]]["traced_frames"] += 1
if self.track:
track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 255, 255), (255, 0, 255), (255, 127, 255),
(127, 0, 255), (127, 0, 127), (193, 182, 255), (139, 0, 139)]
for s in self.object_dic:
i = int(s)
                        # Limit the maximum trace length
if len(self.object_dic["%d" % i]["trace"]) > 10:
for k in range(len(self.object_dic["%d" % i]["trace"]) - 10):
del self.object_dic["%d" % i]["trace"][k]
                        # # # Draw the trace
if len(self.object_dic["%d" % i]["trace"]) > 2:
for j in range(1, len(self.object_dic["%d" % i]["trace"]) - 1):
pot1_x = self.object_dic["%d" % i]["trace"][j][0]
pot1_y = self.object_dic["%d" % i]["trace"][j][1]
pot2_x = self.object_dic["%d" % i]["trace"][j + 1][0]
pot2_y = self.object_dic["%d" % i]["trace"][j + 1][1]
# if pot2_x == pot1_x and pot1_y == pot2_y:
# del self.object_dic["%d" % i]
                                clr = i % 10 # pick a trace colour based on the id
cv2.line(frame, (pot1_x, pot1_y), (pot2_x, pot2_y), track_colors[clr], 5)
                # Drop targets that have disappeared
for s in self.object_dic:
if self.object_dic["%d" % int(s)]["traced_frames"] > 0:
self.object_dic["%d" % int(s)]["traced_frames"] -= 1
for n in list(self.object_dic):
if self.object_dic["%d" % int(n)]["traced_frames"] == 0:
del self.object_dic["%d" % int(n)]
res_img = cv2.resize(frame, (1035, 679))
yield frame, person, confid
def ReID(self, videos=None, img=None, hit_num=None):
imgs_gallery = []
index_score = {}
messages = []
scores = []
ID = 0
for k,video in enumerate(videos):
capture = cv2.VideoCapture(video)
frame_num = 0
frame_rate = capture.get(5)
while True:
success, frame = capture.read()
frame_num += 1
persons = []
if not success:
break
result = self.deepsort.detector.predict(frame)
for j in range(len(result)):
if result[j]['score'] < 0.8:
continue
persons.append(result[j]['bbox'])
for _ in persons:
crop = frame[int(_[1]):int(_[1] + _[3]), int(_[0]):int(_[0] + _[2])]
# cv2.imwrite('./output/'+'ID'+'.jpg', crop)
result1 = self.emb.predict([crop])[0]
result2 = self.emb.predict([cv2.imread(img)])[0]
score = np.sum((np.array(result1) - np.array(result2)) * (np.array(result1) - np.array(result2)))
imgs_gallery.append(crop)
index_score[score] = ID
scores.append(score)
s = frame_num / frame_rate
messages.append([k,s])
ID += 1
yield 0, 0, ID
scores = sorted(scores)
# print(index_score)
results = []
for i,score in enumerate(scores):
# cv2.imwrite('./output/'+str(i)+'.jpg',imgs_gallery[index_score[score]])
results.append(imgs_gallery[index_score[score]])
if i == hit_num:
break
# return messages,scores
sort_mes = []
for i, score in enumerate(scores):
sort_mes.append(messages[index_score[score]])
if i == hit_num:
break
yield results, sort_mes, ID | 2.28125 | 2 |