Dataset schema (one row per source file):
- hexsha: string (length 40)
- size: int64 (3 to 1.03M)
- ext: string (10 distinct values)
- lang: string (1 distinct value)
- max_stars_repo_path: string (length 3 to 972)
- max_stars_repo_name: string (length 6 to 130)
- max_stars_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 3 to 972)
- max_issues_repo_name: string (length 6 to 130)
- max_issues_repo_head_hexsha: string (length 40 to 78)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 116k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 3 to 972)
- max_forks_repo_name: string (length 6 to 130)
- max_forks_repo_head_hexsha: string (length 40 to 78)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 3 to 1.03M)
- avg_line_length: float64 (1.13 to 941k)
- max_line_length: int64 (2 to 941k)
- alphanum_fraction: float64 (0 to 1)
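The columns above describe one flattened record per source file. As a point of reference only, the snippet below is a minimal, hypothetical sketch of how rows with this schema could be scanned and filtered; the file name `records.jsonl`, the license allow-list, and the 120-character line-length cutoff are assumptions for illustration, not part of the dataset.

```python
import json

# Assumed input: newline-delimited JSON, one record per line, using the
# column names listed above (hexsha, lang, max_stars_repo_licenses, ...).
KEEP_LICENSES = {"MIT", "Apache-2.0", "BSD-3-Clause"}  # illustrative allow-list


def iter_kept_records(path="records.jsonl", max_line_length=120):
    """Yield (hexsha, repo path, size) for permissively licensed Python files."""
    with open(path) as fh:
        for line in fh:
            rec = json.loads(line)
            if rec.get("lang") != "Python":
                continue
            licenses = rec.get("max_stars_repo_licenses") or []
            if not KEEP_LICENSES.intersection(licenses):
                continue
            if rec.get("max_line_length", 0) > max_line_length:
                continue
            yield rec["hexsha"], rec["max_stars_repo_path"], rec["size"]


if __name__ == "__main__":
    for hexsha, repo_path, size in iter_kept_records():
        print(f"{hexsha[:8]}  {size:>8}  {repo_path}")
```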
hexsha: 163fa95d3fb127b7b15a5db2c0a0882c6a667bb8 | size: 5,536 | ext: py | lang: Python
max_stars: ml-agents/mlagents/trainers/tests/test_learn.py @ Iceman3/ml-agents (head eb2a4a19450c6b8f2bfc1fe6ade10c518652f299), licenses ["Apache-2.0"], count 5, events 2019-02-01T04:53:19.000Z to 2021-02-17T21:27:56.000Z
max_issues: ml-agents/mlagents/trainers/tests/test_learn.py @ AdriDmgz/ml-agents (head 608fdcf4ff39aa94675ff6fc27f4941f96b53f6a), licenses ["Apache-2.0"], count 4, events 2020-09-26T00:51:48.000Z to 2022-02-10T01:28:32.000Z
max_forks: ml-agents-0.15.1/ml-agents/mlagents/trainers/tests/test_learn.py @ StandInTheRiver/Pong_AI (head 481e2af4a96377c257a6aa5559d8b7230c432bad), licenses ["MIT"], count 4, events 2020-03-05T15:29:26.000Z to 2020-03-05T16:50:06.000Z
content:
import pytest
from unittest.mock import MagicMock, patch, mock_open
from mlagents.trainers import learn
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.learn import parse_command_line
from mlagents_envs.exception import UnityEnvironmentException
def basic_options(extra_args=None):
extra_args = extra_args or {}
args = ["basic_path"]
if extra_args:
args += [f"{k}={v}" for k, v in extra_args.items()]
return parse_command_line(args)
@patch("mlagents.trainers.learn.TrainerFactory")
@patch("mlagents.trainers.learn.SamplerManager")
@patch("mlagents.trainers.learn.SubprocessEnvManager")
@patch("mlagents.trainers.learn.create_environment_factory")
@patch("mlagents.trainers.learn.load_config")
def test_run_training(
load_config,
create_environment_factory,
subproc_env_mock,
sampler_manager_mock,
trainer_factory_mock,
):
mock_env = MagicMock()
mock_env.external_brain_names = []
mock_env.academy_name = "TestAcademyName"
create_environment_factory.return_value = mock_env
trainer_config_mock = MagicMock()
load_config.return_value = trainer_config_mock
mock_init = MagicMock(return_value=None)
with patch.object(TrainerController, "__init__", mock_init):
with patch.object(TrainerController, "start_learning", MagicMock()):
learn.run_training(0, basic_options())
mock_init.assert_called_once_with(
trainer_factory_mock.return_value,
"./models/ppo",
"./summaries",
"ppo",
50000,
None,
False,
0,
sampler_manager_mock.return_value,
None,
)
@patch("mlagents.trainers.learn.SamplerManager")
@patch("mlagents.trainers.learn.SubprocessEnvManager")
@patch("mlagents.trainers.learn.create_environment_factory")
@patch("mlagents.trainers.learn.load_config")
def test_docker_target_path(
load_config, create_environment_factory, subproc_env_mock, sampler_manager_mock
):
mock_env = MagicMock()
mock_env.external_brain_names = []
mock_env.academy_name = "TestAcademyName"
create_environment_factory.return_value = mock_env
trainer_config_mock = MagicMock()
load_config.return_value = trainer_config_mock
options_with_docker_target = basic_options({"--docker-target-name": "dockertarget"})
mock_init = MagicMock(return_value=None)
with patch.object(TrainerController, "__init__", mock_init):
with patch.object(TrainerController, "start_learning", MagicMock()):
learn.run_training(0, options_with_docker_target)
mock_init.assert_called_once()
assert mock_init.call_args[0][1] == "/dockertarget/models/ppo"
assert mock_init.call_args[0][2] == "/dockertarget/summaries"
def test_bad_env_path():
with pytest.raises(UnityEnvironmentException):
learn.create_environment_factory(
env_path="/foo/bar",
docker_target_name=None,
no_graphics=True,
seed=None,
start_port=8000,
env_args=None,
)
@patch("builtins.open", new_callable=mock_open, read_data="{}")
def test_commandline_args(mock_file):
# No args raises
with pytest.raises(SystemExit):
parse_command_line([])
# Test with defaults
opt = parse_command_line(["mytrainerpath"])
assert opt.trainer_config == {}
assert opt.env_path is None
assert opt.curriculum_config is None
assert opt.sampler_config is None
assert opt.keep_checkpoints == 5
assert opt.lesson == 0
assert opt.load_model is False
assert opt.run_id == "ppo"
assert opt.save_freq == 50000
assert opt.seed == -1
assert opt.train_model is False
assert opt.base_port == 5005
assert opt.num_envs == 1
assert opt.docker_target_name is None
assert opt.no_graphics is False
assert opt.debug is False
assert opt.env_args is None
full_args = [
"mytrainerpath",
"--env=./myenvfile",
"--curriculum=./mycurriculum",
"--sampler=./mysample",
"--keep-checkpoints=42",
"--lesson=3",
"--load",
"--run-id=myawesomerun",
"--save-freq=123456",
"--seed=7890",
"--train",
"--base-port=4004",
"--num-envs=2",
"--docker-target-name=mydockertarget",
"--no-graphics",
"--debug",
]
opt = parse_command_line(full_args)
assert opt.trainer_config == {}
assert opt.env_path == "./myenvfile"
assert opt.curriculum_config == {}
assert opt.sampler_config == {}
assert opt.keep_checkpoints == 42
assert opt.lesson == 3
assert opt.load_model is True
assert opt.run_id == "myawesomerun"
assert opt.save_freq == 123456
assert opt.seed == 7890
assert opt.train_model is True
assert opt.base_port == 4004
assert opt.num_envs == 2
assert opt.docker_target_name == "mydockertarget"
assert opt.no_graphics is True
assert opt.debug is True
@patch("builtins.open", new_callable=mock_open, read_data="{}")
def test_env_args(mock_file):
full_args = [
"mytrainerpath",
"--env=./myenvfile",
"--env-args", # Everything after here will be grouped in a list
"--foo=bar",
"--blah",
"baz",
"100",
]
opt = parse_command_line(full_args)
assert opt.env_args == ["--foo=bar", "--blah", "baz", "100"]
avg_line_length: 32.564706 | max_line_length: 88 | alphanum_fraction: 0.664559
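Each record closes with three numbers derived from `content`: avg_line_length, max_line_length, and alphanum_fraction. The sketch below is a rough, hypothetical re-computation, assuming alphanum_fraction means the share of alphanumeric characters in the file; the dataset's exact definitions may differ slightly (for example in how trailing newlines are counted).

```python
# Hypothetical re-computation of the derived columns for one record's content.
def line_stats(content: str):
    lines = content.splitlines() or [""]
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = (
        sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
    )
    return avg_line_length, max_line_length, alphanum_fraction


# Example: compute the stats for a small two-line file.
print(line_stats("import pytest\nfrom unittest import mock\n"))
```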

hexsha: bd7f07f9964941d1342a62952379c19611dcfedc | size: 2,644 | ext: py | lang: Python
max_stars: tests/configs/o3-timing-mp-ruby.py @ hyu-iot/gem5 (head aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5), licenses ["BSD-3-Clause"], count 765, events 2015-01-14T16:17:04.000Z to 2022-03-28T07:46:28.000Z
max_issues: tests/configs/o3-timing-mp-ruby.py @ hyu-iot/gem5 (head aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5), licenses ["BSD-3-Clause"], count 30, events 2015-01-01T21:49:38.000Z to 2021-04-20T19:01:54.000Z
max_forks: tests/configs/o3-timing-mp-ruby.py @ hyu-iot/gem5 (head aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5), licenses ["BSD-3-Clause"], count 807, events 2015-01-06T09:55:38.000Z to 2022-03-30T10:23:36.000Z
content:
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import m5
from m5.objects import *
nb_cores = 4
cpus = [ DerivO3CPU(cpu_id=i) for i in range(nb_cores) ]
import ruby_config
ruby_memory = ruby_config.generate("TwoLevel_SplitL1UnifiedL2.rb", nb_cores)
# system simulated
system = System(cpu = cpus, physmem = ruby_memory, membus = SystemXBar(),
mem_mode = "timing",
clk_domain = SrcClockDomain(clock = '1GHz'))
# Create a separate clock domain for components that should run at
# the CPU frequency
system.cpu_clk_domain = SrcClockDomain(clock = '2GHz')
for cpu in cpus:
# create the interrupt controller
cpu.createInterruptController()
cpu.connectBus(system.membus)
# All cpus are associated with cpu_clk_domain
cpu.clk_domain = system.cpu_clk_domain
# connect memory to membus
system.physmem.port = system.membus.mem_side_ports
# Connect the system port for loading of binaries etc
system.system_port = system.membus.cpu_side_ports
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
avg_line_length: 41.3125 | max_line_length: 76 | alphanum_fraction: 0.759077

hexsha: 50659ccc0411109d3ba877a0ecda7ab7ff30e7e5 | size: 149 | ext: py | lang: Python
max_stars: reference/upload/models.py @ FanJingithub/performer (head 9b4eec771a65a72f712c34f70de844ed9ce67475), licenses ["Apache-1.1"], count null, events null
max_issues: reference/upload/models.py @ FanJingithub/performer (head 9b4eec771a65a72f712c34f70de844ed9ce67475), licenses ["Apache-1.1"], count null, events null
max_forks: reference/upload/models.py @ FanJingithub/performer (head 9b4eec771a65a72f712c34f70de844ed9ce67475), licenses ["Apache-1.1"], count null, events null
content:
from django.db import models
class Img(models.Model):
id = models.AutoField(primary_key = True)
img = models.ImageField(upload_to = "img")
avg_line_length: 21.285714 | max_line_length: 46 | alphanum_fraction: 0.711409

hexsha: 88468cf6f8a6ff216787c4a3133a6d093ef96a38 | size: 3,158 | ext: py | lang: Python
max_stars: chapter03/template_with_demo/template_with_demo/settings.py @ Tomtao626/django (head fe945063593b4bfe82d74842f728b854b501a294), licenses ["Apache-2.0"], count null, events null
max_issues: chapter03/template_with_demo/template_with_demo/settings.py @ Tomtao626/django (head fe945063593b4bfe82d74842f728b854b501a294), licenses ["Apache-2.0"], count null, events null
max_forks: chapter03/template_with_demo/template_with_demo/settings.py @ Tomtao626/django (head fe945063593b4bfe82d74842f728b854b501a294), licenses ["Apache-2.0"], count null, events null
content:
"""
Django settings for template_with_demo project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w%_vx7u(io%%rbr_kx+qwhqoz%_6st6s5h!i04#qbzzmz#5@57'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'template_with_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'template_with_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
avg_line_length: 26.099174 | max_line_length: 91 | alphanum_fraction: 0.701077

hexsha: f3a97299cae29889258d94eadbc27a9ad68f7aa7 | size: 2,097 | ext: py | lang: Python
max_stars: benchmarks/logs/microbenchmarks/single_regex/runpyspark.py @ rahulyesantharao/tuplex (head 03733a57ccb5a3770eecaf1c3adcfb520ed82138), licenses ["Apache-2.0"], count 778, events 2021-06-30T03:40:43.000Z to 2022-03-28T20:40:20.000Z
max_issues: benchmarks/logs/microbenchmarks/single_regex/runpyspark.py @ rahulyesantharao/tuplex (head 03733a57ccb5a3770eecaf1c3adcfb520ed82138), licenses ["Apache-2.0"], count 41, events 2021-07-05T17:55:56.000Z to 2022-03-31T15:27:19.000Z
max_forks: benchmarks/logs/microbenchmarks/single_regex/runpyspark.py @ rahulyesantharao/tuplex (head 03733a57ccb5a3770eecaf1c3adcfb520ed82138), licenses ["Apache-2.0"], count 39, events 2021-07-01T02:40:33.000Z to 2022-03-30T21:46:55.000Z
content:
import time
import argparse
import json
import os
import glob
import sys
import re
parser = argparse.ArgumentParser(description="Apache data cleaning + join")
parser.add_argument(
"--path",
type=str,
dest="data_path",
default="../../test/resources/2000.01.01.txt",
help="path or pattern to log data",
)
parser.add_argument(
"--ip_blacklist_path",
type=str,
dest="ip_blacklist_path",
default="../../test/resources/bad_ips_all.txt",
)
parser.add_argument(
"--pipeline_type",
type=str,
dest="pipeline_type",
choices=["spark_regex"],
default="spark_regex",
help="whether to use the regex clean function or the string strip based one",
)
args = parser.parse_args()
# save the run configuration
output_path = f"spark_output_{args.pipeline_type}"
# get the input files
perf_paths = [args.data_path]
if not os.path.isfile(args.data_path):
file_paths = sorted(glob.glob(os.path.join(args.data_path, "*.*.*.txt")))
perf_paths = file_paths
if not perf_paths:
print("found no log data to process, abort.")
sys.exit(1)
# import spark
startup_time = 0
tstart = time.time()
import pyspark
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import udf, col, column, regexp_extract
from pyspark.sql.types import (
StructType,
StructField,
IntegerType,
StringType,
FloatType,
BooleanType,
)
spark = SparkSession.builder.appName("apacheLogs").getOrCreate()
startup_time = time.time() - tstart
print("PySpark startup time: {}".format(startup_time))
# open file
tstart = time.time()
df = (
spark.read.text(perf_paths)
.select(
regexp_extract("value", r'^(\S+) \S+ \S+ \[[\w:/]+\s[+\-]\d{4}\] "\S+ \S+\s*\S*\s*" \d{3} \S+', 1).alias("ip"),
)
)
df.select(
["ip"]
).write.csv(
output_path,
mode="overwrite",
sep=",",
header=True,
escape='"',
nullValue="\u0000",
# emptyValue="\u0000",
)
job_time = time.time() - tstart
print("Pyspark job time: {} s".format(job_time))
print(json.dumps({"startupTime": startup_time, "jobTime": job_time}))
avg_line_length: 22.548387 | max_line_length: 119 | alphanum_fraction: 0.667144

hexsha: 5e0432cc1e648bfb264e721ad9bccd4da94bcf0b | size: 7,546 | ext: py | lang: Python
max_stars: swig/python/scripts/gdal_proximity.py @ gajgeospatial/gdal-3.1.0 (head ac735c543bbdb0bfa6c817920a852e38faf84645), licenses ["Apache-2.0"], count null, events null
max_issues: swig/python/scripts/gdal_proximity.py @ gajgeospatial/gdal-3.1.0 (head ac735c543bbdb0bfa6c817920a852e38faf84645), licenses ["Apache-2.0"], count null, events null
max_forks: swig/python/scripts/gdal_proximity.py @ gajgeospatial/gdal-3.1.0 (head ac735c543bbdb0bfa6c817920a852e38faf84645), licenses ["Apache-2.0"], count 1, events 2020-07-24T11:11:37.000Z to 2020-07-24T11:11:37.000Z
content:
#!/usr/bin/env python3
# ******************************************************************************
# $Id: gdal_proximity.py 428d6fbc987332afb0ba6c7b6913390f7386e864 2020-01-17 22:19:28 +0100 Even Rouault $
#
# Name: gdalproximity
# Project: GDAL Python Interface
# Purpose: Application for computing raster proximity maps.
# Author: Frank Warmerdam, warmerdam@pobox.com
#
# ******************************************************************************
# Copyright (c) 2008, Frank Warmerdam
# Copyright (c) 2009-2011, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import os.path
import sys
from osgeo import gdal
def Usage():
print("""
gdal_proximity.py srcfile dstfile [-srcband n] [-dstband n]
[-of format] [-co name=value]*
[-ot Byte/Int16/Int32/Float32/etc]
[-values n,n,n] [-distunits PIXEL/GEO]
[-maxdist n] [-nodata n] [-use_input_nodata YES/NO]
[-fixed-buf-val n] [-q] """)
sys.exit(1)
def DoesDriverHandleExtension(drv, ext):
exts = drv.GetMetadataItem(gdal.DMD_EXTENSIONS)
return exts is not None and exts.lower().find(ext.lower()) >= 0
def GetExtension(filename):
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return ext
def GetOutputDriversFor(filename):
drv_list = []
ext = GetExtension(filename)
for i in range(gdal.GetDriverCount()):
drv = gdal.GetDriver(i)
if (drv.GetMetadataItem(gdal.DCAP_CREATE) is not None or
drv.GetMetadataItem(gdal.DCAP_CREATECOPY) is not None) and \
drv.GetMetadataItem(gdal.DCAP_RASTER) is not None:
if ext and DoesDriverHandleExtension(drv, ext):
drv_list.append(drv.ShortName)
else:
prefix = drv.GetMetadataItem(gdal.DMD_CONNECTION_PREFIX)
if prefix is not None and filename.lower().startswith(prefix.lower()):
drv_list.append(drv.ShortName)
# GMT is registered before netCDF for opening reasons, but we want
# netCDF to be used by default for output.
    if ext.lower() == 'nc' and len(drv_list) >= 2 and \
       drv_list[0].upper() == 'GMT' and drv_list[1].upper() == 'NETCDF':
drv_list = ['NETCDF', 'GMT']
return drv_list
def GetOutputDriverFor(filename):
drv_list = GetOutputDriversFor(filename)
ext = GetExtension(filename)
if not drv_list:
if not ext:
return 'GTiff'
else:
raise Exception("Cannot guess driver for %s" % filename)
elif len(drv_list) > 1:
print("Several drivers matching %s extension. Using %s" % (ext if ext else '', drv_list[0]))
return drv_list[0]
# =============================================================================
# Mainline
# =============================================================================
frmt = None
creation_options = []
options = []
src_filename = None
src_band_n = 1
dst_filename = None
dst_band_n = 1
creation_type = 'Float32'
quiet_flag = 0
gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor(sys.argv)
if argv is None:
sys.exit(0)
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-of' or arg == '-f':
i = i + 1
frmt = argv[i]
elif arg == '-co':
i = i + 1
creation_options.append(argv[i])
elif arg == '-ot':
i = i + 1
creation_type = argv[i]
elif arg == '-maxdist':
i = i + 1
options.append('MAXDIST=' + argv[i])
elif arg == '-values':
i = i + 1
options.append('VALUES=' + argv[i])
elif arg == '-distunits':
i = i + 1
options.append('DISTUNITS=' + argv[i])
elif arg == '-nodata':
i = i + 1
options.append('NODATA=' + argv[i])
elif arg == '-use_input_nodata':
i = i + 1
options.append('USE_INPUT_NODATA=' + argv[i])
elif arg == '-fixed-buf-val':
i = i + 1
options.append('FIXED_BUF_VAL=' + argv[i])
elif arg == '-srcband':
i = i + 1
src_band_n = int(argv[i])
elif arg == '-dstband':
i = i + 1
dst_band_n = int(argv[i])
elif arg == '-q' or arg == '-quiet':
quiet_flag = 1
elif src_filename is None:
src_filename = argv[i]
elif dst_filename is None:
dst_filename = argv[i]
else:
Usage()
i = i + 1
if src_filename is None or dst_filename is None:
Usage()
# =============================================================================
# Open source file
# =============================================================================
src_ds = gdal.Open(src_filename)
if src_ds is None:
print('Unable to open %s' % src_filename)
sys.exit(1)
srcband = src_ds.GetRasterBand(src_band_n)
# =============================================================================
# Try opening the destination file as an existing file.
# =============================================================================
try:
driver = gdal.IdentifyDriver(dst_filename)
if driver is not None:
dst_ds = gdal.Open(dst_filename, gdal.GA_Update)
dstband = dst_ds.GetRasterBand(dst_band_n)
else:
dst_ds = None
except:
dst_ds = None
# =============================================================================
# Create output file.
# =============================================================================
if dst_ds is None:
if frmt is None:
frmt = GetOutputDriverFor(dst_filename)
drv = gdal.GetDriverByName(frmt)
dst_ds = drv.Create(dst_filename,
src_ds.RasterXSize, src_ds.RasterYSize, 1,
gdal.GetDataTypeByName(creation_type), creation_options)
dst_ds.SetGeoTransform(src_ds.GetGeoTransform())
dst_ds.SetProjection(src_ds.GetProjectionRef())
dstband = dst_ds.GetRasterBand(1)
# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet_flag:
prog_func = None
else:
prog_func = gdal.TermProgress_nocb
gdal.ComputeProximity(srcband, dstband, options,
callback=prog_func)
srcband = None
dstband = None
src_ds = None
dst_ds = None
avg_line_length: 31.053498 | max_line_length: 107 | alphanum_fraction: 0.548635

hexsha: c49167ddeb8c571692b115df5b8b67b46a44bbde | size: 5,057 | ext: py | lang: Python
max_stars: NETCDF scripts/GIMMS NDVI/gimms_ndvi.py @ DangoMelon0701/PyRemote-Sensing (head fa12545b89c937baf5f1be39a4b2f4eebf714a9a), licenses ["MIT"], count 1, events 2019-12-18T22:01:20.000Z to 2019-12-18T22:01:20.000Z
max_issues: NETCDF scripts/GIMMS NDVI/gimms_ndvi.py @ DangoMelon0701/PyRemote-Sensing (head fa12545b89c937baf5f1be39a4b2f4eebf714a9a), licenses ["MIT"], count null, events null
max_forks: NETCDF scripts/GIMMS NDVI/gimms_ndvi.py @ DangoMelon0701/PyRemote-Sensing (head fa12545b89c937baf5f1be39a4b2f4eebf714a9a), licenses ["MIT"], count null, events null
content:
# -*- coding: utf-8 -*-
"""
Created on Tue May 16 01:05:05 2017
@author: gerar
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
from osgeo import gdal,osr
import numpy as np
import os, time, math
#%%
def plot_data(data,cbar=0,save_img=0,name='image',norm = 0):
plot,axs = plt.subplots()
if norm == 1:
norm = mpl.colors.Normalize(vmin=-0.5, vmax=0.5)
cmap = mpl.cm.get_cmap('jet')
raw_data = axs.imshow(data,interpolation="gaussian",cmap=cmap,norm=norm)
else:
raw_data = axs.imshow(data,interpolation="gaussian",cmap='Greens')
if cbar == 1:
cbar = plot.colorbar(raw_data)
if save_img == 1:
plt.savefig("{}.png".format(name),dpi=1000,bbox_inches='tight')
#%%
class Gdal_netcdf(object):
def __init__(self,gdal_file):
self.gdal_file = gdal.Open(gdal_file)
self.sub_ds = self.gdal_file.GetSubDatasets()
self.sds_ndvi = gdal.Open(self.sub_ds[0][0])
def get_ndvi(self,subdataset,band_number):
data = subdataset.GetRasterBand(band_number)
no_data_value = data.GetMetadata()['ndvi_missing_value']
fill_value = self.gdal_file.GetMetadata()['_fill_val']
ndvi_data = data.ReadAsArray().astype(float)
ndvi_data[np.where(ndvi_data == float(no_data_value))]=0
ndvi_data[np.where(ndvi_data == float(fill_value))]=np.nan
return ndvi_data/10000
def get_pixel_number(self,origin,resolution,coords):
return np.abs(np.divide(np.subtract(coords,origin),resolution))
def get_lat_lon_corner(self,origin,resolution,pixels):
return np.add(origin,np.multiply(pixels,resolution))
def create_tiff(self,raster_band,raster_out,data,ul_lat,ul_lon,lr_lat,lr_lon):
originX = float(raster_band.GetMetadata()['WesternmostLongitude'])
originY = float(raster_band.GetMetadata()['NorthernmostLatitude'])
cols = raster_band.RasterXSize
rows = raster_band.RasterYSize
pixelWidth = (max(
float(raster_band.GetMetadata()['WesternmostLongitude']),
float(raster_band.GetMetadata()['EasternmostLongitude']))-min(
float(raster_band.GetMetadata()['WesternmostLongitude']),
float(raster_band.GetMetadata()['EasternmostLongitude'])))/cols
pixelHeight = -1*(max(
float(raster_band.GetMetadata()['SouthernmostLatitude']),
float(raster_band.GetMetadata()['NorthernmostLatitude']))-min(
float(raster_band.GetMetadata()['SouthernmostLatitude']),
float(raster_band.GetMetadata()['NorthernmostLatitude'])))/rows
        # Find the pixel positions of the coordinates
        # for the box defined by the lr/ul lat/lon
y_ul, x_ul = [int(math.ceil(x)) for x in self.get_pixel_number(
(originY,originX),(pixelHeight,pixelWidth),
(ul_lat,ul_lon))]#(0,-82.5))
y_lr, x_lr = [int(math.ceil(x)) for x in self.get_pixel_number(
(originY,originX),(pixelHeight,pixelWidth),
(lr_lat,lr_lon))]#(-20,-68))
map_yUL,map_xUL = self.get_lat_lon_corner(
(originY,originX),(pixelHeight,pixelWidth),(y_ul,x_ul))
        # Generate the output tiff
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(raster_out, int(x_lr-x_ul), int(y_lr-y_ul), 1, gdal.GDT_Float32)
outRaster.SetGeoTransform((map_xUL-pixelWidth/2, pixelWidth, 0, map_yUL-pixelHeight/2, 0, pixelHeight))
outband = outRaster.GetRasterBand(1)
outband.WriteArray(data[y_ul:y_lr,x_ul:x_lr])
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromEPSG(4326)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
def main(self,path,offset=0):
for band in range(1,self.sds_ndvi.RasterCount+1):
ndvi_data = self.get_ndvi(self.sds_ndvi,band)
self.create_tiff(self.sds_ndvi,
os.path.join(path,'ndvi_peru_band{}.tif'.format(band+offset*12)),
ndvi_data,0,-82.5,-20,-68)
def del_variables(self):
del self.gdal_file,self.sds_ndvi,self.sub_ds
#%%
if __name__ == '__main__':
tif_dir = os.path.join(os.getcwd(),'TIF_Data')
if not os.path.exists(tif_dir):
os.mkdir(tif_dir)
files_list = []
for files in os.listdir(os.getcwd()):
if files.endswith('.nc4'):
files_list.append(files)
    print 'Starting the process'
start_time = time.time()
for num,net_cdf in enumerate(files_list):
temp = Gdal_netcdf(net_cdf)
temp.main(tif_dir,num)
        temp.del_variables()
        print 'File {} finished'.format(net_cdf)
    print 'Process finished in {} seconds\n'.format(round(time.time() - start_time,2))
# from 0 to -20 lat
# from -82.5 to -68 lon
avg_line_length: 44.359649 | max_line_length: 112 | alphanum_fraction: 0.627052

hexsha: 47ac7c0c8eb4f65921e63b52a7bee135ed1bc759 | size: 2,823 | ext: py | lang: Python
max_stars: classifier/utils.py @ bendikbo/SSED (head fdd0e74d419687bc8cba65341d7248ca6ccd1a4e), licenses ["MIT"], count null, events null
max_issues: classifier/utils.py @ bendikbo/SSED (head fdd0e74d419687bc8cba65341d7248ca6ccd1a4e), licenses ["MIT"], count null, events null
max_forks: classifier/utils.py @ bendikbo/SSED (head fdd0e74d419687bc8cba65341d7248ca6ccd1a4e), licenses ["MIT"], count null, events null
content:
import torch
import matplotlib.pyplot as plt
import numpy as np
import pathlib
np.random.seed(0)
torch.manual_seed(0)
# Allow torch/cudnn to optimize/analyze the input/output shape of convolutions
# To optimize forward/backward pass.
# This will increase model throughput for fixed input shape to the network
torch.backends.cudnn.benchmark = True
# Cudnn is not deterministic by default. Set this to True if you want
# to be sure to reproduce your results
torch.backends.cudnn.deterministic = True
def to_cuda(elements):
"""
Transfers every object in elements to GPU VRAM if available.
elements can be a object or list/tuple of objects
"""
if torch.cuda.is_available():
if type(elements) == tuple or type(elements) == list:
return [x.cuda() for x in elements]
return elements.cuda()
return elements
def plot_loss(loss_dict: dict, label: str = None, fmt="-"):
"""
Args:
loss_dict: a dictionary where keys are the global step and values are the given loss / accuracy
label: a string to use as label in plot legend
"""
global_steps = list(loss_dict.keys())
loss = list(loss_dict.values())
plt.plot(global_steps, loss, fmt, label=label)
def save_checkpoint(state_dict: dict,
filepath: pathlib.Path,
is_best: bool,
max_keep: int = 1):
"""
Saves state_dict to filepath. Deletes old checkpoints as time passes.
If is_best is toggled, saves a checkpoint to best.ckpt
"""
filepath.parent.mkdir(exist_ok=True, parents=True)
list_path = filepath.parent.joinpath("latest_checkpoint")
torch.save(state_dict, filepath)
if is_best:
torch.save(state_dict, filepath.parent.joinpath("best.ckpt"))
previous_checkpoints = get_previous_checkpoints(filepath.parent)
if filepath.name not in previous_checkpoints:
previous_checkpoints = [filepath.name] + previous_checkpoints
if len(previous_checkpoints) > max_keep:
for ckpt in previous_checkpoints[max_keep:]:
path = filepath.parent.joinpath(ckpt)
if path.exists():
path.unlink()
previous_checkpoints = previous_checkpoints[:max_keep]
with open(list_path, 'w') as fp:
fp.write("\n".join(previous_checkpoints))
def get_previous_checkpoints(directory: pathlib.Path) -> list:
assert directory.is_dir()
list_path = directory.joinpath("latest_checkpoint")
list_path.touch(exist_ok=True)
with open(list_path) as fp:
ckpt_list = fp.readlines()
return [_.strip() for _ in ckpt_list]
def load_best_checkpoint(directory: pathlib.Path):
filepath = directory.joinpath("best.ckpt")
if not filepath.is_file():
return None
return torch.load(directory.joinpath("best.ckpt"))
avg_line_length: 34.851852 | max_line_length: 103 | alphanum_fraction: 0.690755

hexsha: c38a5eeec4c636853576264d7db7f889387424ba | size: 549 | ext: py | lang: Python
max_stars: src/mailer/backend.py @ jaap3/django-mailer (head ae0475c580a2217508b5aae2c71394c1936cdf5e), licenses ["MIT"], count 466, events 2015-01-04T02:28:54.000Z to 2022-03-31T20:23:18.000Z
max_issues: src/mailer/backend.py @ jaap3/django-mailer (head ae0475c580a2217508b5aae2c71394c1936cdf5e), licenses ["MIT"], count 129, events 2015-01-03T10:43:07.000Z to 2022-03-11T17:07:33.000Z
max_forks: src/mailer/backend.py @ jaap3/django-mailer (head ae0475c580a2217508b5aae2c71394c1936cdf5e), licenses ["MIT"], count 144, events 2015-01-29T20:19:38.000Z to 2022-02-13T19:48:37.000Z
content:
from __future__ import unicode_literals
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from mailer.models import Message
class DbBackend(BaseEmailBackend):
def send_messages(self, email_messages):
# allow for a custom batch size
MESSAGES_BATCH_SIZE = getattr(settings, "MAILER_MESSAGES_BATCH_SIZE", None)
messages = Message.objects.bulk_create([
Message(email=email) for email in email_messages
], MESSAGES_BATCH_SIZE)
return len(messages)
avg_line_length: 27.45 | max_line_length: 83 | alphanum_fraction: 0.741348

hexsha: 21345947a462e640f56e04061a2d75224a1fdabb | size: 5,368 | ext: py | lang: Python
max_stars: no-std-compat-custom/generate.py @ peterwilli/nostdcompat_patcher (head be5d0f5e17497f96fc67dc3e6d56abd589e1b889), licenses ["MIT"], count null, events null
max_issues: no-std-compat-custom/generate.py @ peterwilli/nostdcompat_patcher (head be5d0f5e17497f96fc67dc3e6d56abd589e1b889), licenses ["MIT"], count null, events null
max_forks: no-std-compat-custom/generate.py @ peterwilli/nostdcompat_patcher (head be5d0f5e17497f96fc67dc3e6d56abd589e1b889), licenses ["MIT"], count null, events null
content:
#!/usr/bin/env python3
from collections import namedtuple
from dataclasses import dataclass, field
import argparse
import os
import re
import subprocess
import sys
Namespace = namedtuple("Namespace", "name module")
@dataclass
class Module:
unstable: bool
cfgs: list = field(default_factory=list)
# Parse arguments
parser = argparse.ArgumentParser(
description="Generate a std compatibility module"
)
parser.add_argument("--src", help=(
"Specify the location of the rust source code. The default is "
"`$(rustc --print sysroot)/lib/rustlib/src/rust/library`"
))
args = parser.parse_args()
if args.src is None:
output = subprocess.run(["rustc", "--print", "sysroot"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
args.src = os.path.join(output.stdout.decode("utf-8").strip(),
"lib", "rustlib", "src", "rust", "library")
# Read files
modules_regex = re.compile(
r"^(?:\S.*)?pub\s+(?:mod\s+|use\s+(?:[a-zA-Z_][a-zA-Z0-9_]*::)*)"
r"([a-zA-Z_][a-zA-Z0-9_]*);",
re.MULTILINE
)
def modules(crate):
"""
Return a dictionary of all modules and whether they appear unstable or not.
"""
root = os.path.join(args.src, crate, "src")
lib = os.path.join(root, "lib.rs")
with open(lib) as f:
contents = f.read()
modules = dict()
for match in modules_regex.finditer(contents):
module = match.group(1)
unstable = False
path = os.path.join(root, module + ".rs")
if not os.path.isfile(path):
path = os.path.join(root, module, "mod.rs")
try:
with open(path, "r") as f:
unstable = "#![unstable" in f.read()
if unstable:
print(
f"Module '{module}' from '{crate}' appears unstable",
file=sys.stderr
)
except OSError as e:
print(e, file=sys.stderr)
pass
modules[module] = Module(unstable)
return modules
def generate(module, *namespaces):
"""
Generate code for any module, given its name and which namespaces it appears
under and whether it's unstable or not.
"""
out = f"pub mod {module} {{\n"
if module == "prelude":
return None
for namespace in namespaces:
out += " "
cfgs = []
if namespace.name != "core":
cfgs.append(f"feature = \"{namespace.name}\"")
if namespace.module.unstable:
cfgs.append("feature = \"unstable\"")
cfgs += namespace.module.cfgs
if len(cfgs) == 1:
out += f"#[cfg({cfgs[0]})] "
elif len(cfgs) > 1:
out += "#[cfg(all(" + ", ".join(cfgs) + "))] "
out += f"pub use __{namespace.name}::{module}::*;\n"
if module == "collections":
prefix = (
" #[cfg(all("
"feature = \"alloc\", "
"feature = \"compat_hash\""
"))] pub use hashbrown::"
)
out += (
prefix + "HashMap;\n" +
prefix + "HashSet;\n"
)
elif module == "sync":
prefix = (
" #[cfg(all("
"feature = \"alloc\", "
"feature = \"compat_sync\""
"))] pub use spin::"
)
out += (
prefix + "Mutex;\n" +
prefix + "MutexGuard;\n" +
prefix + "Once;\n" +
prefix + "RwLock;\n" +
prefix + "RwLockReadGuard;\n" +
prefix + "RwLockWriteGuard;\n"
)
out += "}"
return out
# Main logic
core = modules("core")
alloc = modules("alloc")
# Module overrides
core["lazy"].unstable = True
alloc["sync"].cfgs.append("not(target_os = \"none\")")
alloc["task"].cfgs.append("not(target_os = \"none\")")
generated = {}
core_keys = set(core.keys())
alloc_keys = set(alloc.keys())
# Appearing in both
for module in core_keys & alloc_keys:
generated[module] = generate(
module,
Namespace("core", core[module]),
Namespace("alloc", alloc[module]),
)
# Only in core
for module in core_keys - alloc_keys:
generated[module] = generate(
module,
Namespace("core", core[module]),
)
# Only in alloc
for module in alloc_keys - core_keys:
generated[module] = generate(
module,
Namespace("alloc", alloc[module]),
)
# Complete module overrides
generated["prelude"] = """pub mod prelude {
pub mod v1 {
// Prelude
pub use __core::prelude::v1::*;
#[cfg(all(feature = "alloc", feature = "unstable"))]
pub use __alloc::prelude::v1::*;
#[cfg(all(feature = "alloc", not(feature = "unstable")))]
pub use __alloc::{
borrow::ToOwned,
boxed::Box,
// UNSTABLE: slice::SliceConcatExt,
string::String,
string::ToString,
vec::Vec,
};
// Other imports
#[cfg(feature = "alloc")]
pub use __alloc::{format, vec};
#[cfg(feature = "compat_macros")]
pub use crate::{print, println, eprint, eprintln, dbg};
}
}"""
print("""//! Generated by generate.py located at the repository root
//! ./generate.py > src/generated.rs""")
for module in sorted(generated.items(), key=lambda i: i[0]):
print(module[1])
avg_line_length: 26.44335 | max_line_length: 80 | alphanum_fraction: 0.540611

hexsha: c947bf6492a8eab627f60283a3d7e4036105e1fc | size: 1,633 | ext: py | lang: Python
max_stars: oscar_ecomenv/Scripts/enhancer.py @ PamilerinId/Ecommerce-Boiler (head 1d706f88c8c828e86309793cb33ea102f385bf2f), licenses ["Apache-2.0"], count null, events null
max_issues: oscar_ecomenv/Scripts/enhancer.py @ PamilerinId/Ecommerce-Boiler (head 1d706f88c8c828e86309793cb33ea102f385bf2f), licenses ["Apache-2.0"], count null, events null
max_forks: oscar_ecomenv/Scripts/enhancer.py @ PamilerinId/Ecommerce-Boiler (head 1d706f88c8c828e86309793cb33ea102f385bf2f), licenses ["Apache-2.0"], count null, events null
content:
#!c:\users\pi\documents\batcave\web\ecommerce\oscar_ecomenv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk, ImageEnhance
#
# enhancer widget
class Enhance(tkinter.Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
tkinter.Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
tkinter.Label(self, image=self.tkim).pack()
# scale
s = tkinter.Scale(self, label=name, orient=tkinter.HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = float(value)
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
if len(sys.argv) != 2:
print("Usage: enhancer file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(tkinter.Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(tkinter.Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(tkinter.Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
avg_line_length: 25.123077 | max_line_length: 88 | alphanum_fraction: 0.652786

hexsha: 97161da1ce2a3741ff22beffdfffdf165f1c21ae | size: 8,346 | ext: py | lang: Python
max_stars: test/test_request_params.py @ eduramirezh/pyinaturalist (head e5da7ced7fae31f27310868bdb2d349bdff8e0d4), licenses ["MIT"], count null, events null
max_issues: test/test_request_params.py @ eduramirezh/pyinaturalist (head e5da7ced7fae31f27310868bdb2d349bdff8e0d4), licenses ["MIT"], count null, events null
max_forks: test/test_request_params.py @ eduramirezh/pyinaturalist (head e5da7ced7fae31f27310868bdb2d349bdff8e0d4), licenses ["MIT"], count null, events null
content:
import pytest
from datetime import date, datetime
from dateutil.tz import gettz
from unittest.mock import patch
from pyinaturalist.request_params import (
convert_bool_params,
convert_datetime_params,
convert_list_params,
convert_observation_fields,
convert_pagination_params,
get_interval_ranges,
preprocess_request_body,
preprocess_request_params,
strip_empty_values,
validate_ids,
validate_multiple_choice_param,
validate_multiple_choice_params,
)
TEST_PARAMS = {
'is_active': False,
'only_id': 'true',
'preferred_place_id': [1, 2],
'rank': ['phylum', 'class'],
'q': '',
'locale': None,
'parent_id': 1,
'observation_fields': {1: 'value'},
}
def test_convert_bool_params():
params = convert_bool_params(TEST_PARAMS)
assert params['is_active'] == 'false'
assert params['only_id'] == 'true'
# Test some recognized date(time) formats, with and without TZ info, in date and non-date params
@pytest.mark.parametrize(
'param, value, expected',
[
('created_d1', '19951231T235959', '1995-12-31T23:59:59-08:00'),
('created_d2', '2008-08-08 08:08:08Z', '2008-08-08T08:08:08+00:00'),
('created_on', '2010-10-10 10:10:10-05:00', '2010-10-10T10:10:10-05:00'),
('created_on', 'Jan 1 2000', '2000-01-01T00:00:00-08:00'),
('d1', '19970716', '1997-07-16T00:00:00-07:00'),
('q', date(1954, 2, 5), '1954-02-05'),
('q', datetime(1954, 2, 5), '1954-02-05T00:00:00-08:00'),
('q', 'not a datetime', 'not a datetime'),
],
)
@patch('pyinaturalist.converters.tzlocal', return_value=gettz('US/Pacific'))
def test_convert_datetime_params(tzlocal, param, value, expected):
converted = convert_datetime_params({param: value})
assert converted[param] == expected
# Test both int and string lists
def test_convert_list_params():
params = convert_list_params(TEST_PARAMS)
assert params['preferred_place_id'] == '1,2'
assert params['rank'] == 'phylum,class'
def test_convert_observation_fields():
params = convert_observation_fields(TEST_PARAMS)
assert params['observation_field_values_attributes'] == [
{'observation_field_id': 1, 'value': 'value'}
]
def test_convert_pagination_params():
params = convert_pagination_params({'per_page': 100})
assert params['per_page'] == 100
params = convert_pagination_params({'per_page': 100, 'count_only': True})
assert params['per_page'] == 0
assert 'count_only' not in params
params = convert_pagination_params({'per_page': 100, 'count_only': False})
assert params['per_page'] == 100
assert 'count_only' not in params
def test_get_interval_ranges__monthly():
expected_ranges = [
(datetime(2020, 1, 1, 0, 0), datetime(2020, 1, 31, 0, 0)),
(datetime(2020, 2, 1, 0, 0), datetime(2020, 2, 29, 0, 0)),
(datetime(2020, 3, 1, 0, 0), datetime(2020, 3, 31, 0, 0)),
(datetime(2020, 4, 1, 0, 0), datetime(2020, 4, 30, 0, 0)),
(datetime(2020, 5, 1, 0, 0), datetime(2020, 5, 31, 0, 0)),
(datetime(2020, 6, 1, 0, 0), datetime(2020, 6, 30, 0, 0)),
(datetime(2020, 7, 1, 0, 0), datetime(2020, 7, 31, 0, 0)),
(datetime(2020, 8, 1, 0, 0), datetime(2020, 8, 31, 0, 0)),
(datetime(2020, 9, 1, 0, 0), datetime(2020, 9, 30, 0, 0)),
(datetime(2020, 10, 1, 0, 0), datetime(2020, 10, 31, 0, 0)),
(datetime(2020, 11, 1, 0, 0), datetime(2020, 11, 30, 0, 0)),
(datetime(2020, 12, 1, 0, 0), datetime(2020, 12, 31, 0, 0)),
]
ranges = get_interval_ranges(datetime(2020, 1, 1), datetime(2020, 12, 31), 'monthly')
assert ranges == expected_ranges
def test_get_interval_ranges__yearly():
expected_ranges = [
(datetime(2010, 1, 1, 0, 0), datetime(2010, 12, 31, 0, 0)),
(datetime(2011, 1, 1, 0, 0), datetime(2011, 12, 31, 0, 0)),
(datetime(2012, 1, 1, 0, 0), datetime(2012, 12, 31, 0, 0)),
(datetime(2013, 1, 1, 0, 0), datetime(2013, 12, 31, 0, 0)),
(datetime(2014, 1, 1, 0, 0), datetime(2014, 12, 31, 0, 0)),
(datetime(2015, 1, 1, 0, 0), datetime(2015, 12, 31, 0, 0)),
(datetime(2016, 1, 1, 0, 0), datetime(2016, 12, 31, 0, 0)),
(datetime(2017, 1, 1, 0, 0), datetime(2017, 12, 31, 0, 0)),
(datetime(2018, 1, 1, 0, 0), datetime(2018, 12, 31, 0, 0)),
(datetime(2019, 1, 1, 0, 0), datetime(2019, 12, 31, 0, 0)),
(datetime(2020, 1, 1, 0, 0), datetime(2020, 12, 31, 0, 0)),
]
ranges = get_interval_ranges(datetime(2010, 1, 1), datetime(2020, 1, 1), 'yearly')
assert ranges == expected_ranges
def test_get_interval_ranges__invalid():
with pytest.raises(ValueError):
get_interval_ranges(datetime(2020, 1, 1), datetime(2020, 12, 31), 'daily')
def test_strip_empty_params():
params = strip_empty_values(TEST_PARAMS)
assert len(params) == 6
assert 'q' not in params and 'locale' not in params
assert 'is_active' in params and 'only_id' in params
@pytest.mark.parametrize(
'value, expected',
[
('1', '1'),
(1, '1'),
('1,2,3', '1,2,3'),
([1, 2, 3], '1,2,3'),
([1e5, 2e5], '100000,200000'),
],
)
def test_validate_ids(value, expected):
assert validate_ids(value) == expected
def test_validate_ids__invalid():
with pytest.raises(ValueError):
validate_ids('not a number')
# This is just here so that tests will fail if one of the conversion steps is removed
@patch('pyinaturalist.request_params.convert_bool_params')
@patch('pyinaturalist.request_params.convert_datetime_params')
@patch('pyinaturalist.request_params.convert_list_params')
@patch('pyinaturalist.request_params.strip_empty_values')
def test_preprocess_request_params(mock_bool, mock_datetime, mock_list, mock_strip):
preprocess_request_params({'id': 1})
assert all([mock_bool.called, mock_datetime.called, mock_list.called, mock_strip.called])
@patch('pyinaturalist.request_params.convert_bool_params')
@patch('pyinaturalist.request_params.convert_datetime_params')
@patch('pyinaturalist.request_params.convert_list_params')
@patch('pyinaturalist.request_params.strip_empty_values')
def test_preprocess_request_body(mock_bool, mock_datetime, mock_list, mock_strip):
preprocess_request_body({'id': 1})
assert all([mock_bool.called, mock_datetime.called, mock_list.called, mock_strip.called])
def test_validate_multiple_choice_param():
params = {
'param1': 'valid_str',
'param2': 'invalid_str',
}
choices = ['str1', 'str2', 'valid_str']
validated_params = validate_multiple_choice_param(params, 'param1', choices)
assert params == validated_params
with pytest.raises(ValueError):
validate_multiple_choice_param(params, 'param2', choices)
@pytest.mark.parametrize(
'params',
[
{'csi': 'LC'},
{'csi': ['EW', 'EX']},
{'geoprivacy': 'open'},
{'iconic_taxa': 'Animalia'},
{'identifications': 'most_agree'},
{'license': 'CC-BY-NC'},
{'rank': 'order'},
{'quality_grade': 'research'},
{'search_on': 'tags'},
{'geoprivacy': ['open', 'obscured']},
{'geoprivacy': 'open', 'iconic_taxa': 'Animalia', 'license': 'CC-BY-NC'},
],
)
def test_validate_multiple_choice_params(params):
    # If valid, no exception should be raised
validate_multiple_choice_params(params)
# If invalid, an exception should be raised
with pytest.raises(ValueError):
validate_multiple_choice_params({k: 'Invalid_value' for k in params})
# A valid + invalid value should also raise an exception
def append_invalid_value(value):
return [*value, 'Invalid_value'] if isinstance(value, list) else [value, 'Invalid_value']
with pytest.raises(ValueError):
validate_multiple_choice_params({k: append_invalid_value(v) for k, v in params.items()})
@pytest.mark.parametrize(
'params, expected_value',
[
({'identifications': 'most agree'}, 'most_agree'),
({'interval': 'month of year'}, 'month_of_year'),
],
)
def test_validate_multiple_choice_params__normalization(params, expected_value):
validated_params = validate_multiple_choice_params(params)
value = next(iter(validated_params.values()))
assert value == expected_value
avg_line_length: 36.445415 | max_line_length: 97 | alphanum_fraction: 0.65121

hexsha: 25d5b6f9f09e5535afb21fa46a7b3ba4b3001bad | size: 3,781 | ext: py | lang: Python
max_stars: contrib/macdeploy/custom_dsstore.py @ prapun77/FiatCoin (head 06244ab8fdf4f2d7ba76067403c554892034ca7b), licenses ["MIT"], count null, events null
max_issues: contrib/macdeploy/custom_dsstore.py @ prapun77/FiatCoin (head 06244ab8fdf4f2d7ba76067403c554892034ca7b), licenses ["MIT"], count null, events null
max_forks: contrib/macdeploy/custom_dsstore.py @ prapun77/FiatCoin (head 06244ab8fdf4f2d7ba76067403c554892034ca7b), licenses ["MIT"], count null, events null
content:
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00ateccoinuser:\x00Documents:\x00ateccoin:\x00ateccoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/ateccoinuser/Documents/ateccoin/ateccoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['ateccoin-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
avg_line_length: 61.983607 | max_line_length: 1,817 | alphanum_fraction: 0.72785

hexsha: 253bb298f41ee630c67aa58ffba2d92a3373ce2d | size: 33,914 | ext: py | lang: Python
max_stars: wagtail/search/tests/test_elasticsearch5_backend.py @ wgarlock/wagtail (head 1bfc13952f5ffc0e40a4435d15a5aefd70984430), licenses ["BSD-3-Clause"], count 2, events 2021-03-18T21:41:05.000Z to 2021-03-18T21:41:08.000Z
max_issues: wagtail/search/tests/test_elasticsearch5_backend.py @ wgarlock/wagtail (head 1bfc13952f5ffc0e40a4435d15a5aefd70984430), licenses ["BSD-3-Clause"], count 1, events 2021-03-06T11:44:33.000Z to 2021-11-26T16:51:19.000Z
max_forks: wagtail/search/tests/test_elasticsearch5_backend.py @ wgarlock/wagtail (head 1bfc13952f5ffc0e40a4435d15a5aefd70984430), licenses ["BSD-3-Clause"], count null, events null
content:
# -*- coding: utf-8 -*-
import datetime
import json
from unittest import mock
from django.db.models import Q
from django.test import TestCase
from elasticsearch.serializer import JSONSerializer
from wagtail.search.backends.elasticsearch5 import Elasticsearch5SearchBackend
from wagtail.search.query import MATCH_ALL, Phrase
from wagtail.tests.search import models
from .elasticsearch_common_tests import ElasticsearchCommonSearchBackendTests
class TestElasticsearch5SearchBackend(ElasticsearchCommonSearchBackendTests, TestCase):
backend_path = 'wagtail.search.backends.elasticsearch5'
class TestElasticsearch5SearchQuery(TestCase):
def assertDictEqual(self, a, b):
default = JSONSerializer().default
self.assertEqual(
json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
)
query_compiler_class = Elasticsearch5SearchBackend.query_compiler_class
def test_simple(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello")
# Check it
expected_result = {'bool': {
'filter': {'match': {'content_type': 'searchtests.Book'}},
'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}
}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_match_all(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), MATCH_ALL)
# Check it
expected_result = {'bool': {
'filter': {'match': {'content_type': 'searchtests.Book'}},
'must': {'match_all': {}}
}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_and_operator(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", operator='and')
# Check it
expected_result = {'bool': {
'filter': {'match': {'content_type': 'searchtests.Book'}},
'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials'], 'operator': 'and'}}
}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_filter(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(title="Test"), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'term': {'title_filter': 'Test'}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_and_filter(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(title="Test", publication_date=datetime.date(2017, 10, 18)), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'bool': {'must': [{'term': {'publication_date_filter': '2017-10-18'}}, {'term': {'title_filter': 'Test'}}]}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
# Make sure field filters are sorted (as they can be in any order which may cause false positives)
query = query_compiler.get_query()
field_filters = query['bool']['filter'][1]['bool']['must']
field_filters[:] = sorted(field_filters, key=lambda f: list(f['term'].keys())[0])
self.assertDictEqual(query, expected_result)
def test_or_filter(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(Q(title="Test") | Q(publication_date=datetime.date(2017, 10, 18))), "Hello")
# Make sure field filters are sorted (as they can be in any order which may cause false positives)
query = query_compiler.get_query()
field_filters = query['bool']['filter'][1]['bool']['should']
field_filters[:] = sorted(field_filters, key=lambda f: list(f['term'].keys())[0])
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'bool': {'should': [{'term': {'publication_date_filter': '2017-10-18'}}, {'term': {'title_filter': 'Test'}}]}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query, expected_result)
def test_negated_filter(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.exclude(publication_date=datetime.date(2017, 10, 18)), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'bool': {'mustNot': {'term': {'publication_date_filter': '2017-10-18'}}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_fields(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title'])
# Check it
expected_result = {'bool': {
'filter': {'match': {'content_type': 'searchtests.Book'}},
'must': {'match': {'title': {'query': 'Hello'}}}
}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_fields_with_and_operator(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title'], operator='and')
# Check it
expected_result = {'bool': {
'filter': {'match': {'content_type': 'searchtests.Book'}},
'must': {'match': {'title': {'query': 'Hello', 'operator': 'and'}}}
}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_multiple_fields(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title', 'content'])
# Check it
expected_result = {'bool': {
'filter': {'match': {'content_type': 'searchtests.Book'}},
'must': {'multi_match': {'fields': ['title', 'content'], 'query': 'Hello'}}
}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_multiple_fields_with_and_operator(self):
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.all(), "Hello", fields=['title', 'content'], operator='and'
)
# Check it
expected_result = {'bool': {
'filter': {'match': {'content_type': 'searchtests.Book'}},
'must': {'multi_match': {'fields': ['title', 'content'], 'query': 'Hello', 'operator': 'and'}}
}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_exact_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(title__exact="Test"), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'term': {'title_filter': 'Test'}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_none_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(title=None), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'bool': {'mustNot': {'exists': {'field': 'title_filter'}}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_isnull_true_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(title__isnull=True), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'bool': {'mustNot': {'exists': {'field': 'title_filter'}}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_isnull_false_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(title__isnull=False), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'exists': {'field': 'title_filter'}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_startswith_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.filter(title__startswith="Test"), "Hello")
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'prefix': {'title_filter': 'Test'}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_gt_lookup(self):
# This also tests conversion of python dates to strings
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.filter(publication_date__gt=datetime.datetime(2014, 4, 29)), "Hello"
)
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'range': {'publication_date_filter': {'gt': '2014-04-29'}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_lt_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.filter(publication_date__lt=datetime.datetime(2014, 4, 29)), "Hello"
)
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'range': {'publication_date_filter': {'lt': '2014-04-29'}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_gte_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.filter(publication_date__gte=datetime.datetime(2014, 4, 29)), "Hello"
)
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'range': {'publication_date_filter': {'gte': '2014-04-29'}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_lte_lookup(self):
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.filter(publication_date__lte=datetime.datetime(2014, 4, 29)), "Hello"
)
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'range': {'publication_date_filter': {'lte': '2014-04-29'}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_range_lookup(self):
start_date = datetime.datetime(2014, 4, 29)
end_date = datetime.datetime(2014, 8, 19)
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.filter(publication_date__range=(start_date, end_date)), "Hello"
)
# Check it
expected_result = {'bool': {'filter': [
{'match': {'content_type': 'searchtests.Book'}},
{'range': {'publication_date_filter': {'gte': '2014-04-29', 'lte': '2014-08-19'}}}
], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query_compiler.get_query(), expected_result)
def test_custom_ordering(self):
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.order_by('publication_date'), "Hello", order_by_relevance=False
)
# Check it
expected_result = [{'publication_date_filter': 'asc'}]
self.assertDictEqual(query_compiler.get_sort(), expected_result)
def test_custom_ordering_reversed(self):
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.order_by('-publication_date'), "Hello", order_by_relevance=False
)
# Check it
expected_result = [{'publication_date_filter': 'desc'}]
self.assertDictEqual(query_compiler.get_sort(), expected_result)
def test_custom_ordering_multiple(self):
# Create a query
query_compiler = self.query_compiler_class(
models.Book.objects.order_by('publication_date', 'number_of_pages'), "Hello", order_by_relevance=False
)
# Check it
expected_result = [{'publication_date_filter': 'asc'}, {'number_of_pages_filter': 'asc'}]
self.assertDictEqual(query_compiler.get_sort(), expected_result)
def test_phrase_query(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), Phrase("Hello world"))
# Check it
expected_result = {'multi_match': {'fields': ['_all', '_partials'], 'query': "Hello world", 'type': 'phrase'}}
self.assertDictEqual(query_compiler.get_inner_query(), expected_result)
def test_phrase_query_single_field(self):
# Create a query
query_compiler = self.query_compiler_class(models.Book.objects.all(), Phrase("Hello world"), fields=['title'])
# Check it
expected_result = {'match_phrase': {'title': "Hello world"}}
self.assertDictEqual(query_compiler.get_inner_query(), expected_result)
class TestElasticsearch5SearchResults(TestCase):
fixtures = ['search']
def assertDictEqual(self, a, b):
default = JSONSerializer().default
self.assertEqual(
            json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
)
def get_results(self):
backend = Elasticsearch5SearchBackend({})
query_compiler = mock.MagicMock()
query_compiler.queryset = models.Book.objects.all()
query_compiler.get_query.return_value = 'QUERY'
query_compiler.get_sort.return_value = None
return backend.results_class(backend, query_compiler)
def construct_search_response(self, results):
return {
'_shards': {'failed': 0, 'successful': 5, 'total': 5},
'hits': {
'hits': [
{
'_id': 'searchtests_book:' + str(result),
'_index': 'wagtail',
'_score': 1,
'_type': 'searchtests_book',
'fields': {
'pk': [str(result)],
}
}
for result in results
],
'max_score': 1,
'total': len(results)
},
'timed_out': False,
'took': 2
}
@mock.patch('elasticsearch.Elasticsearch.search')
def test_basic_search(self, search):
search.return_value = self.construct_search_response([])
results = self.get_results()
list(results) # Performs search
search.assert_any_call(
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
scroll='2m',
size=100
)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_get_single_item(self, search):
# Need to return something to prevent index error
search.return_value = self.construct_search_response([1])
results = self.get_results()
results[10] # Performs search
search.assert_any_call(
from_=10,
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
size=1
)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_slice_results(self, search):
search.return_value = self.construct_search_response([])
results = self.get_results()[1:4]
list(results) # Performs search
search.assert_any_call(
from_=1,
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
size=3
)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_slice_results_multiple_times(self, search):
search.return_value = self.construct_search_response([])
results = self.get_results()[10:][:10]
list(results) # Performs search
search.assert_any_call(
from_=10,
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
size=10
)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_slice_results_and_get_item(self, search):
# Need to return something to prevent index error
search.return_value = self.construct_search_response([1])
results = self.get_results()[10:]
results[10] # Performs search
search.assert_any_call(
from_=20,
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
size=1
)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_result_returned(self, search):
search.return_value = self.construct_search_response([1])
results = self.get_results()
self.assertEqual(results[0], models.Book.objects.get(id=1))
@mock.patch('elasticsearch.Elasticsearch.search')
def test_len_1(self, search):
search.return_value = self.construct_search_response([1])
results = self.get_results()
self.assertEqual(len(results), 1)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_len_2(self, search):
search.return_value = self.construct_search_response([1, 2])
results = self.get_results()
self.assertEqual(len(results), 2)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_duplicate_results(self, search): # Duplicates will not be removed
search.return_value = self.construct_search_response([1, 1])
results = list(self.get_results()) # Must cast to list so we only create one query
self.assertEqual(len(results), 2)
self.assertEqual(results[0], models.Book.objects.get(id=1))
self.assertEqual(results[1], models.Book.objects.get(id=1))
@mock.patch('elasticsearch.Elasticsearch.search')
def test_result_order(self, search):
search.return_value = self.construct_search_response(
[1, 2, 3]
)
results = list(self.get_results()) # Must cast to list so we only create one query
self.assertEqual(results[0], models.Book.objects.get(id=1))
self.assertEqual(results[1], models.Book.objects.get(id=2))
self.assertEqual(results[2], models.Book.objects.get(id=3))
@mock.patch('elasticsearch.Elasticsearch.search')
def test_result_order_2(self, search):
search.return_value = self.construct_search_response(
[3, 2, 1]
)
results = list(self.get_results()) # Must cast to list so we only create one query
self.assertEqual(results[0], models.Book.objects.get(id=3))
self.assertEqual(results[1], models.Book.objects.get(id=2))
self.assertEqual(results[2], models.Book.objects.get(id=1))
class TestElasticsearch5Mapping(TestCase):
fixtures = ['search']
def assertDictEqual(self, a, b):
default = JSONSerializer().default
self.assertEqual(
json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
)
def setUp(self):
# Create ES mapping
self.es_mapping = Elasticsearch5SearchBackend.mapping_class(models.Book)
# Create ES document
self.obj = models.Book.objects.get(id=4)
def test_get_document_type(self):
self.assertEqual(self.es_mapping.get_document_type(), 'searchtests_book')
def test_get_mapping(self):
# Build mapping
mapping = self.es_mapping.get_mapping()
# Check
expected_result = {
'searchtests_book': {
'properties': {
'pk': {'type': 'keyword', 'store': True, 'include_in_all': False},
'content_type': {'type': 'keyword', 'include_in_all': False},
'_partials': {'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard', 'include_in_all': False, 'type': 'text'},
'title': {'type': 'text', 'boost': 2.0, 'include_in_all': True, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
'title_edgengrams': {'type': 'text', 'include_in_all': False, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
'title_filter': {'type': 'keyword', 'include_in_all': False},
'authors': {
'type': 'nested',
'properties': {
'name': {'type': 'text', 'include_in_all': True},
'name_edgengrams': {'analyzer': 'edgengram_analyzer', 'include_in_all': False, 'search_analyzer': 'standard', 'type': 'text'},
'date_of_birth_filter': {'type': 'date', 'include_in_all': False},
},
},
'authors_filter': {'type': 'integer', 'include_in_all': False},
'publication_date_filter': {'type': 'date', 'include_in_all': False},
'number_of_pages_filter': {'type': 'integer', 'include_in_all': False},
'tags': {
'type': 'nested',
'properties': {
'name': {'type': 'text', 'include_in_all': True},
'slug_filter': {'type': 'keyword', 'include_in_all': False},
},
},
'tags_filter': {'type': 'integer', 'include_in_all': False}
}
}
}
self.assertDictEqual(mapping, expected_result)
def test_get_document_id(self):
self.assertEqual(self.es_mapping.get_document_id(self.obj), 'searchtests_book:' + str(self.obj.pk))
def test_get_document(self):
# Get document
document = self.es_mapping.get_document(self.obj)
# Sort partials
if '_partials' in document:
document['_partials'].sort()
# Check
expected_result = {
'pk': '4',
'content_type': ["searchtests.Book"],
'_partials': ['J. R. R. Tolkien', 'The Fellowship of the Ring', 'The Fellowship of the Ring'],
'title': 'The Fellowship of the Ring',
'title_edgengrams': 'The Fellowship of the Ring',
'title_filter': 'The Fellowship of the Ring',
'authors': [
{
'name': 'J. R. R. Tolkien',
'name_edgengrams': 'J. R. R. Tolkien',
'date_of_birth_filter': datetime.date(1892, 1, 3)
}
],
'authors_filter': [2],
'publication_date_filter': datetime.date(1954, 7, 29),
'number_of_pages_filter': 423,
'tags': [],
'tags_filter': []
}
self.assertDictEqual(document, expected_result)
class TestElasticsearch5MappingInheritance(TestCase):
fixtures = ['search']
def assertDictEqual(self, a, b):
default = JSONSerializer().default
self.assertEqual(
json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
)
def setUp(self):
# Create ES mapping
self.es_mapping = Elasticsearch5SearchBackend.mapping_class(models.Novel)
self.obj = models.Novel.objects.get(id=4)
def test_get_document_type(self):
self.assertEqual(self.es_mapping.get_document_type(), 'searchtests_book_searchtests_novel')
def test_get_mapping(self):
# Build mapping
mapping = self.es_mapping.get_mapping()
# Check
expected_result = {
'searchtests_book_searchtests_novel': {
'properties': {
# New
'searchtests_novel__setting': {'type': 'text', 'include_in_all': True, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
'searchtests_novel__protagonist': {
'type': 'nested',
'properties': {
'name': {'type': 'text', 'boost': 0.5, 'include_in_all': True},
'novel_id_filter': {'type': 'integer', 'include_in_all': False}
}
},
'searchtests_novel__protagonist_id_filter': {'type': 'integer', 'include_in_all': False},
'searchtests_novel__characters': {
'type': 'nested',
'properties': {
'name': {'type': 'text', 'boost': 0.25, 'include_in_all': True}
}
},
# Inherited
'pk': {'type': 'keyword', 'store': True, 'include_in_all': False},
'content_type': {'type': 'keyword', 'include_in_all': False},
'_partials': {'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard', 'include_in_all': False, 'type': 'text'},
'title': {'type': 'text', 'boost': 2.0, 'include_in_all': True, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
'title_edgengrams': {'type': 'text', 'include_in_all': False, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
'title_filter': {'type': 'keyword', 'include_in_all': False},
'authors': {
'type': 'nested',
'properties': {
'name': {'type': 'text', 'include_in_all': True},
'name_edgengrams': {'analyzer': 'edgengram_analyzer', 'include_in_all': False, 'search_analyzer': 'standard', 'type': 'text'},
'date_of_birth_filter': {'type': 'date', 'include_in_all': False},
},
},
'authors_filter': {'type': 'integer', 'include_in_all': False},
'publication_date_filter': {'type': 'date', 'include_in_all': False},
'number_of_pages_filter': {'type': 'integer', 'include_in_all': False},
'tags': {
'type': 'nested',
'properties': {
'name': {'type': 'text', 'include_in_all': True},
'slug_filter': {'type': 'keyword', 'include_in_all': False},
},
},
'tags_filter': {'type': 'integer', 'include_in_all': False}
}
}
}
self.assertDictEqual(mapping, expected_result)
def test_get_document_id(self):
        # This must be 'searchtests_book' instead of 'searchtests_book_searchtests_novel'
        # as it uses the content's base content type name.
        # This prevents the same object from accidentally being indexed twice.
self.assertEqual(self.es_mapping.get_document_id(self.obj), 'searchtests_book:' + str(self.obj.pk))
def test_get_document(self):
# Build document
document = self.es_mapping.get_document(self.obj)
# Sort partials
if '_partials' in document:
document['_partials'].sort()
# Sort characters
if 'searchtests_novel__characters' in document:
document['searchtests_novel__characters'].sort(key=lambda c: c['name'])
# Check
expected_result = {
# New
'searchtests_novel__setting': "Middle Earth",
'searchtests_novel__protagonist': {
'name': "Frodo Baggins",
'novel_id_filter': 4
},
'searchtests_novel__protagonist_id_filter': 8,
'searchtests_novel__characters': [
{
'name': "Bilbo Baggins"
},
{
'name': "Frodo Baggins"
},
{
'name': "Gandalf"
}
],
# Changed
'content_type': ["searchtests.Novel", "searchtests.Book"],
'_partials': ['J. R. R. Tolkien', 'Middle Earth', 'The Fellowship of the Ring', 'The Fellowship of the Ring'],
# Inherited
'pk': '4',
'title': 'The Fellowship of the Ring',
'title_edgengrams': 'The Fellowship of the Ring',
'title_filter': 'The Fellowship of the Ring',
'authors': [
{
'name': 'J. R. R. Tolkien',
'name_edgengrams': 'J. R. R. Tolkien',
'date_of_birth_filter': datetime.date(1892, 1, 3)
}
],
'authors_filter': [2],
'publication_date_filter': datetime.date(1954, 7, 29),
'number_of_pages_filter': 423,
'tags': [],
'tags_filter': []
}
self.assertDictEqual(document, expected_result)
@mock.patch('wagtail.search.backends.elasticsearch5.Elasticsearch')
class TestBackendConfiguration(TestCase):
def test_default_settings(self, Elasticsearch):
Elasticsearch5SearchBackend(params={})
Elasticsearch.assert_called_with(
hosts=[
{
'host': 'localhost',
'port': 9200,
'url_prefix': '',
'use_ssl': False,
'verify_certs': False,
'http_auth': None
}
],
timeout=10
)
def test_hosts(self, Elasticsearch):
Elasticsearch5SearchBackend(params={
'HOSTS': [
{
'host': '127.0.0.1',
'port': 9300,
'use_ssl': True,
'verify_certs': True,
}
]
})
Elasticsearch.assert_called_with(
hosts=[
{
'host': '127.0.0.1',
'port': 9300,
'use_ssl': True,
'verify_certs': True,
}
],
timeout=10
)
def test_urls(self, Elasticsearch):
        # This tests backwards compatibility with the old URLS setting
Elasticsearch5SearchBackend(params={
'URLS': [
'http://localhost:12345',
'https://127.0.0.1:54321',
'http://username:password@elasticsearch.mysite.com',
'https://elasticsearch.mysite.com/hello',
],
})
Elasticsearch.assert_called_with(
hosts=[
{
'host': 'localhost',
'port': 12345,
'url_prefix': '',
'use_ssl': False,
'verify_certs': False,
'http_auth': None,
},
{
'host': '127.0.0.1',
'port': 54321,
'url_prefix': '',
'use_ssl': True,
'verify_certs': True,
'http_auth': None,
},
{
'host': 'elasticsearch.mysite.com',
'port': 80,
'url_prefix': '',
'use_ssl': False,
'verify_certs': False,
'http_auth': ('username', 'password')
},
{
'host': 'elasticsearch.mysite.com',
'port': 443,
'url_prefix': '/hello',
'use_ssl': True,
'verify_certs': True,
'http_auth': None,
},
],
timeout=10
)
| 40.518519
| 156
| 0.561361
|
6b0154c0b158e31bab477fbb3149186a51065ec1
| 135
|
py
|
Python
|
pyipip/__init__.py
|
georgexsh/pyipip
|
48f3ab44f11ad2cd41a478f08bfdccedcecf2247
|
[
"MIT"
] | 24
|
2017-10-12T11:20:13.000Z
|
2020-12-08T15:22:03.000Z
|
pyipip/__init__.py
|
georgexsh/pyipip
|
48f3ab44f11ad2cd41a478f08bfdccedcecf2247
|
[
"MIT"
] | null | null | null |
pyipip/__init__.py
|
georgexsh/pyipip
|
48f3ab44f11ad2cd41a478f08bfdccedcecf2247
|
[
"MIT"
] | 3
|
2018-10-26T03:19:53.000Z
|
2020-12-08T08:28:23.000Z
|
__version__ = '0.1.1'
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
from .ipipdb import IPIPDatabase
| 16.875
| 61
| 0.792593
|
e0be7ca7eb91acd58c62cfd8ccf75db991592b0c
| 3,031
|
py
|
Python
|
osclib/list_command.py
|
okurz/osc-plugin-factory
|
37499194ce23e426f8be8860437b29f9d97dd533
|
[
"MIT"
] | null | null | null |
osclib/list_command.py
|
okurz/osc-plugin-factory
|
37499194ce23e426f8be8860437b29f9d97dd533
|
[
"MIT"
] | null | null | null |
osclib/list_command.py
|
okurz/osc-plugin-factory
|
37499194ce23e426f8be8860437b29f9d97dd533
|
[
"MIT"
] | null | null | null |
from osc import oscerr
from osclib.request_splitter import RequestSplitter
class ListCommand:
SOURCE_PROJECT_STRIP = [
'SUSE:SLE-12:',
'SUSE:SLE-12-',
        'openSUSE:Leap:',
'openSUSE:',
'home:',
]
def __init__(self, api):
self.api = api
def perform(self, packages=None, supersede=False):
"""
Perform the list command
"""
if supersede:
self.api.dispatch_open_requests(packages)
requests = self.api.get_open_requests()
requests_ignored = self.api.get_ignored_requests()
splitter = RequestSplitter(self.api, requests, in_ring=True)
splitter.filter_add('./action[@type="change_devel"]')
change_devel_requests = splitter.filter_only()
splitter.reset()
splitter.filter_add('./action[not(@type="add_role" or @type="change_devel")]')
splitter.group_by('./action/target/@devel_project')
splitter.split()
is_factory = self.api.project != 'openSUSE:Factory'
for group in sorted(splitter.grouped.keys()):
print group
for request in splitter.grouped[group]['requests']:
request_id = int(request.get('id'))
action = request.find('action')
target_package = action.find('target').get('package')
ring = action.find('target').get('ring')
if action.get('type') == 'delete':
ring += ' (delete request)'
line = 'sr#{}: {:<30} -> {:<12}'.format(request_id, target_package, ring)
if is_factory and action.find('source') != None:
source_project = action.find('source').get('project')
source_project = self.project_strip(source_project)
line += ' ({})'.format(source_project)
if request_id in requests_ignored:
line += '\n ignored: ' + str(requests_ignored[request_id])
print ' ', line
if len(splitter.other):
non_ring_packages = []
for request in splitter.other:
non_ring_packages.append(request.find('./action/target').get('package'))
print 'Not in a ring:', ' '.join(sorted(non_ring_packages))
if len(change_devel_requests):
print '\nChange devel requests:'
for request in change_devel_requests:
target_package = request.find('./action/target').get('package')
url = self.api.makeurl(['request', 'show', request.get('id')])
print('- request({}): {}'.format(target_package, url))
def project_strip(self, source_project):
home = source_project.startswith('home:')
for prefix in self.SOURCE_PROJECT_STRIP:
if source_project.startswith(prefix):
source_project = source_project[len(prefix):]
if home:
source_project = '~' + source_project
return source_project
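    # For example, with the strip list above a source project such as
    # 'home:someuser:branches' comes back as '~someuser:branches', and
    # 'SUSE:SLE-12:Update' comes back as 'Update'.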
| 35.658824
| 89
| 0.574728
|
dd0a8003be5f3b27893818b05a86cd2c1f10ca50
| 9,971
|
py
|
Python
|
monai/data/decathlon_datalist.py
|
Spenhouet/MONAI
|
fb66ba0625c3a64ba7cdba9811a9997b336e3702
|
[
"Apache-2.0"
] | null | null | null |
monai/data/decathlon_datalist.py
|
Spenhouet/MONAI
|
fb66ba0625c3a64ba7cdba9811a9997b336e3702
|
[
"Apache-2.0"
] | null | null | null |
monai/data/decathlon_datalist.py
|
Spenhouet/MONAI
|
fb66ba0625c3a64ba7cdba9811a9997b336e3702
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union, overload
from monai.config import KeysCollection
from monai.data.utils import partition_dataset, select_cross_validation_folds
from monai.utils import ensure_tuple
@overload
def _compute_path(base_dir: str, element: str, check_path: bool = False) -> str:
...
@overload
def _compute_path(base_dir: str, element: List[str], check_path: bool = False) -> List[str]:
...
def _compute_path(base_dir, element, check_path=False):
"""
Args:
base_dir: the base directory of the dataset.
element: file path(s) to append to directory.
        check_path: if `True`, only join with `base_dir` when the joined path exists; otherwise return the element unchanged.
Raises:
TypeError: When ``element`` contains a non ``str``.
TypeError: When ``element`` type is not in ``Union[list, str]``.
"""
def _join_path(base_dir: str, item: str):
result = os.path.normpath(os.path.join(base_dir, item))
if check_path and not os.path.exists(result):
# if not an existing path, don't join with base dir
return item
return result
if isinstance(element, str):
return _join_path(base_dir, element)
if isinstance(element, list):
for e in element:
if not isinstance(e, str):
return element
return [_join_path(base_dir, e) for e in element]
return element
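# A quick sketch of the joining behaviour above, using hypothetical paths:
#
#   _compute_path("/data", "img.nii.gz")                 -> "/data/img.nii.gz"
#   _compute_path("/data", ["a.nii.gz", "b.nii.gz"])     -> ["/data/a.nii.gz", "/data/b.nii.gz"]
#   _compute_path("/data", "label_3", check_path=True)   -> "label_3" when "/data/label_3" does not exist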
def _append_paths(base_dir: str, is_segmentation: bool, items: List[Dict]) -> List[Dict]:
"""
Args:
base_dir: the base directory of the dataset.
        is_segmentation: whether the datalist is for a segmentation task.
items: list of data items, each of which is a dict keyed by element names.
Raises:
TypeError: When ``items`` contains a non ``dict``.
"""
for item in items:
if not isinstance(item, dict):
raise TypeError(f"Every item in items must be a dict but got {type(item).__name__}.")
for k, v in item.items():
if k == "image" or is_segmentation and k == "label":
item[k] = _compute_path(base_dir, v, check_path=False)
else:
# for other items, auto detect whether it's a valid path
item[k] = _compute_path(base_dir, v, check_path=True)
return items
def load_decathlon_datalist(
data_list_file_path: str,
is_segmentation: bool = True,
data_list_key: str = "training",
base_dir: Optional[str] = None,
) -> List[Dict]:
"""Load image/label paths of decathlon challenge from JSON file
Json file is similar to what you get from http://medicaldecathlon.com/
Those dataset.json files
Args:
data_list_file_path: the path to the json file of datalist.
        is_segmentation: whether the datalist is for a segmentation task, default is True.
        data_list_key: the key to get the list of dictionaries to be used, default is "training".
base_dir: the base directory of the dataset, if None, use the datalist directory.
Raises:
ValueError: When ``data_list_file_path`` does not point to a file.
ValueError: When ``data_list_key`` is not specified in the data list file.
Returns a list of data items, each of which is a dict keyed by element names, for example:
.. code-block::
[
{'image': '/workspace/data/chest_19.nii.gz', 'label': 0},
{'image': '/workspace/data/chest_31.nii.gz', 'label': 1}
]
"""
if not os.path.isfile(data_list_file_path):
raise ValueError(f"Data list file {data_list_file_path} does not exist.")
with open(data_list_file_path) as json_file:
json_data = json.load(json_file)
if data_list_key not in json_data:
raise ValueError(f'Data list {data_list_key} not specified in "{data_list_file_path}".')
expected_data = json_data[data_list_key]
if data_list_key == "test":
expected_data = [{"image": i} for i in expected_data]
if base_dir is None:
base_dir = os.path.dirname(data_list_file_path)
return _append_paths(base_dir, is_segmentation, expected_data)
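# A minimal usage sketch; the file path and base directory below are hypothetical:
#
#   datalist = load_decathlon_datalist(
#       data_list_file_path="/workspace/data/Task09_Spleen/dataset.json",
#       is_segmentation=True,
#       data_list_key="training",
#       base_dir="/workspace/data/Task09_Spleen",
#   )
#   # datalist is a list of dicts such as {'image': '/workspace/data/Task09_Spleen/...', 'label': '...'}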
def load_decathlon_properties(data_property_file_path: str, property_keys: Union[Sequence[str], str]) -> Dict:
"""Load the properties from the JSON file contains data property with specified `property_keys`.
Args:
data_property_file_path: the path to the JSON file of data properties.
property_keys: expected keys to load from the JSON file, for example, we have these keys
in the decathlon challenge:
`name`, `description`, `reference`, `licence`, `tensorImageSize`,
`modality`, `labels`, `numTraining`, `numTest`, etc.
"""
if not os.path.isfile(data_property_file_path):
raise ValueError(f"Data property file {data_property_file_path} does not exist.")
with open(data_property_file_path) as json_file:
json_data = json.load(json_file)
properties = {}
for key in ensure_tuple(property_keys):
if key not in json_data:
raise KeyError(f"key {key} is not in the data property file.")
properties[key] = json_data[key]
return properties
def check_missing_files(
datalist: List[Dict],
keys: KeysCollection,
root_dir: Optional[Union[Path, str]] = None,
allow_missing_keys: bool = False,
):
"""Checks whether some files in the Decathlon datalist are missing.
It would be helpful to check missing files before a heavy training run.
Args:
datalist: a list of data items, every item is a dictionary.
            usually generated by the `load_decathlon_datalist` API.
keys: expected keys to check in the datalist.
root_dir: if not None, provides the root dir for the relative file paths in `datalist`.
        allow_missing_keys: whether to allow missing keys in the datalist items.
            if False, raise an exception when a key is missing; defaults to False.
Returns:
A list of missing filenames.
"""
missing_files = []
for item in datalist:
for k in ensure_tuple(keys):
if k not in item:
if not allow_missing_keys:
raise ValueError(f"key `{k}` is missing in the datalist item: {item}")
continue
for f in ensure_tuple(item[k]):
if not isinstance(f, (str, Path)):
raise ValueError(f"filepath of key `{k}` must be a string or a list of strings, but got: {f}.")
if isinstance(root_dir, (str, Path)):
f = os.path.join(root_dir, f)
if not os.path.exists(f):
missing_files.append(f)
return missing_files
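# A minimal usage sketch, assuming `datalist` was produced by `load_decathlon_datalist`
# and the hypothetical root directory holds the referenced files:
#
#   missing = check_missing_files(datalist, keys=("image", "label"), root_dir="/workspace/data")
#   if missing:
#       print(f"{len(missing)} referenced files are missing, e.g. {missing[0]}")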
def create_cross_validation_datalist(
datalist: List[Dict],
nfolds: int,
train_folds: Union[Sequence[int], int],
val_folds: Union[Sequence[int], int],
train_key: str = "training",
val_key: str = "validation",
filename: Optional[Union[Path, str]] = None,
shuffle: bool = True,
seed: int = 0,
check_missing: bool = False,
keys: Optional[KeysCollection] = None,
root_dir: Optional[str] = None,
allow_missing_keys: bool = False,
raise_error: bool = True,
):
"""
Utility to create new Decathlon style datalist based on cross validation partition.
Args:
datalist: loaded list of dictionaries for all the items to partition.
nfolds: number of the kfold split.
train_folds: indices of folds for training part.
val_folds: indices of folds for validation part.
train_key: the key of train part in the new datalist, defaults to "training".
val_key: the key of validation part in the new datalist, defaults to "validation".
filename: if not None and ends with ".json", save the new datalist into JSON file.
shuffle: whether to shuffle the datalist before partition, defaults to `True`.
seed: if `shuffle` is True, set the random seed, defaults to `0`.
check_missing: whether to check all the files specified by `keys` are existing.
        keys: if not None and `check_missing` is True, the expected keys to check in the datalist.
        root_dir: if not None, provides the root dir for the relative file paths in `datalist`.
        allow_missing_keys: if `check_missing` is True, whether to allow missing keys in the datalist items.
            if False, raise an exception when a key is missing; defaults to False.
        raise_error: when missing files are found, if `True`, raise an exception and stop; if `False`, print a warning.
"""
if check_missing and keys is not None:
files = check_missing_files(datalist, keys, root_dir, allow_missing_keys)
if files:
msg = f"some files of the datalist are missing: {files}"
if raise_error:
raise ValueError(msg)
warnings.warn(msg)
data = partition_dataset(data=datalist, num_partitions=nfolds, shuffle=shuffle, seed=seed)
train_list = select_cross_validation_folds(partitions=data, folds=train_folds)
val_list = select_cross_validation_folds(partitions=data, folds=val_folds)
ret = {train_key: train_list, val_key: val_list}
if isinstance(filename, (str, Path)):
json.dump(ret, open(filename, "w"), indent=4)
return ret
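# A minimal usage sketch for a 5-fold split that trains on folds 0-3 and validates on
# fold 4; the output file name is hypothetical:
#
#   folds = create_cross_validation_datalist(
#       datalist=datalist,
#       nfolds=5,
#       train_folds=[0, 1, 2, 3],
#       val_folds=4,
#       filename="dataset_fold4.json",
#       shuffle=True,
#       seed=0,
#   )
#   train_items, val_items = folds["training"], folds["validation"]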
| 39.884
| 115
| 0.667536
|
2b4f28a5015e0a1c8214f3c80389e414ab7f4f0b
| 7,315
|
py
|
Python
|
xp/tests/var_expansion.py
|
druths/xp
|
a4f66ae3551fc0b5c66ece816a8276bd7b7e3ccf
|
[
"Apache-2.0"
] | 46
|
2016-03-05T22:56:04.000Z
|
2021-12-27T22:25:08.000Z
|
xp/tests/var_expansion.py
|
druths/xp
|
a4f66ae3551fc0b5c66ece816a8276bd7b7e3ccf
|
[
"Apache-2.0"
] | 15
|
2016-03-05T04:19:00.000Z
|
2021-01-29T05:00:24.000Z
|
xp/tests/var_expansion.py
|
druths/xp
|
a4f66ae3551fc0b5c66ece816a8276bd7b7e3ccf
|
[
"Apache-2.0"
] | 11
|
2016-02-22T19:38:41.000Z
|
2021-11-06T00:55:01.000Z
|
"""
Copyright 2016 Derek Ruths
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from xp.pipeline import get_pipeline, expand_variables, PIPELINE_PREFIX_VARNAME, ParseException, USE_FILE_PREFIX
import xp.pipeline as pipeline
import os, os.path
import shutil
BASE_PATH = os.path.dirname(__file__)
def get_complete_filename(fname):
return os.path.join(BASE_PATH,'pipelines',fname)
class InnerVarExpansionTestCase(unittest.TestCase):
"""
This tests the functionality of the expand_variables function.
"""
def test_basic1(self):
context = {'var1':'hello'}
cwd = '.'
exval = expand_variables('$var1.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'hello.txt')
exval = expand_variables('${var1}.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'hello.txt')
exval = expand_variables('hello_$var1.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'hello_hello.txt')
def test_multvars1(self):
context = {'var1':'hello', 'foobar':'test'}
cwd = '.'
exval = expand_variables('${var1}_$foobar.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'hello_test.txt')
exval = expand_variables('${var1}_${foobar}.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'hello_test.txt')
exval = expand_variables('echo $var1 $foobar',context,cwd,None,None,-1)
self.assertEquals(exval,'echo hello test')
def test_pipeline_fxn1(self):
context = {'var1':'hello', PIPELINE_PREFIX_VARNAME:'/foo/bar_'}
cwd = '.'
exval = expand_variables('touch $PLN(test1.txt)',context,cwd,None,None,-1)
self.assertEquals(exval,'touch /foo/bar_test1.txt')
exval = expand_variables('touch $PLN($var1.txt)',context,cwd,None,None,-1)
self.assertEquals(exval,'touch /foo/bar_hello.txt')
exval = expand_variables('touch $PLN(${var1}.txt)',context,cwd,None,None,-1)
self.assertEquals(exval,'touch /foo/bar_hello.txt')
def test_shell_fxn1(self):
context = {'var1':'hello'}
cwd = '.'
exval = expand_variables('touch $(echo hi)',context,cwd,None,None,-1)
self.assertEquals(exval,'touch hi')
def test_shell_fxn_newline_error1(self):
context = {'var1':'hello'}
cwd = '.'
raised_exc = False
try:
            expand_variables('touch $(ls)',context,cwd,None,None,-1)
except Exception:
raised_exc = True
self.assertTrue(raised_exc,'inline shell functions should fail on output containing newlines')
def test_escape1(self):
context = {'var1':'hello'}
cwd = '.'
exval = expand_variables('\$var1.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'$var1.txt')
exval = expand_variables('\${var1.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'${var1.txt')
exval = expand_variables('\${var1}.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'${var1}.txt')
exval = expand_variables('hello_$var1.txt',context,cwd,None,None,-1)
self.assertEquals(exval,'hello_hello.txt')
def test_invalid_escape1(self):
context = {'var1':'hello'}
cwd = '.'
try:
exval = expand_variables(r'\n',context,cwd,None,None,-1)
self.fail('\\n should raise a ParseException')
except ParseException as e:
pass
except:
self.fail('\\n should raise a ParseException')
def test_grep_error(self):
cmd = """grep -o '/article/[^"]+' $PLN(lists_html/\$TAG.*) > $PLN(article_lists/\$TAG.txt)"""
cwd = '.'
context = {PIPELINE_PREFIX_VARNAME:'/foo/bar_'}
exval = expand_variables(cmd,context,cwd,None,None,-1)
# this should get here without an exception...
class VarExpansionTestCase(unittest.TestCase):
def test_varexpand1(self):
"""
This test case checks:
- basic variable expansion
- basic pipeline filename expansion
- nested pipeline filename expansion
"""
p = get_pipeline(get_complete_filename('varexpand1'),default_prefix=USE_FILE_PREFIX)
p.unmark_all_tasks(recur=True)
p.run()
# check the output
self.assertTrue(os.path.exists(get_complete_filename('hello_world.txt')))
self.assertTrue(os.path.exists(get_complete_filename('varexpand1_pln_expand1.txt')))
self.assertTrue(os.path.exists(get_complete_filename('varexpand1_hello.txt')))
self.assertTrue(os.path.exists(get_complete_filename('varexpand1_pytest.dat')))
os.remove(get_complete_filename('hello_world.txt'))
os.remove(get_complete_filename('varexpand1_pln_expand1.txt'))
os.remove(get_complete_filename('varexpand1_hello.txt'))
os.remove(get_complete_filename('varexpand1_pytest.dat'))
p.unmark_all_tasks(recur=True)
def test_used_pln_expand1(self):
"""
        This test and pln_expand2 (the next one) concern the rather nuanced interpretation of
        using another pipeline: only those tasks in the used pipeline that are in the
        dependency chain for the tasks in the present pipeline are invoked.
"""
p = get_pipeline(get_complete_filename('sdir_prefix2'),default_prefix=USE_FILE_PREFIX)
p.unmark_all_tasks(recur=True)
p.run()
# check the output
self.assertTrue(os.path.exists(get_complete_filename(os.path.join('sdir_foo2','bar','hello_world.txt'))))
shutil.rmtree(get_complete_filename('sdir_foo2'))
p.unmark_all_tasks(recur=True)
def test_used_pln_expand2(self):
p = get_pipeline(get_complete_filename('sdir_prefix3'),default_prefix=USE_FILE_PREFIX)
p.unmark_all_tasks(recur=True)
p.run()
# check the output
self.assertTrue(os.path.exists(get_complete_filename(os.path.join('sdir_foo2','bar','hello_world.txt'))))
self.assertTrue(os.path.exists(get_complete_filename(os.path.join('sdir_foo','bar','hello_world.txt'))))
shutil.rmtree(get_complete_filename('sdir_foo'))
shutil.rmtree(get_complete_filename('sdir_foo2'))
p.unmark_all_tasks(recur=True)
def test_plnref_varexpand1(self):
"""
This test case checks:
- basic variable expansion
- basic pipeline filename expansion
- nested pipeline filename expansion
"""
p = get_pipeline(get_complete_filename('plnref_varexpand1'),default_prefix=USE_FILE_PREFIX)
context = p.get_context()
self.assertTrue(context['VAR1'],'test')
self.assertTrue(context['VAR2'],'varexpand1_test_xyz')
| 36.758794
| 113
| 0.658783
|
6ac0a7f41c0ab8be5494b68086c795f0d67364cf
| 1,802
|
py
|
Python
|
lesson2/13Strings.py
|
katyduncan/pythonintro
|
facf3c623a0634b3cf1e117959aba810b126aeed
|
[
"MIT"
] | null | null | null |
lesson2/13Strings.py
|
katyduncan/pythonintro
|
facf3c623a0634b3cf1e117959aba810b126aeed
|
[
"MIT"
] | null | null | null |
lesson2/13Strings.py
|
katyduncan/pythonintro
|
facf3c623a0634b3cf1e117959aba810b126aeed
|
[
"MIT"
] | null | null | null |
print("hello")
print('hello')
welcome_message = "Hello, welcome to Udacity!"
print(welcome_message)
# pet_halibut = "Why should I be tarred with the epithet "loony" merely because I have a pet halibut?""
# SyntaxError: invalid syntax
pet_halibut = 'Why should I be tarred with the epithet "loony" merely because I have a pet halibut?'
salesman = '"I think you\'re an encyclopedia salesman"'
this_string = "Simon's skateboard is in the garage."
print(this_string)
# Combine strings with +
first_word = "Hello"
second_word = "There"
print(first_word + " " + second_word)
print(first_word[0])
print(first_word[1])
# Repeat strings with *
word = "Hello"
print(word * 5)
# Length function
udacity_length = len("Udacity")
print(udacity_length)
print(len("ababa") / len("ab"))
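# len("ababa") is 5 and len("ab") is 2, so the line above prints 2.5
# (the / operator always performs true division in Python 3).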
# QUIZ
# TODO: Fix this string!
# ford_quote = 'Whether you think you can, or you think you can't--you're right.'
ford_quote = 'Whether you think you can, or you think you can\'t--you\'re right.'
# TODO: print a log message using the variables above.
# The message should have the same format as this one:
# "Yogesh accessed the site http://petshop.com/pets/reptiles/pythons at 16:20."
username = "Kinari"
timestamp = "04:50"
url = "http://petshop.com/pets/mammals/cats"
message = username + " accessed the site " + url + " at " + timestamp + "."
print(message)
# Quiz len()
given_name = "William"
middle_names = "Bradley"
family_name = "Pitt"
#todo: calculate how long this name is
full_name = given_name + " " + middle_names + " " + family_name
name_length = len(full_name)
print (name_length)
# Now we check to make sure that the name fits within the driving license character limit
# Nothing you need to do here
driving_license_character_limit = 28
print(name_length <= driving_license_character_limit)
# len(835)  # this raises a TypeError: an int has no len()
| 27.723077
| 103
| 0.72919
|
881f6050a73d1923bde296c36e06c49823cae75c
| 13,682
|
py
|
Python
|
cls_model_zoo/densenet.py
|
MaybeShewill-CV/image-classification-tensorflow
|
1587fa7acfaba6d33fb07c2c25248570c5d41927
|
[
"MIT"
] | 4
|
2021-03-24T13:39:10.000Z
|
2021-12-16T16:49:22.000Z
|
cls_model_zoo/densenet.py
|
MaybeShewill-CV/image-classification-tensorflow
|
1587fa7acfaba6d33fb07c2c25248570c5d41927
|
[
"MIT"
] | null | null | null |
cls_model_zoo/densenet.py
|
MaybeShewill-CV/image-classification-tensorflow
|
1587fa7acfaba6d33fb07c2c25248570c5d41927
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021/3/25 1:25 PM
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/image-classification-tensorflow
# @File : densenet.py
# @IDE: PyCharm
"""
DenseNet model
"""
import time
import numpy as np
import tensorflow as tf
from cls_model_zoo import cnn_basenet
from cls_model_zoo import loss
from local_utils import config_utils
class DenseNet(cnn_basenet.CNNBaseModel):
"""
densenet model for image classification
"""
def __init__(self, phase, cfg):
"""
:param phase:
:param cfg:
"""
super(DenseNet, self).__init__()
self._cfg = cfg
self._phase = phase
self._is_training = self._is_net_for_training()
self._block_nums = self._cfg.MODEL.DENSENET.BLOCK_NUMS
self._densenet_size = cfg.MODEL.DENSENET.NET_SIZE
self._block_depths = self._get_block_sizes()
self._growth_rate = self._cfg.MODEL.DENSENET.GROWTH_RATE
self._with_bc = self._cfg.MODEL.DENSENET.ENABLE_BC
self._bc_theta = self._cfg.MODEL.DENSENET.BC_THETA
self._composite_channels = self._cfg.MODEL.DENSENET.COMPOSITE_CHANNELS
self._loss_type = self._cfg.SOLVER.LOSS_TYPE.lower()
self._loss_func = getattr(loss, '{:s}_loss'.format(self._loss_type))
self._class_nums = self._cfg.DATASET.NUM_CLASSES
self._weights_decay = self._cfg.SOLVER.WEIGHT_DECAY
self._enable_dropout = self._cfg.TRAIN.DROPOUT.ENABLE
if self._enable_dropout:
self._dropout_keep_prob = self._cfg.TRAIN.DROPOUT.KEEP_PROB
self._enable_label_smooth = self._cfg.TRAIN.LABEL_SMOOTH.ENABLE
if self._enable_label_smooth:
self._smooth_value = self._cfg.TRAIN.LABEL_SMOOTH.SMOOTH_VALUE
else:
self._smooth_value = 0.0
def __str__(self):
"""
:return:
"""
encoder_info = 'A densenet with net size: {:d} block nums: ' \
                       '{:d} growth rate: {:d} block depths: {}'. \
format(self._densenet_size, self._block_nums, self._growth_rate, self._block_depths)
return encoder_info
def _is_net_for_training(self):
"""
if the net is used for training or not
:return:
"""
if isinstance(self._phase, tf.Tensor):
phase = self._phase
else:
phase = tf.constant(self._phase, dtype=tf.string)
return tf.equal(phase, tf.constant('train', dtype=tf.string))
def _get_block_sizes(self):
"""
:return:
"""
densenet_name = 'densenet-{:d}'.format(self._densenet_size)
block_sizes = {
'densenet-121': [6, 12, 24, 16],
'densenet-169': [6, 12, 32, 32],
'densenet-201': [6, 12, 48, 32],
'densenet-264': [6, 12, 64, 48],
}
try:
return block_sizes[densenet_name]
except KeyError:
            raise RuntimeError('Wrong densenet name, only '
                               '[densenet-121, densenet-169, densenet-201, '
                               'densenet-264] supported')
def _densenet_conv_block(self, input_tensor, k_size, output_channels, name,
stride=1, padding='VALID', need_bn=True, use_bias=False):
"""
:param input_tensor:
:param k_size:
:param output_channels:
:param name:
:param stride:
:param padding:
:param need_bn:
:param use_bias:
:return:
"""
with tf.variable_scope(name_or_scope=name):
result = self.conv2d(
inputdata=input_tensor,
out_channel=output_channels,
kernel_size=k_size,
padding=padding,
stride=stride,
use_bias=use_bias,
name='conv'
)
if need_bn:
result = self.layerbn(inputdata=result, is_training=self._is_training, name='bn', scale=False)
result = self.relu(inputdata=result, name='densenet_conv_block_output')
return result
def _composite_conv(self, inputdata, out_channel, name):
"""
Implement the composite function mentioned in DenseNet paper
:param inputdata:
:param out_channel:
:param name:
:return:
"""
with tf.variable_scope(name):
output = self.layerbn(inputdata=inputdata, is_training=self._is_training, name='bn_1')
output = self.relu(output, name='relu_1')
if self._with_bc:
output = self.conv2d(
inputdata=output, out_channel=self._composite_channels,
kernel_size=1, padding='SAME', stride=1, use_bias=False, name='conv_1'
)
output = self.layerbn(inputdata=output, is_training=self._is_training, name='bn_2')
output = self.relu(inputdata=output, name='relu_2')
output = self.conv2d(
inputdata=output, out_channel=out_channel,
kernel_size=3, stride=1, padding='SAME', use_bias=False, name='conv_2')
else:
output = self.conv2d(
inputdata=output, out_channel=out_channel, kernel_size=3,
stride=1, padding='SAME', use_bias=False, name='conv_2')
return output
def _denseconnect_layers(self, inputdata, name):
"""
        Mainly implements equation (2) in the DenseNet paper: concatenate the
        dense block feature maps.
:param inputdata:
:param name:
:return:
"""
with tf.variable_scope(name):
conv_out = self._composite_conv(
inputdata=inputdata,
name='composite_conv',
out_channel=self._growth_rate
)
concate_cout = tf.concat(
values=[conv_out, inputdata], axis=3, name='concatenate'
)
return concate_cout
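    # With this dense connectivity each layer adds `growth_rate` channels to the running
    # concatenation, so a block of depth d fed c0 channels ends with roughly
    # c0 + d * growth_rate feature maps. For example, assuming growth_rate = 32 and a
    # 6-layer first block fed the 2 * 32 = 64 channels produced by conv1, the block
    # output has 64 + 6 * 32 = 256 channels before the transition layer.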
def _transition_layers(self, inputdata, name):
"""
Mainly implement the Pooling layer mentioned in DenseNet paper
:param inputdata:
:param name:
:return:
"""
input_channels = inputdata.get_shape().as_list()[3]
with tf.variable_scope(name):
# First batch norm
output = self.layerbn(inputdata=inputdata, is_training=self._is_training, name='bn')
# Second 1*1 conv
if self._with_bc:
out_channels = int(input_channels * self._bc_theta)
else:
out_channels = input_channels
output = self.conv2d(
inputdata=output,
out_channel=out_channels,
kernel_size=1,
stride=1,
use_bias=False,
name='conv'
)
# Third average pooling
output = self.avgpooling(
inputdata=output,
kernel_size=2,
stride=2,
name='avgpool'
)
return output
def _dense_block(self, inputdata, name, block_depth):
"""
Mainly implement the dense block mentioned in DenseNet figure 1
:param inputdata:
:param name:
:param block_depth:
:return:
"""
block_input = inputdata
with tf.variable_scope(name):
for i in range(block_depth):
block_layer_name = '{:s}_layer_{:d}'.format(name, i + 1)
block_input = self._denseconnect_layers(
inputdata=block_input,
name=block_layer_name
)
return block_input
def _build_net(self, input_tensor, name, reuse=False):
"""
:param input_tensor:
:param name:
:param reuse:
:return:
"""
with tf.variable_scope(name_or_scope=name, reuse=reuse):
conv1 = self._densenet_conv_block(
input_tensor=input_tensor,
k_size=7,
output_channels=2 * self._growth_rate,
stride=2,
padding='SAME',
need_bn=True,
use_bias=False,
name='conv1'
)
max_pool1 = self.maxpooling(inputdata=conv1, kernel_size=3, stride=2, padding='SAME', name='max_pool1')
dense_block_input = max_pool1
# Second apply dense block stage
for dense_block_nums in range(self._block_nums):
dense_block_name = 'Dense_Block_{:d}'.format(dense_block_nums + 1)
# dense connectivity
dense_block_out = self._dense_block(
inputdata=dense_block_input,
name=dense_block_name,
block_depth=self._block_depths[dense_block_nums]
)
if dense_block_nums == self._block_nums - 1:
break
                # apply the transition part
dense_block_out = self._transition_layers(
inputdata=dense_block_out,
name=dense_block_name
)
dense_block_input = dense_block_out
output_tensor = self.globalavgpooling(
inputdata=dense_block_out,
name='global_average_pooling'
)
if self._enable_dropout:
output_tensor = tf.cond(
self._is_training,
true_fn=lambda: self.dropout(
inputdata=output_tensor,
keep_prob=self._dropout_keep_prob,
name='dropout_train'
),
false_fn=lambda: tf.identity(output_tensor, name='dropout_test')
)
output_tensor = self.fullyconnect(
inputdata=output_tensor,
out_dim=self._class_nums,
name='final_logits'
)
return output_tensor
def inference(self, input_tensor, name, reuse=False):
"""
:param input_tensor:
:param name:
:param reuse:
:return:
"""
logits = self._build_net(
input_tensor=input_tensor,
name=name,
reuse=reuse
)
return logits
def compute_loss(self, input_tensor, label, name, reuse=False):
"""
:param input_tensor:
:param label:
:param name:
:param reuse:
:return:
"""
logits = self._build_net(
input_tensor=input_tensor,
name=name,
reuse=reuse
)
with tf.variable_scope('densenet_loss', reuse=reuse):
ret = self._loss_func(
logits=logits,
label_tensor=label,
weight_decay=self._weights_decay,
l2_vars=tf.trainable_variables(),
use_label_smooth=self._enable_label_smooth,
lb_smooth_value=self._smooth_value,
class_nums=self._class_nums,
)
return ret
def get_model(phase, cfg):
"""
:param phase:
:param cfg:
:return:
"""
return DenseNet(phase=phase, cfg=cfg)
def _stats_graph(graph):
"""
:param graph:
:return:
"""
flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
params = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
print('FLOPs: {}; Trainable params: {}'.format(flops.total_float_ops, params.total_parameters))
return
def _inference_time_profile():
"""
:return:
"""
tf.reset_default_graph()
cfg = config_utils.get_config(config_file_path='./config/ilsvrc_2012_densenet.yaml')
test_input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 224, 224, 3], name='test_input')
test_label_tensor = tf.placeholder(dtype=tf.int32, shape=[1], name='test_label')
model = get_model(phase='train', cfg=cfg)
test_result = model.compute_loss(
input_tensor=test_input_tensor,
label=test_label_tensor,
name='DenseNet',
reuse=False
)
tmp_logits = model.inference(input_tensor=test_input_tensor, name='DenseNet', reuse=True)
print(test_result)
print(tmp_logits)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
test_input = np.random.random((1, 224, 224, 3)).astype(np.float32)
t_start = time.time()
loop_times = 5000
for i in range(loop_times):
_ = sess.run(tmp_logits, feed_dict={test_input_tensor: test_input})
t_cost = time.time() - t_start
print('Cost time: {:.5f}s'.format(t_cost / loop_times))
print('Inference time: {:.5f} fps'.format(loop_times / t_cost))
print('Complete')
def _model_profile():
"""
:return:
"""
tf.reset_default_graph()
cfg = config_utils.get_config(config_file_path='./config/ilsvrc_2012_densenet.yaml')
test_input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 224, 224, 3], name='test_input')
model = get_model(phase='test', cfg=cfg)
_ = model.inference(input_tensor=test_input_tensor, name='DenseNet', reuse=False)
with tf.Session() as sess:
_stats_graph(sess.graph)
print('Complete')
if __name__ == '__main__':
"""
test code
"""
_model_profile()
_inference_time_profile()
| 32.732057
| 115
| 0.571115
|
7efe91aae3deedf2efdf46b70ade6a3bdf7749da
| 2,598
|
py
|
Python
|
solvate/__init__.py
|
michaltykac/SolvateAminoAcids
|
cde6364a1ab9f4188975e1e4ea7296d6655cc6a9
|
[
"BSD-3-Clause"
] | 1
|
2020-12-08T16:24:33.000Z
|
2020-12-08T16:24:33.000Z
|
solvate/__init__.py
|
michaltykac/SolvateAminoAcids
|
cde6364a1ab9f4188975e1e4ea7296d6655cc6a9
|
[
"BSD-3-Clause"
] | null | null | null |
solvate/__init__.py
|
michaltykac/SolvateAminoAcids
|
cde6364a1ab9f4188975e1e4ea7296d6655cc6a9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
# \file __init__.py
# \brief This file initialises the package.
#
# This file firstly denotes this folder as containing python package and secondly it makes some of the solvate
# parts easily accessible.
#
# Copyright by the Authors and individual contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3) Neither the name of Michal Tykac nor the names of this code's contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# This software is provided by the copyright holder and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or the contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services, loss of use, data or profits, or business interruption) however caused and on any theory of liability, whether in contract, strict liability or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
#
# \author Michal Tykac
# \author Lada Biedermannová
# \author Jiří Černý
# \version 0.1.0
# \date DEC 2020
######################################################
from solvate.solvate_globals import globalSettings
from solvate.solvate_log import startLog, endLog
from solvate.solvate_structures import parseInputCoordinates
from solvate.solvate_structures import getAllFragmentFragments
from solvate.solvate_structures import getAllResidueFragments
from solvate.solvate_structures import combineAndAddWaters
from solvate.solvate_structures import writeOutStructures
from solvate.solvate_matchFragments import matchFragments
from solvate.solvate_predictWaters import predictWaters
from solvate.solvate_predictWaters import removeClashes
from solvate.solvate_predictWaters import clusterWaters
| 74.228571
| 760
| 0.789838
|
aa465ebf62cb9434237fe46c28253e3398e6213b
| 1,842
|
py
|
Python
|
tests/test_det_city_council.py
|
Anphisa/city-scrapers
|
d8daf6c7a8207efc209277c017faffda430b2ef3
|
[
"MIT"
] | null | null | null |
tests/test_det_city_council.py
|
Anphisa/city-scrapers
|
d8daf6c7a8207efc209277c017faffda430b2ef3
|
[
"MIT"
] | null | null | null |
tests/test_det_city_council.py
|
Anphisa/city-scrapers
|
d8daf6c7a8207efc209277c017faffda430b2ef3
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import pytest # noqa
from freezegun import freeze_time
from tests.utils import file_response
from city_scrapers.constants import COMMITTEE, TENTATIVE
from city_scrapers.spiders.det_city_council import DetCityCouncilSpider
freezer = freeze_time('2019-02-22')
freezer.start()
test_response = file_response(
'files/det_city_council.html',
url='https://detroitmi.gov/events/public-health-and-safety-standing-committee-02-25-19'
)
spider = DetCityCouncilSpider()
item = spider.parse_event_page(test_response)
freezer.stop()
def test_title():
assert item['title'] == 'Public Health and Safety Standing Committee'
def test_description():
assert item['description'] == ''
def test_start():
assert item['start'] == datetime(2019, 2, 25, 10, 0)
def test_end():
assert item['end'] == datetime(2019, 2, 25, 13, 0)
def test_time_notes():
assert item['time_notes'] == 'Estimated 3 hour duration'
def test_id():
assert item['id'
] == 'det_city_council/201902251000/x/public_health_and_safety_standing_committee'
def test_status():
assert item['status'] == TENTATIVE
def test_location():
assert item['location'] == {
'name': 'Committee of the Whole Room',
'address': '2 Woodward Avenue, Suite 1300 Detroit, MI 48226'
}
def test_source():
assert item[
'source'
] == 'https://detroitmi.gov/events/public-health-and-safety-standing-committee-02-25-19'
def test_links():
assert item['links'] == [{
'href':
'https://detroitmi.gov/sites/detroitmi.localhost/files/events/2019-02/cal%202-25-19%20PHS.pdf', # noqa
'title': 'cal 2-25-19 PHS.pdf'
}]
def test_all_day():
assert item['all_day'] is False
def test_classification():
assert item['classification'] == COMMITTEE
| 23.316456
| 115
| 0.689468
|
164ee4a5debd7665d2804744119f59832d1c50f7
| 4,774
|
py
|
Python
|
landbosse/model/Manager.py
|
jp5000/LandBOSSE
|
88b471fa71e0ee17a35ee8a2843a256b04483865
|
[
"Apache-2.0"
] | null | null | null |
landbosse/model/Manager.py
|
jp5000/LandBOSSE
|
88b471fa71e0ee17a35ee8a2843a256b04483865
|
[
"Apache-2.0"
] | null | null | null |
landbosse/model/Manager.py
|
jp5000/LandBOSSE
|
88b471fa71e0ee17a35ee8a2843a256b04483865
|
[
"Apache-2.0"
] | null | null | null |
import traceback
import math
from .ManagementCost import ManagementCost
from .FoundationCost import FoundationCost
from .SubstationCost import SubstationCost
from .GridConnectionCost import GridConnectionCost
from .SitePreparationCost import SitePreparationCost
from .CollectionCost import Cable, Array, ArraySystem
from .ErectionCost import ErectionCost
from .DevelopmentCost import DevelopmentCost
class Manager:
"""
The Manager class distributes input and output dictionaries among
the various modules. It maintains the hierarchical dictionary
structure.
"""
def __init__(self, input_dict, output_dict):
"""
This initializer sets up the instance variables of:
self.cost_modules: A list of cost module instances. Each of the
instances must implement the method input_output.
self.input_dict: A placeholder for the inputs dictionary
self.output_dict: A placeholder for the output dictionary
"""
self.input_dict = input_dict
self.output_dict = output_dict
def execute_landbosse(self, project_name):
try:
# Create weather window that will be used for all tasks (window for entire project; selected to restrict to seasons and hours specified)
weather_data_user_input = self.input_dict['weather_window']
season_construct = self.input_dict['season_construct']
time_construct = self.input_dict['time_construct']
daily_operational_hours = self.input_dict['hour_day'][time_construct]
# Filtered window. Restrict to the seasons and hours specified.
filtered_weather_window = weather_data_user_input.loc[(weather_data_user_input['Season'].isin(season_construct)) & (weather_data_user_input['Time window'] == time_construct)]
filtered_weather_window = filtered_weather_window[0:(math.ceil(self.input_dict['construct_duration'] * 30 * daily_operational_hours))]
# Rename weather data to specify types
self.input_dict['weather_window'] = filtered_weather_window
self.input_dict['weather_data_user_input'] = weather_data_user_input
foundation_cost = FoundationCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
foundation_cost.run_module()
roads_cost = SitePreparationCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
roads_cost.run_module()
substation_cost = SubstationCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
substation_cost.run_module()
transdist_cost = GridConnectionCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
transdist_cost.run_module()
collection_cost = ArraySystem(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
collection_cost.run_module()
development_cost = DevelopmentCost(input_dict=self.input_dict, output_dict=self.output_dict,
project_name=project_name)
development_cost.run_module()
erection_cost_output_dict = dict()
erection_cost = ErectionCost(
input_dict=self.input_dict,
output_dict=self.output_dict,
project_name=project_name
)
erection_cost.run_module()
self.output_dict['erection_cost'] = erection_cost_output_dict
total_costs = self.output_dict['total_collection_cost']
total_costs = total_costs.append(self.output_dict['total_road_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_transdist_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_substation_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_foundation_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_erection_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_development_cost'],sort=False)
self.input_dict['project_value_usd'] = total_costs.sum(numeric_only=True)[0]
self.input_dict['foundation_cost_usd'] = self.output_dict['total_foundation_cost'].sum(numeric_only=True)[0]
management_cost = ManagementCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
management_cost.run_module()
return 0
except Exception:
traceback.print_exc()
return 1 # module did not run successfully
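# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The concrete
# contents of `input_dict` (weather window dataframe, season/time
# settings, turbine and project parameters, ...) depend on the LandBOSSE
# input pipeline and are assumed to have been assembled elsewhere; only
# the call pattern of this class is illustrated:
#
#   output_dict = {}
#   manager = Manager(input_dict=input_dict, output_dict=output_dict)
#   status = manager.execute_landbosse(project_name="example_project")
#   if status == 0:
#       print(input_dict["project_value_usd"])
# ----------------------------------------------------------------------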
| 49.216495
| 186
| 0.711982
|
0113a7da0a7f538dcd9ddec7f84e0dcb1d625851
| 1,679
|
py
|
Python
|
warbler.py/analyze/2.0-audio.py
|
braycarlson/warbler.py
|
6746dc6479c9360811634c2d627606d788538d7e
|
[
"MIT"
] | null | null | null |
warbler.py/analyze/2.0-audio.py
|
braycarlson/warbler.py
|
6746dc6479c9360811634c2d627606d788538d7e
|
[
"MIT"
] | null | null | null |
warbler.py/analyze/2.0-audio.py
|
braycarlson/warbler.py
|
6746dc6479c9360811634c2d627606d788538d7e
|
[
"MIT"
] | 1
|
2022-03-31T21:44:03.000Z
|
2022-03-31T21:44:03.000Z
|
import numpy as np
import shutil
import matplotlib.pyplot as plt
from parameters import Parameters
from path import DATA, SEGMENT
from pathlib import Path
from scipy.io import wavfile
from spectrogram.axes import SpectrogramAxes
from spectrogram.plot import (
create_luscinia_spectrogram,
# create_spectrogram
)
from vocalseg.utils import (
butter_bandpass_filter,
int16tofloat32
)
# Print without truncation
np.set_printoptions(threshold=np.inf)
# Parameters
file = SEGMENT.joinpath('parameters.json')
parameters = Parameters(file)
def main():
parameters.n_fft = 4096
parameters.hop_length_ms = 1
parameters.win_length_ms = 5
parameters.ref_level_db = 50
parameters.pre = 0.97
parameters.min_level_db = -50
parameters.butter_lowcut = 1500
parameters.butter_highcut = 10000
# path = DATA.joinpath('LLbLg_STE2017/wav/STE01.1_LLbLg2017.wav')
path = DATA.joinpath('RbRY_STE2017/wav/STE01_RbRY2017.wav')
create_luscinia_spectrogram(path, parameters)
plt.savefig(
path.stem,
bbox_inches='tight',
pad_inches=0.5
)
plt.show()
plt.close()
rate, data = wavfile.read(path)
data = butter_bandpass_filter(
int16tofloat32(data),
parameters.butter_lowcut,
parameters.butter_highcut,
rate
)
original = 'original' + '_' + path.name
filtered = 'filtered' + '_' + path.name
Path(original).unlink(missing_ok=True)
Path(filtered).unlink(missing_ok=True)
# Copy the original .wav
shutil.copy(path, original)
# Save the filtered .wav
wavfile.write(filtered, rate, data)
if __name__ == '__main__':
main()
| 22.386667
| 69
| 0.702204
|
d8634d6c20ecfc527c1cd54fbee7e97302c6a7a9
| 2,363
|
py
|
Python
|
examples/example_asr.py
|
maciekszul/python-meegkit
|
a26e66b362d16c8865c13c488c588400719bae7d
|
[
"BSD-3-Clause"
] | 80
|
2018-02-13T13:51:09.000Z
|
2022-03-31T19:35:09.000Z
|
examples/example_asr.py
|
maciekszul/python-meegkit
|
a26e66b362d16c8865c13c488c588400719bae7d
|
[
"BSD-3-Clause"
] | 56
|
2019-03-13T14:55:42.000Z
|
2022-01-10T15:40:41.000Z
|
examples/example_asr.py
|
maciekszul/python-meegkit
|
a26e66b362d16c8865c13c488c588400719bae7d
|
[
"BSD-3-Clause"
] | 23
|
2018-06-29T07:24:19.000Z
|
2022-03-21T09:25:51.000Z
|
"""
ASR example
===========
Denoise data using Artifact Subspace Reconstruction.
Uses meegkit.ASR().
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from meegkit.asr import ASR
from meegkit.utils.matrix import sliding_window
# THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
raw = np.load(os.path.join('..', 'tests', 'data', 'eeg_raw.npy'))
sfreq = 250
###############################################################################
# Calibration and processing
# -----------------------------------------------------------------------------
# Train on a clean portion of data
asr = ASR(method='euclid')
train_idx = np.arange(0 * sfreq, 30 * sfreq, dtype=int)
_, sample_mask = asr.fit(raw[:, train_idx])
# Apply filter using sliding (non-overlapping) windows
X = sliding_window(raw, window=int(sfreq), step=int(sfreq))
Y = np.zeros_like(X)
for i in range(X.shape[1]):
Y[:, i, :] = asr.transform(X[:, i, :])
raw = X.reshape(8, -1) # reshape to (n_chans, n_times)
clean = Y.reshape(8, -1)
###############################################################################
# Plot the results
# -----------------------------------------------------------------------------
#
# Data was trained on a 30 s window from 0 s to 30 s (gray filled area), as
# selected by ``train_idx`` above.
# The algorithm then removes portions of this data with high amplitude
# artifacts before running the calibration (hatched area = good).
times = np.arange(raw.shape[-1]) / sfreq
f, ax = plt.subplots(8, sharex=True, figsize=(8, 5))
for i in range(8):
ax[i].fill_between(train_idx / sfreq, 0, 1, color='grey', alpha=.3,
transform=ax[i].get_xaxis_transform(),
label='calibration window')
ax[i].fill_between(train_idx / sfreq, 0, 1, where=sample_mask.flat,
transform=ax[i].get_xaxis_transform(),
facecolor='none', hatch='...', edgecolor='k',
label='selected window')
ax[i].plot(times, raw[i], lw=.5, label='before ASR')
ax[i].plot(times, clean[i], label='after ASR', lw=.5)
ax[i].set_ylim([-50, 50])
ax[i].set_ylabel(f'ch{i}')
ax[i].set_yticks([])
ax[i].set_xlabel('Time (s)')
ax[0].legend(fontsize='small', bbox_to_anchor=(1.04, 1), borderaxespad=0)
plt.subplots_adjust(hspace=0, right=0.75)
plt.suptitle('Before/after ASR')
plt.show()
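###############################################################################
# Re-using the calibrated filter
# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original example): once ``asr.fit`` has
# been run on a calibration segment, the same fitted instance can be applied
# to any further chunk of data with ``asr.transform``. The one-second chunk
# below is an arbitrary choice for the sketch, not a requirement of ASR.
new_chunk = raw[:, :int(sfreq)]          # any (n_chans, n_times) segment
cleaned_chunk = asr.transform(new_chunk)
print('Cleaned chunk shape:', cleaned_chunk.shape)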
| 35.80303
| 79
| 0.562844
|
77247a05d3243a9bc9e82e0852a5cf50bd457a0a
| 37,083
|
py
|
Python
|
great_expectations/execution_engine/sqlalchemy_execution_engine.py
|
vikramaditya91/great_expectations
|
4ebcdc0414bec3cf336b43cc54ca63bddb05bac3
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/execution_engine/sqlalchemy_execution_engine.py
|
vikramaditya91/great_expectations
|
4ebcdc0414bec3cf336b43cc54ca63bddb05bac3
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/execution_engine/sqlalchemy_execution_engine.py
|
vikramaditya91/great_expectations
|
4ebcdc0414bec3cf336b43cc54ca63bddb05bac3
|
[
"Apache-2.0"
] | null | null | null |
import copy
import datetime
import logging
import traceback
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from urllib.parse import urlparse
from packaging.version import parse as parse_version
from great_expectations._version import get_versions # isort:skip
__version__ = get_versions()["version"] # isort:skip
del get_versions # isort:skip
from great_expectations.core import IDDict
from great_expectations.core.batch import BatchMarkers, BatchSpec
from great_expectations.core.batch_spec import (
RuntimeDataBatchSpec,
RuntimeQueryBatchSpec,
SqlAlchemyDatasourceBatchSpec,
)
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.exceptions import (
DatasourceKeyPairAuthBadPassphraseError,
ExecutionEngineError,
GreatExpectationsError,
InvalidBatchSpecError,
InvalidConfigError,
)
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy
from great_expectations.util import filter_properties_dict, import_library_module
from great_expectations.validator.validation_graph import MetricConfiguration
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sa
except ImportError:
sa = None
try:
from sqlalchemy.engine import reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql import Select
from sqlalchemy.sql.elements import TextClause, quoted_name
except ImportError:
reflection = None
DefaultDialect = None
Select = None
TextClause = None
quoted_name = None
OperationalError = None
try:
import psycopg2
import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
sqlalchemy_psycopg2 = None
try:
import sqlalchemy_redshift.dialect
except ImportError:
sqlalchemy_redshift = None
try:
import snowflake.sqlalchemy.snowdialect
if sa:
# Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
sa.dialects.registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError, AttributeError):
snowflake = None
try:
import pybigquery.sqlalchemy_bigquery
# Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
sa.dialects.registry.register(
"bigquery", "pybigquery.sqlalchemy_bigquery", "BigQueryDialect"
)
try:
getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
bigquery_types_tuple = None
except AttributeError:
# In older versions of the pybigquery driver, types were not exported, so we use a hack
logger.warning(
"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
)
from collections import namedtuple
BigQueryTypes = namedtuple(
"BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
)
bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except (ImportError, AttributeError):
bigquery_types_tuple = None
pybigquery = None
def _get_dialect_type_module(dialect):
"""Given a dialect, returns the dialect type, which is defines the engine/system that is used to communicates
with the database/database implementation. Currently checks for RedShift/BigQuery dialects"""
if dialect is None:
logger.warning(
"No sqlalchemy dialect found; relying in top-level sqlalchemy types."
)
return sa
try:
# Redshift does not (yet) export types to top level; only recognize base SA types
if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect):
return dialect.sa
except (TypeError, AttributeError):
pass
# Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
try:
if (
isinstance(
dialect,
pybigquery.sqlalchemy_bigquery.BigQueryDialect,
)
and bigquery_types_tuple is not None
):
return bigquery_types_tuple
except (TypeError, AttributeError):
pass
return dialect
class SqlAlchemyExecutionEngine(ExecutionEngine):
def __init__(
self,
name=None,
credentials=None,
data_context=None,
engine=None,
connection_string=None,
url=None,
batch_data_dict=None,
create_temp_table=True,
**kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
):
"""Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the
desired database. Also initializes the dialect to be used and configures usage statistics.
Args:
name (str): \
The name of the SqlAlchemyExecutionEngine
credentials: \
If the Execution Engine is not provided, the credentials can be used to build the Execution
Engine. If the Engine is provided, it will be used instead
data_context (DataContext): \
An object representing a Great Expectations project that can be used to access Expectation
Suites and the Project Data itself
engine (Engine): \
A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an
Engine has already been configured and should be reused. Will override Credentials
if provided.
connection_string (string): \
If neither the engines nor the credentials have been provided, a connection string can be used
to access the data. This will be overridden by both the engine and credentials if those are
provided.
url (string): \
If neither the engines, the credentials, nor the connection_string have been provided,
a url can be used to access the data. This will be overridden by all other configuration
options if any are provided.
"""
super().__init__(name=name, batch_data_dict=batch_data_dict)
self._name = name
self._credentials = credentials
self._connection_string = connection_string
self._url = url
self._create_temp_table = create_temp_table
if engine is not None:
if credentials is not None:
logger.warning(
"Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. "
"Ignoring credentials."
)
self.engine = engine
elif credentials is not None:
self.engine = self._build_engine(credentials=credentials, **kwargs)
elif connection_string is not None:
self.engine = sa.create_engine(connection_string, **kwargs)
elif url is not None:
self.drivername = urlparse(url).scheme
self.engine = sa.create_engine(url, **kwargs)
else:
raise InvalidConfigError(
"Credentials or an engine are required for a SqlAlchemyExecutionEngine."
)
# Get the dialect **for purposes of identifying types**
if self.engine.dialect.name.lower() in [
"postgresql",
"mysql",
"sqlite",
"oracle",
"mssql",
]:
# These are the officially included and supported dialects by sqlalchemy
self.dialect_module = import_library_module(
module_name="sqlalchemy.dialects." + self.engine.dialect.name
)
elif self.engine.dialect.name.lower() == "snowflake":
self.dialect_module = import_library_module(
module_name="snowflake.sqlalchemy.snowdialect"
)
elif self.engine.dialect.name.lower() == "redshift":
self.dialect_module = import_library_module(
module_name="sqlalchemy_redshift.dialect"
)
elif self.engine.dialect.name.lower() == "bigquery":
self.dialect_module = import_library_module(
module_name="pybigquery.sqlalchemy_bigquery"
)
else:
self.dialect_module = None
if self.engine and self.engine.dialect.name.lower() in [
"sqlite",
"mssql",
"snowflake",
"mysql",
]:
# sqlite/mssql temp tables only persist within a connection so override the engine
self.engine = self.engine.connect()
# Send a connect event to provide dialect type
if data_context is not None and getattr(
data_context, "_usage_statistics_handler", None
):
handler = data_context._usage_statistics_handler
handler.send_usage_message(
event="execution_engine.sqlalchemy.connect",
event_payload={
"anonymized_name": handler._execution_engine_anonymizer.anonymize(
self.name
),
"sqlalchemy_dialect": self.engine.name,
},
success=True,
)
# Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values,
# and set the instance "_config" variable equal to the resulting dictionary.
self._config = {
"name": name,
"credentials": credentials,
"data_context": data_context,
"engine": engine,
"connection_string": connection_string,
"url": url,
"batch_data_dict": batch_data_dict,
"module_name": self.__class__.__module__,
"class_name": self.__class__.__name__,
}
self._config.update(kwargs)
filter_properties_dict(properties=self._config, inplace=True)
@property
def credentials(self):
return self._credentials
@property
def connection_string(self):
return self._connection_string
@property
def url(self):
return self._url
def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine":
"""
        Using a set of given credentials, constructs an Execution Engine, connecting to a database using a URL or a
private key path.
"""
# Update credentials with anything passed during connection time
drivername = credentials.pop("drivername")
schema_name = credentials.pop("schema_name", None)
if schema_name is not None:
logger.warning(
"schema_name specified creating a URL with schema is not supported. Set a default "
"schema on the user connecting to your database."
)
create_engine_kwargs = kwargs
connect_args = credentials.pop("connect_args", None)
if connect_args:
create_engine_kwargs["connect_args"] = connect_args
if "private_key_path" in credentials:
options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
drivername, credentials
)
else:
options = sa.engine.url.URL(drivername, **credentials)
self.drivername = drivername
engine = sa.create_engine(options, **create_engine_kwargs)
return engine
def _get_sqlalchemy_key_pair_auth_url(
self, drivername: str, credentials: dict
) -> Tuple["sa.engine.url.URL", Dict]:
"""
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
values into a private key. If passphrase is incorrect, this will fail and an exception is raised.
Args:
drivername(str) - The name of the driver class
credentials(dict) - A dictionary of database credentials used to access the database
Returns:
a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
private_key_path = credentials.pop("private_key_path")
private_key_passphrase = credentials.pop("private_key_passphrase")
with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
try:
p_key = serialization.load_pem_private_key(
key.read(),
password=private_key_passphrase.encode()
if private_key_passphrase
else None,
backend=default_backend(),
)
except ValueError as e:
if "incorrect password" in str(e).lower():
raise DatasourceKeyPairAuthBadPassphraseError(
datasource_name="SqlAlchemyDatasource",
message="Decryption of key failed, was the passphrase incorrect?",
) from e
else:
raise e
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
credentials_driver_name = credentials.pop("drivername", None)
create_engine_kwargs = {"connect_args": {"private_key": pkb}}
return (
sa.engine.url.URL(drivername or credentials_driver_name, **credentials),
create_engine_kwargs,
)
def get_compute_domain(
self,
domain_kwargs: Dict,
domain_type: Union[str, MetricDomainTypes],
accessor_keys: Optional[Iterable[str]] = None,
) -> Tuple[Select, dict, dict]:
"""Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object.
Args:
domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would
like to be using, or a corresponding string value representing it. String types include "identity",
"column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the
class MetricDomainTypes.
accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when
describing the domain and simply transferred with their associated values into accessor_domain_kwargs.
Returns:
            A tuple of (selectable, compute_domain_kwargs, accessor_domain_kwargs)
"""
# Extracting value from enum if it is given for future computation
domain_type = MetricDomainTypes(domain_type)
batch_id = domain_kwargs.get("batch_id")
if batch_id is None:
# We allow no batch id specified if there is only one batch
if self.active_batch_data:
data_object = self.active_batch_data
else:
raise GreatExpectationsError(
"No batch is specified, but could not identify a loaded batch."
)
else:
if batch_id in self.loaded_batch_data_dict:
data_object = self.loaded_batch_data_dict[batch_id]
else:
raise GreatExpectationsError(
f"Unable to find batch with batch_id {batch_id}"
)
compute_domain_kwargs = copy.deepcopy(domain_kwargs)
accessor_domain_kwargs = dict()
if "table" in domain_kwargs and domain_kwargs["table"] is not None:
# TODO: Add logic to handle record_set_name once implemented
# (i.e. multiple record sets (tables) in one batch
if domain_kwargs["table"] != data_object.selectable.name:
selectable = sa.Table(
domain_kwargs["table"],
sa.MetaData(),
schema_name=data_object._schema_name,
)
else:
selectable = data_object.selectable
elif "query" in domain_kwargs:
raise ValueError(
"query is not currently supported by SqlAlchemyExecutionEngine"
)
else:
selectable = data_object.selectable
if (
"row_condition" in domain_kwargs
and domain_kwargs["row_condition"] is not None
):
condition_parser = domain_kwargs["condition_parser"]
if condition_parser == "great_expectations__experimental__":
parsed_condition = parse_condition_to_sqlalchemy(
domain_kwargs["row_condition"]
)
selectable = sa.select(
"*", from_obj=selectable, whereclause=parsed_condition
)
else:
raise GreatExpectationsError(
"SqlAlchemyExecutionEngine only supports the great_expectations condition_parser."
)
        # Warn the user that accessor keys will be ignored for any domain type other than 'table'
if (
domain_type != MetricDomainTypes.TABLE
and accessor_keys is not None
and len(list(accessor_keys)) > 0
):
logger.warning(
"Accessor keys ignored since Metric Domain Type is not 'table'"
)
if domain_type == MetricDomainTypes.TABLE:
if accessor_keys is not None and len(list(accessor_keys)) > 0:
for key in accessor_keys:
accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)
if len(domain_kwargs.keys()) > 0:
# Warn user if kwarg not "normal".
unexpected_keys: set = set(compute_domain_kwargs.keys()).difference(
{
"batch_id",
"table",
"row_condition",
"condition_parser",
}
)
if len(unexpected_keys) > 0:
unexpected_keys_str: str = ", ".join(
map(lambda element: f'"{element}"', unexpected_keys)
)
logger.warning(
f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".'
)
return selectable, compute_domain_kwargs, accessor_domain_kwargs
        # If the user has requested a column domain, check that a column is provided
elif domain_type == MetricDomainTypes.COLUMN:
if "column" in compute_domain_kwargs:
                # Checking if case-sensitive and using the appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column"] = quoted_name(
compute_domain_kwargs.pop("column")
)
else:
accessor_domain_kwargs["column"] = compute_domain_kwargs.pop(
"column"
)
else:
# If column not given
raise GreatExpectationsError(
"Column not provided in compute_domain_kwargs"
)
# Else, if column pair values requested
elif domain_type == MetricDomainTypes.COLUMN_PAIR:
# Ensuring column_A and column_B parameters provided
if (
"column_A" in compute_domain_kwargs
and "column_B" in compute_domain_kwargs
):
if self.active_batch_data.use_quoted_name:
# If case matters...
accessor_domain_kwargs["column_A"] = quoted_name(
compute_domain_kwargs.pop("column_A")
)
accessor_domain_kwargs["column_B"] = quoted_name(
compute_domain_kwargs.pop("column_B")
)
else:
accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop(
"column_A"
)
accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop(
"column_B"
)
else:
raise GreatExpectationsError(
"column_A or column_B not found within compute_domain_kwargs"
)
        # Checking whether a table, identity, or other domain type was provided while no column is specified; if so, warn the user
elif domain_type == MetricDomainTypes.MULTICOLUMN:
if "columns" in compute_domain_kwargs:
# If columns exist
accessor_domain_kwargs["columns"] = compute_domain_kwargs.pop("columns")
# Filtering if identity
elif domain_type == MetricDomainTypes.IDENTITY:
# If we would like our data to become a single column
if "column" in compute_domain_kwargs:
if self.active_batch_data.use_quoted_name:
selectable = sa.select(
[sa.column(quoted_name(compute_domain_kwargs["column"]))]
).select_from(selectable)
else:
selectable = sa.select(
[sa.column(compute_domain_kwargs["column"])]
).select_from(selectable)
# If we would like our data to now become a column pair
elif ("column_A" in compute_domain_kwargs) and (
"column_B" in compute_domain_kwargs
):
if self.active_batch_data.use_quoted_name:
selectable = sa.select(
[
sa.column(quoted_name(compute_domain_kwargs["column_A"])),
sa.column(quoted_name(compute_domain_kwargs["column_B"])),
]
).select_from(selectable)
else:
selectable = sa.select(
[
sa.column(compute_domain_kwargs["column_A"]),
sa.column(compute_domain_kwargs["column_B"]),
]
).select_from(selectable)
else:
# If we would like our data to become a multicolumn
if "columns" in compute_domain_kwargs:
if self.active_batch_data.use_quoted_name:
# Building a list of column objects used for sql alchemy selection
to_select = [
sa.column(quoted_name(col))
for col in compute_domain_kwargs["columns"]
]
selectable = sa.select(to_select).select_from(selectable)
else:
to_select = [
sa.column(col) for col in compute_domain_kwargs["columns"]
]
selectable = sa.select(to_select).select_from(selectable)
# Letting selectable fall through
return selectable, compute_domain_kwargs, accessor_domain_kwargs
def resolve_metric_bundle(
self,
metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]],
) -> dict:
"""For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds
bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail
if bundling the metrics together is not possible.
Args:
metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict]): \
                An iterable of tuples, each containing a MetricProvider's MetricConfiguration (its unique identifier), its metric provider function
(the function that actually executes the metric), and the arguments to pass to the metric provider function.
metrics (Dict[Tuple, Any]): \
A dictionary of metrics defined in the registry and corresponding arguments
Returns:
A dictionary of metric names and their corresponding now-queried values.
"""
resolved_metrics = dict()
# We need a different query for each domain (where clause).
queries: Dict[Tuple, dict] = dict()
for (
metric_to_resolve,
engine_fn,
compute_domain_kwargs,
accessor_domain_kwargs,
metric_provider_kwargs,
) in metric_fn_bundle:
if not isinstance(compute_domain_kwargs, IDDict):
compute_domain_kwargs = IDDict(compute_domain_kwargs)
domain_id = compute_domain_kwargs.to_id()
if domain_id not in queries:
queries[domain_id] = {
"select": [],
"ids": [],
"domain_kwargs": compute_domain_kwargs,
}
queries[domain_id]["select"].append(
engine_fn.label(metric_to_resolve.metric_name)
)
queries[domain_id]["ids"].append(metric_to_resolve.id)
for query in queries.values():
selectable, compute_domain_kwargs, _ = self.get_compute_domain(
query["domain_kwargs"], domain_type=MetricDomainTypes.IDENTITY.value
)
assert len(query["select"]) == len(query["ids"])
try:
res = self.engine.execute(
sa.select(query["select"]).select_from(selectable)
).fetchall()
logger.debug(
f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(compute_domain_kwargs).to_id()}"
)
except OperationalError as oe:
exception_message: str = "An SQL execution Exception occurred. "
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(oe).__name__}: "{str(oe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise ExecutionEngineError(message=exception_message)
assert (
len(res) == 1
), "all bundle-computed metrics must be single-value statistics"
assert len(query["ids"]) == len(
res[0]
), "unexpected number of metrics returned"
for idx, id in enumerate(query["ids"]):
resolved_metrics[id] = convert_to_json_serializable(res[0][idx])
return resolved_metrics
### Splitter methods for partitioning tables ###
def _split_on_whole_table(self, table_name: str, batch_identifiers: dict):
"""'Split' by returning the whole table"""
# return sa.column(column_name) == batch_identifiers[column_name]
return 1 == 1
def _split_on_column_value(
self, table_name: str, column_name: str, batch_identifiers: dict
):
"""Split using the values in the named column"""
return sa.column(column_name) == batch_identifiers[column_name]
def _split_on_converted_datetime(
self,
table_name: str,
column_name: str,
batch_identifiers: dict,
date_format_string: str = "%Y-%m-%d",
):
"""Convert the values in the named column to the given date_format, and split on that"""
return (
sa.func.strftime(
date_format_string,
sa.column(column_name),
)
== batch_identifiers[column_name]
)
def _split_on_divided_integer(
self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
return (
sa.cast(sa.column(column_name) / divisor, sa.Integer)
== batch_identifiers[column_name]
)
def _split_on_mod_integer(
self, table_name: str, column_name: str, mod: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
return sa.column(column_name) % mod == batch_identifiers[column_name]
def _split_on_multi_column_values(
self, table_name: str, column_names: List[str], batch_identifiers: dict
):
"""Split on the joint values in the named columns"""
return sa.and_(
*[
sa.column(column_name) == column_value
for column_name, column_value in batch_identifiers.items()
]
)
def _split_on_hashed_column(
self,
table_name: str,
column_name: str,
hash_digits: int,
batch_identifiers: dict,
):
"""Split on the hashed value of the named column"""
return (
sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits)
== batch_identifiers[column_name]
)
### Sampling methods ###
# _sample_using_limit
# _sample_using_random
# _sample_using_mod
# _sample_using_a_list
# _sample_using_md5
def _sample_using_random(
self,
p: float = 0.1,
):
"""Take a random sample of rows, retaining proportion p
Note: the Random function behaves differently on different dialects of SQL
"""
return sa.func.random() < p
def _sample_using_mod(
self,
column_name,
mod: int,
value: int,
):
"""Take the mod of named column, and only keep rows that match the given value"""
return sa.column(column_name) % mod == value
def _sample_using_a_list(
self,
column_name: str,
value_list: list,
):
"""Match the values in the named column against value_list, and only keep the matches"""
return sa.column(column_name).in_(value_list)
def _sample_using_md5(
self,
column_name: str,
hash_digits: int = 1,
hash_value: str = "f",
):
"""Hash the values in the named column, and split on that"""
return (
sa.func.right(
sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits
)
== hash_value
)
def _build_selectable_from_batch_spec(self, batch_spec) -> Union[Select, str]:
table_name: str = batch_spec["table_name"]
if "splitter_method" in batch_spec:
splitter_fn = getattr(self, batch_spec["splitter_method"])
split_clause = splitter_fn(
table_name=table_name,
batch_identifiers=batch_spec["batch_identifiers"],
**batch_spec["splitter_kwargs"],
)
else:
split_clause = True
if "sampling_method" in batch_spec:
if batch_spec["sampling_method"] == "_sample_using_limit":
                # SQLAlchemy's semantics for LIMIT are different from normal WHERE clauses,
# so the business logic for building the query needs to be different.
if self.engine.dialect.name.lower() == "oracle":
# limit doesn't compile properly for oracle so we will append rownum to query string later
raw_query = (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
)
query = str(
raw_query.compile(
self.engine, compile_kwargs={"literal_binds": True}
)
)
query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"]
return query
else:
return (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
.limit(batch_spec["sampling_kwargs"]["n"])
)
else:
sampler_fn = getattr(self, batch_spec["sampling_method"])
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(
sa.and_(
split_clause,
sampler_fn(**batch_spec["sampling_kwargs"]),
)
)
)
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(split_clause)
)
def get_batch_data_and_markers(
self, batch_spec: BatchSpec
) -> Tuple[Any, BatchMarkers]:
if not isinstance(
batch_spec, (SqlAlchemyDatasourceBatchSpec, RuntimeQueryBatchSpec)
):
raise InvalidBatchSpecError(
f"""SqlAlchemyExecutionEngine accepts batch_spec only of type SqlAlchemyDatasourceBatchSpec or
RuntimeQueryBatchSpec (illegal type "{str(type(batch_spec))}" was received).
"""
)
batch_data: SqlAlchemyBatchData
batch_markers: BatchMarkers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
}
)
temp_table_name: Optional[str]
if "bigquery_temp_table" in batch_spec:
temp_table_name = batch_spec.get("bigquery_temp_table")
else:
temp_table_name = None
source_table_name = batch_spec.get("table_name", None)
source_schema_name = batch_spec.get("schema_name", None)
if isinstance(batch_spec, RuntimeQueryBatchSpec):
# query != None is already checked when RuntimeQueryBatchSpec is instantiated
query: str = batch_spec.query
batch_spec.query = "SQLQuery"
batch_data = SqlAlchemyBatchData(
execution_engine=self,
query=query,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
elif isinstance(batch_spec, SqlAlchemyDatasourceBatchSpec):
if self.engine.dialect.name.lower() == "oracle":
selectable: str = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
else:
selectable: Select = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
batch_data = SqlAlchemyBatchData(
execution_engine=self,
selectable=selectable,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
return batch_data, batch_markers
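# ------------------------------------------------------------------------------
# Minimal construction sketch (not part of the original module). It only
# exercises the constructor paths documented above; the in-memory SQLite URL
# and the commented-out credentials are arbitrary illustration values.
if __name__ == "__main__":
    # Build the execution engine from a plain connection string ...
    engine = SqlAlchemyExecutionEngine(
        name="illustrative_sqlite_engine",
        connection_string="sqlite:///:memory:",
    )
    print(engine.connection_string, engine.engine.dialect.name)
    # ... or, equivalently, from a credentials dictionary, which _build_engine
    # passes to sqlalchemy's URL builder (the keys shown are standard
    # SQLAlchemy URL fields):
    # credentials = {"drivername": "postgresql", "username": "ge", "password": "...",
    #                "host": "localhost", "port": 5432, "database": "demo"}
    # engine = SqlAlchemyExecutionEngine(name="pg_engine", credentials=credentials)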
| 40.750549
| 136
| 0.590621
|
cb4d4e5b8f5ab2bb45d02cea8a41f97cfbadc7f8
| 5,223
|
py
|
Python
|
android_complex.py
|
govind794/Python-Appium-Automation
|
093165480332f0fc05dde679a859cee2feccdae1
|
[
"MIT"
] | null | null | null |
android_complex.py
|
govind794/Python-Appium-Automation
|
093165480332f0fc05dde679a859cee2feccdae1
|
[
"MIT"
] | null | null | null |
android_complex.py
|
govind794/Python-Appium-Automation
|
093165480332f0fc05dde679a859cee2feccdae1
|
[
"MIT"
] | null | null | null |
import os
import unittest
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.common.multi_action import MultiAction
from time import sleep
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
class ComplexAndroidTests(unittest.TestCase):
def setUp(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '4.2'
desired_caps['deviceName'] = 'emulator-5554'
desired_caps['app'] = PATH(
'../../..//AppiumSandbox/app/com.aefyr.sai_2020-06-19.apk'
)
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
def test_find_elements(self):
# pause a moment, so xml generation can occur
sleep(2)
els = self.driver.find_elements_by_xpath('//android.widget.TextView')
self.assertEqual('Split APKs Installer', els[0].text)
el = self.driver.find_element_by_xpath('//android.widget.TextView[contains(@text, "Animat")]')
self.assertEqual('Animation', el.text)
el = self.driver.find_element_by_accessibility_id("App")
el.click()
els = self.driver.find_elements_by_android_uiautomator('new UiSelector().clickable(true)')
# there are more, but at least 10 visible
self.assertLess(10, len(els))
# the list includes 2 before the main visible elements
self.assertEqual('Action Bar', els[2].text)
els = self.driver.find_elements_by_xpath('//android.widget.TextView')
self.assertLess(10, len(els))
self.assertEqual('Action Bar', els[1].text)
def test_scroll(self):
sleep(2)
els = self.driver.find_elements_by_xpath('android.widget.Button')
self.driver.scroll(els[7], els[3])
el = self.driver.find_element_by_accessibility_id('Views')
def test_smiley_face(self):
# just for the fun of it.
# this doesn't really assert anything.
self.driver.find_element_by_accessibility_id('Graphics').click()
els = self.driver.find_elements_by_class_name('android.widget.TextView')
self.driver.scroll(els[len(els)-1], els[0])
el = None
try:
el = self.driver.find_element_by_accessibility_id('Touch Paint')
except Exception as e:
els = self.driver.find_elements_by_class_name('android.widget.TextView')
self.driver.scroll(els[len(els)-1], els[0])
if el is None:
el = self.driver.find_element_by_accessibility_id('Touch Paint')
el.click()
# paint
e1 = TouchAction()
e1.press(x=150, y=100).release()
e2 = TouchAction()
e2.press(x=250, y=100).release()
smile = TouchAction()
smile.press(x=110, y=200) \
.move_to(x=1, y=1) \
.move_to(x=1, y=1) \
.move_to(x=1, y=1) \
.move_to(x=1, y=1) \
.move_to(x=1, y=1) \
.move_to(x=2, y=1) \
.move_to(x=2, y=1) \
.move_to(x=2, y=1) \
.move_to(x=2, y=1) \
.move_to(x=2, y=1) \
.move_to(x=3, y=1) \
.move_to(x=3, y=1) \
.move_to(x=3, y=1) \
.move_to(x=3, y=1) \
.move_to(x=3, y=1) \
.move_to(x=4, y=1) \
.move_to(x=4, y=1) \
.move_to(x=4, y=1) \
.move_to(x=4, y=1) \
.move_to(x=4, y=1) \
.move_to(x=5, y=1) \
.move_to(x=5, y=1) \
.move_to(x=5, y=1) \
.move_to(x=5, y=1) \
.move_to(x=5, y=1) \
.move_to(x=5, y=0) \
.move_to(x=5, y=0) \
.move_to(x=5, y=0) \
.move_to(x=5, y=0) \
.move_to(x=5, y=0) \
.move_to(x=5, y=0) \
.move_to(x=5, y=0) \
.move_to(x=5, y=0) \
.move_to(x=5, y=-1) \
.move_to(x=5, y=-1) \
.move_to(x=5, y=-1) \
.move_to(x=5, y=-1) \
.move_to(x=5, y=-1) \
.move_to(x=4, y=-1) \
.move_to(x=4, y=-1) \
.move_to(x=4, y=-1) \
.move_to(x=4, y=-1) \
.move_to(x=4, y=-1) \
.move_to(x=3, y=-1) \
.move_to(x=3, y=-1) \
.move_to(x=3, y=-1) \
.move_to(x=3, y=-1) \
.move_to(x=3, y=-1) \
.move_to(x=2, y=-1) \
.move_to(x=2, y=-1) \
.move_to(x=2, y=-1) \
.move_to(x=2, y=-1) \
.move_to(x=2, y=-1) \
.move_to(x=1, y=-1) \
.move_to(x=1, y=-1) \
.move_to(x=1, y=-1) \
.move_to(x=1, y=-1) \
.move_to(x=1, y=-1)
smile.release()
ma = MultiAction(self.driver)
ma.add(e1, e2, smile)
ma.perform()
# so you can see it
sleep(10)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(ComplexAndroidTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 32.64375
| 102
| 0.529198
|
7e4d7f3b009cccdbe2d7489cc9b3f8c5c3da1723
| 10,226
|
py
|
Python
|
snorkel/learning/structure/synthetic.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 30
|
2019-08-22T19:27:59.000Z
|
2022-03-13T22:03:15.000Z
|
snorkel/learning/structure/synthetic.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 2
|
2019-08-22T16:51:58.000Z
|
2022-03-21T02:59:18.000Z
|
snorkel/learning/structure/synthetic.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 31
|
2019-08-22T19:28:08.000Z
|
2022-03-23T12:50:49.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from numbskull import NumbSkull
from numbskull.inference import FACTORS
from numbskull.numbskulltypes import Weight, Variable, Factor, FactorToVar
import numpy as np
import random
import scipy.sparse as sparse
from snorkel.learning import GenerativeModel, GenerativeModelWeights
def generate_model(n, dep_density, class_prior=False, lf_propensity=False, lf_prior=False, lf_class_propensity=False,
dep_similar=False, dep_reinforcing=False, dep_fixing=False, dep_exclusive=False, force_dep=False):
weights = GenerativeModelWeights(n)
for i in range(n):
weights.lf_accuracy[i] = 1.1 - 0.2 * random.random()
if class_prior:
weights.class_prior = random.choice((-1.0, -2.0))
if lf_propensity:
for i in range(n):
weights.lf_propensity[i] = random.choice((-1.0, -2.0))
if lf_prior:
for i in range(n):
weights.lf_prior[i] = random.choice((1.0, -1.0))
if lf_class_propensity:
for i in range(n):
weights.lf_class_propensity[i] = random.choice((1.0, -1.0))
if dep_similar:
for i in range(n):
for j in range(i+1, n):
if random.random() < dep_density:
weights.dep_similar[i, j] = 0.25
if dep_fixing:
for i in range(n):
for j in range(i+1, n):
if random.random() < dep_density:
if random.random() < 0.5:
weights.dep_fixing[i, j] = 0.25
else:
weights.dep_fixing[j, i] = 0.25
if dep_reinforcing:
for i in range(n):
for j in range(i+1, n):
if random.random() < dep_density:
if random.random() < 0.5:
weights.dep_reinforcing[i, j] = 0.25
else:
weights.dep_reinforcing[j, i] = 0.25
if dep_exclusive:
for i in range(n):
for j in range(i+1, n):
if random.random() < dep_density:
weights.dep_exclusive[i, j] = 0.25
if force_dep and weights.dep_similar.getnnz() == 0 and weights.dep_fixing.getnnz() == 0 \
and weights.dep_reinforcing.getnnz() == 0 and weights.dep_exclusive.getnnz() == 0:
return generate_model(n, dep_density, class_prior=class_prior, lf_propensity=lf_propensity, lf_prior=lf_prior,
lf_class_propensity=lf_class_propensity, dep_similar=dep_similar, dep_fixing=dep_fixing,
dep_reinforcing=dep_reinforcing, dep_exclusive=dep_exclusive, force_dep=True)
else:
return weights
def generate_label_matrix(weights, m):
# Compilation
# Weights
n_weights = 1 if weights.class_prior != 0.0 else 0
n_weights += weights.n
for optional_name in GenerativeModel.optional_names:
for i in range(weights.n):
if getattr(weights, optional_name)[i] != 0.0:
n_weights += 1
for dep_name in GenerativeModel.dep_names:
for i in range(weights.n):
for j in range(weights.n):
if getattr(weights, dep_name)[i, j] != 0.0:
n_weights += 1
weight = np.zeros(n_weights, Weight)
for i in range(len(weight)):
weight[i]['isFixed'] = True
if weights.class_prior != 0.0:
weight[0]['initialValue'] = np.float64(weights.class_prior)
w_off = 1
else:
w_off = 0
for i in range(weights.n):
weight[w_off + i]['initialValue'] = np.float64(weights.lf_accuracy[i])
w_off += weights.n
for optional_name in GenerativeModel.optional_names:
for i in range(weights.n):
if getattr(weights, optional_name)[i] != 0.0:
weight[w_off]['initialValue'] = np.float64(getattr(weights, optional_name)[i])
w_off += 1
for dep_name in GenerativeModel.dep_names:
for i in range(weights.n):
for j in range(weights.n):
if getattr(weights, dep_name)[i, j] != 0.0:
weight[w_off]['initialValue'] = np.float64(getattr(weights, dep_name)[i, j])
w_off += 1
# Variables
variable = np.zeros(1 + weights.n, Variable)
variable[0]['isEvidence'] = 0
variable[0]['initialValue'] = 0
variable[0]["dataType"] = 0
variable[0]["cardinality"] = 2
for i in range(weights.n):
variable[1 + i]['isEvidence'] = 0
variable[1 + i]['initialValue'] = 0
variable[1 + i]["dataType"] = 0
variable[1 + i]["cardinality"] = 3
# Factors and FactorToVar
n_edges = 1 if weights.class_prior != 0.0 else 0
n_edges += 2 * weights.n
for optional_name in GenerativeModel.optional_names:
for i in range(weights.n):
if getattr(weights, optional_name)[i] != 0.0:
if optional_name == 'lf_prior' or optional_name == 'lf_propensity':
n_edges += 1
elif optional_name == 'lf_class_propensity':
n_edges += 2
else:
raise ValueError()
for dep_name in GenerativeModel.dep_names:
for i in range(weights.n):
for j in range(weights.n):
if getattr(weights, dep_name)[i, j] != 0.0:
if dep_name == 'dep_similar' or dep_name == 'dep_exclusive':
n_edges += 2
elif dep_name == 'dep_fixing' or dep_name == 'dep_reinforcing':
n_edges += 3
else:
raise ValueError()
factor = np.zeros(n_weights, Factor)
ftv = np.zeros(n_edges, FactorToVar)
if weights.class_prior != 0.0:
factor[0]["factorFunction"] = FACTORS["DP_GEN_CLASS_PRIOR"]
factor[0]["weightId"] = 0
factor[0]["featureValue"] = 1
factor[0]["arity"] = 1
factor[0]["ftv_offset"] = 0
ftv[0]["vid"] = 0
f_off = 1
ftv_off = 1
else:
f_off = 0
ftv_off = 0
for i in range(weights.n):
factor[f_off + i]["factorFunction"] = FACTORS["DP_GEN_LF_ACCURACY"]
factor[f_off + i]["weightId"] = f_off + i
factor[f_off + i]["featureValue"] = 1
factor[f_off + i]["arity"] = 2
factor[f_off + i]["ftv_offset"] = ftv_off + 2 * i
ftv[ftv_off + 2 * i]["vid"] = 0
ftv[ftv_off + 2 * i + 1]["vid"] = 1 + i
f_off += weights.n
ftv_off += 2 * weights.n
for i in range(weights.n):
if weights.lf_prior[i] != 0.0:
factor[f_off]["factorFunction"] = FACTORS["DP_GEN_LF_PRIOR"]
factor[f_off]["weightId"] = f_off
factor[f_off]["featureValue"] = 1
factor[f_off]["arity"] = 1
factor[f_off]["ftv_offset"] = ftv_off
ftv[ftv_off]["vid"] = 1 + i
f_off += 1
ftv_off += 1
for i in range(weights.n):
if weights.lf_propensity[i] != 0.0:
factor[f_off]["factorFunction"] = FACTORS["DP_GEN_LF_PROPENSITY"]
factor[f_off]["weightId"] = f_off
factor[f_off]["featureValue"] = 1
factor[f_off]["arity"] = 1
factor[f_off]["ftv_offset"] = ftv_off
ftv[ftv_off]["vid"] = 1 + i
f_off += 1
ftv_off += 1
for i in range(weights.n):
if weights.lf_class_propensity[i] != 0.0:
factor[f_off]["factorFunction"] = FACTORS["DP_GEN_LF_CLASS_PROPENSITY"]
factor[f_off]["weightId"] = f_off
factor[f_off]["featureValue"] = 1
factor[f_off]["arity"] = 2
factor[f_off]["ftv_offset"] = ftv_off
ftv[ftv_off]["vid"] = 0
ftv[ftv_off + 1]["vid"] = 1 + i
f_off += 1
ftv_off += 2
for dep_name in GenerativeModel.dep_names:
for i in range(weights.n):
for j in range(weights.n):
if getattr(weights, dep_name)[i, j] != 0.0:
if dep_name == 'dep_similar' or dep_name == 'dep_exclusive':
factor[f_off]["factorFunction"] = FACTORS["DP_GEN_DEP_SIMILAR"] if dep_name == 'dep_similar' else FACTORS["DP_GEN_DEP_EXCLUSIVE"]
factor[f_off]["weightId"] = f_off
factor[f_off]["featureValue"] = 1
factor[f_off]["arity"] = 2
factor[f_off]["ftv_offset"] = ftv_off
ftv[ftv_off]["vid"] = 1 + i
ftv[ftv_off + 1]["vid"] = 1 + j
f_off += 1
ftv_off += 2
elif dep_name == 'dep_fixing' or dep_name == 'dep_reinforcing':
factor[f_off]["factorFunction"] = FACTORS["DP_GEN_DEP_FIXING"] if dep_name == 'dep_fixing' else FACTORS["DP_GEN_DEP_REINFORCING"]
factor[f_off]["weightId"] = f_off
factor[f_off]["featureValue"] = 1
factor[f_off]["arity"] = 3
factor[f_off]["ftv_offset"] = ftv_off
ftv[ftv_off]["vid"] = 0
ftv[ftv_off + 1]["vid"] = 1 + i
ftv[ftv_off + 2]["vid"] = 1 + j
f_off += 1
ftv_off += 3
else:
raise ValueError()
# Domain mask
    domain_mask = np.zeros(1 + weights.n, dtype=bool)  # plain bool: the np.bool alias is deprecated/removed in newer NumPy
# Instantiates factor graph
ns = NumbSkull(n_inference_epoch=100, quiet=True)
ns.loadFactorGraph(weight, variable, factor, ftv, domain_mask, n_edges)
fg = ns.getFactorGraph()
y = np.ndarray((m,), np.int64)
L = sparse.lil_matrix((m, weights.n), dtype=np.int64)
for i in range(m):
fg.burnIn(10, False)
y[i] = 1 if fg.var_value[0, 0] == 0 else -1
for j in range(weights.n):
if fg.var_value[0, 1 + j] != 2:
L[i, j] = 1 if fg.var_value[0, 1 + j] == 0 else -1
return y, L.tocsr()
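# ------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): generate a random
# dependency structure and then sample a synthetic label matrix from it. The
# particular sizes below are arbitrary illustration values.
if __name__ == '__main__':
    weights = generate_model(
        n=5, dep_density=0.1, lf_propensity=True, dep_similar=True, force_dep=True
    )
    y, L = generate_label_matrix(weights, m=100)
    print(y.shape, L.shape)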
| 36.784173
| 153
| 0.544886
|
f9ce7965fc605e9ddf8f3c4d93f561a8b964f060
| 4,235
|
py
|
Python
|
app/models.py
|
daktari01/sama_automated_incentive_app
|
2b49d504191a10067aa1de4637180c6bc7924054
|
[
"MIT"
] | null | null | null |
app/models.py
|
daktari01/sama_automated_incentive_app
|
2b49d504191a10067aa1de4637180c6bc7924054
|
[
"MIT"
] | null | null | null |
app/models.py
|
daktari01/sama_automated_incentive_app
|
2b49d504191a10067aa1de4637180c6bc7924054
|
[
"MIT"
] | null | null | null |
# app/models.py
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
class Employee(UserMixin, db.Model):
"""
Create Employee table
"""
# Ensures table will be named in plural and not in singular
# as in the name of the model
__tablename__ = 'employees'
id = db.Column(db.Integer, primary_key=True)
emp_number = db.Column(db.String(10), index=True, unique=True)
username = db.Column(db.String(60), index=True, unique=True)
emp_name = db.Column(db.String(100), index=True)
password_hash = db.Column(db.String(128))
subproject_id = db.Column(db.Integer, db.ForeignKey('projects.id'))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
is_admin = db.Column(db.Boolean, default=False)
attendances = db.relationship('Attendance', backref='employee',
lazy='dynamic')
incentives = db.relationship('Incentive', backref='employee',
lazy='dynamic')
@property
def password(self):
"""
Prevent password from being accessed
"""
raise AttributeError('Password is not readable.')
@password.setter
def password(self, password):
"""
Set password to a hashed password
"""
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
"""
Check if hashed password matches actual password
"""
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<Employee: {}>'.format(self.username)
# Set up user_loader
@login_manager.user_loader
def load_user(user_id):
return Employee.query.get(int(user_id))
class Role(db.Model):
"""
Create a Role table
"""
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), unique=True)
description = db.Column(db.String(200))
employees = db.relationship('Employee', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role: {}>'.format(self.name)
class Project(db.Model):
"""
Create a Project table
"""
__tablename__ = 'projects'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), unique=True)
employee_id = db.Column(db.Integer, db.ForeignKey('employees.id'))
description = db.Column(db.String(200))
subprojects = db.relationship('Subproject', backref='project',
lazy='dynamic')
class Subproject(db.Model):
"""
Create a Subproject class
"""
__tablename__ = 'subprojects'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), unique=True)
description = db.Column(db.String(200))
project_id = db.Column(db.Integer, db.ForeignKey('projects.id'))
employees = db.relationship('Employee', backref='subproject', lazy='dynamic')
incentives = db.relationship('Incentive', backref='subproject', lazy='dynamic')
def __repr__(self):
return '<Subproject: {}>'.format(self.name)
class Attendance(db.Model):
"""
Create an Attendance class
"""
__tablename__ = 'attendances'
id = db.Column(db.Integer, primary_key=True)
employee_id = db.Column(db.Integer, db.ForeignKey('employees.id'))
leave_days = db.Column(db.Integer)
days_present = db.Column(db.Integer)
percentage_attendance = db.Column(db.Integer)
incentives = db.relationship('Incentive', backref='attendance', lazy='dynamic')
class Incentive(db.Model):
"""
Create an Incentive table
"""
__tablename__ = 'incentives'
id = db.Column(db.Integer, primary_key=True)
employee_id = db.Column(db.Integer, db.ForeignKey('employees.id'))
subproject_id = db.Column(db.Integer, db.ForeignKey('subprojects.id'))
attendances_id = db.Column(db.Integer, db.ForeignKey('attendances.id'))
production = db.Column(db.Integer)
av_qa_score = db.Column(db.Integer)
total_points = db.Column(db.Integer)
amount = db.Column(db.Float)
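# ------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). Creating and
# persisting an Employee requires an application context and a configured
# `db` session, which are assumed to exist elsewhere in the app; only the
# password-hashing behaviour defined above is illustrated here.
#
#   employee = Employee(emp_number='E0001', username='jdoe', emp_name='Jane Doe')
#   employee.password = 'correct horse'   # stored as a hash, never in plain text
#   assert employee.verify_password('correct horse')
#   db.session.add(employee)
#   db.session.commit()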
| 31.842105
| 83
| 0.64817
|
c011af72cef762f7ecaedb6f5e6e3cd2971c945c
| 2,552
|
py
|
Python
|
main.py
|
iandraves/AutoDocs
|
164fb03f7c2c956008b969a43ec2b72b81d239c0
|
[
"MIT"
] | null | null | null |
main.py
|
iandraves/AutoDocs
|
164fb03f7c2c956008b969a43ec2b72b81d239c0
|
[
"MIT"
] | null | null | null |
main.py
|
iandraves/AutoDocs
|
164fb03f7c2c956008b969a43ec2b72b81d239c0
|
[
"MIT"
] | null | null | null |
import urllib.request
import datetime
import threading
import os
import shutil
import requests
from bs4 import BeautifulSoup
ARG_TYPES = [
'Affirmatives',
'Case Negatives',
'Counterplans',
'Disadvantages',
'Impact Files',
'Kritik Answers',
'Kritiks',
'Politics',
'Theory',
'Topicality'
]
PREFIX_URL = "https://openev.debatecoaches.org/"
def feedback(file_name):
print(f"Downloading: {file_name}")
def download(num, urls):
# Fetching raw HTML
response = requests.get(urls[num])
# Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html.parser")
# Downloading and categorizing files
span_links = soup.findAll("span", {"class": "wikiexternallink"})
for span_link in span_links:
download_url = span_link.find("a").get("href").replace(" ", "%20")
file_name = download_url.rsplit('/', 1)[-1].replace("%20", " ")
        if os.path.splitext(file_name)[1] == ".docx":
            feedback(file_name)
            urllib.request.urlretrieve(
                download_url, os.path.join(f"./downloads/{num + 1}. {ARG_TYPES[num]}", file_name))
def main():
# Removing folders if they already exist
    if os.path.isdir("./downloads/"):
        shutil.rmtree("./downloads/")
# Creating respective folders
os.mkdir("./downloads/")
for num, arg in enumerate(ARG_TYPES):
os.mkdir(f"./downloads/{num + 1}. {arg}")
# Getting current debate year
current_year = datetime.date.today().year
current_month = datetime.date.today().month
    # Before August, the current debate season still belongs to the previous calendar year
    if current_month < 8:
        current_year -= 1
debate_year = str(current_year)
# Generating URLs
urls = [
f"{PREFIX_URL}{debate_year}/Affirmatives",
f"{PREFIX_URL}{debate_year}/Case%20Negatives",
f"{PREFIX_URL}{debate_year}/Counterplans",
f"{PREFIX_URL}{debate_year}/Disadvantages",
f"{PREFIX_URL}{debate_year}/Impact%20Files",
f"{PREFIX_URL}{debate_year}/Kritik%20Answers",
f"{PREFIX_URL}{debate_year}/Kritiks",
f"{PREFIX_URL}{debate_year}/Politics",
f"{PREFIX_URL}{debate_year}/Theory",
f"{PREFIX_URL}{debate_year}/Topicality"
]
# Creating download threads
threads = [threading.Thread(target=download, args=(i, urls,))
for i in range(len(ARG_TYPES))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == "__main__":
main()
| 27.73913 | 98 | 0.634404 |
c23cc3d44d741552be4b5789de4f88afe6fee089 | 3,921 | py | Python |
train.py | crazyleg/lateral_research | e186d218cd4b3ac3770e9fa375bc57133e4dafe5 | ["MIT"] | null | null | null |
train.py | crazyleg/lateral_research | e186d218cd4b3ac3770e9fa375bc57133e4dafe5 | ["MIT"] | null | null | null |
train.py | crazyleg/lateral_research | e186d218cd4b3ac3770e9fa375bc57133e4dafe5 | ["MIT"] | null | null | null |
import torch
from torch import optim
from torch import nn
import torchvision
from tqdm import tqdm
import torchvision.transforms as transforms
from torch.autograd import Variable
from model import Net
USE_CUDA = True if torch.cuda.is_available() else False
device = torch.device("cuda" if USE_CUDA else "cpu")
class AddGaussianNoise(object):
def __init__(self, mean=0., std=1.):
self.std = std
self.mean = mean
def __call__(self, tensor):
return tensor + torch.randn(tensor.size()) * self.std + self.mean
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
if __name__ == '__main__':
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
AddGaussianNoise(0., 0.5)])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=4)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=16)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
net = Net()
net = net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.5)
for epoch in range(10): # loop over the dataset multiple times
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
correct = 0
total = 0
net.eval()
net = net.to(device)
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
print('Finished Training')
PATH = './cifar_net_n06.pth'
torch.save(net.state_dict(), PATH)
# print('Start lateral learning')
#
# net.set_lateral_mode(True)
# with torch.no_grad():
# for i, data in tqdm(enumerate(trainloader, 0)):
# inputs, labels = data
# outputs = net(inputs)
#
# print('Finished lateral learning')
# net.set_lateral_mode(False)
#
#
# PATH = './cifar_net.pth'
# torch.save(net.state_dict(), PATH)
#
# correct = 0
# total = 0
# with torch.no_grad():
# for data in testloader:
# images, labels = data
# outputs = net(images)
# _, predicted = torch.max(outputs.data, 1)
# total += labels.size(0)
# correct += (predicted == labels).sum().item()
#
# print('Accuracy of the network on the 10000 test images: %d %%' % (
# 100 * correct / total))
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| 32.675 | 90 | 0.563377 |
31c6fd7680b5d7d78ac075f6515e3b8ffe3c2647 | 2,003 | py | Python |
funvert.py | dmoney/funvert | 89895301a1f4583450aa184560916dc7238d4ac5 | ["MIT"] | 1 | 2019-01-16T22:39:42.000Z | 2019-01-16T22:39:42.000Z |
funvert.py | dmoney/funvert | 89895301a1f4583450aa184560916dc7238d4ac5 | ["MIT"] | 8 | 2018-11-30T13:59:10.000Z | 2018-12-26T03:43:18.000Z |
funvert.py | dmoney/funvert | 89895301a1f4583450aa184560916dc7238d4ac5 | ["MIT"] | null | null | null |
# funvert.py
# A hack to enable calling functions as if they were methods
#
# Author: Dustin King (cathodion@gmail.com)
# Grown from this tweet by Zygmunt Zając:
# https://twitter.com/zygmuntzajac/status/685161914117296128
import inspect
def stackFrameContext(depth):
context = {}
# depth + 1 because 0 is the context of the calling function
frame = inspect.stack()[depth + 1].frame
# add global and local variables from the appropriate context
context.update(frame.f_globals)
context.update(frame.f_locals)
return context
class Funverted:
def __init__(self, obj):
        self._obj = obj
def __getattr__(self, name):
try:
return getattr(self._obj, name)
except AttributeError:
globprop = stackFrameContext(1).get(name, None)
if callable(globprop):
return lambda *args, **kwargs: funvert(globprop(self._obj, *args, **kwargs))
else:
raise
def __str__(self):
return str(self._obj)
def __add__(self, rhs):
return self._obj + rhs
def __radd__(self, lhs):
return lhs + self._obj
def __sub__(self, rhs):
return self._obj - rhs
def __rsub__(self, lhs):
return lhs - self._obj
def __mul__(self, rhs):
return self._obj * rhs
def __rmul__(self, lhs):
return lhs * self._obj
def __truediv__(self, rhs):
return self._obj / rhs
def __rtruediv__(self, lhs):
return lhs / self._obj
def __floordiv__(self, rhs):
return self._obj // rhs
def __rfloordiv__(self, lhs):
return lhs // self._obj
def __mod__(self, rhs):
return self._obj % rhs
def __rmod__(self, lhs):
return lhs % self._obj
def funvert(obj):
if isinstance(obj, Funverted):
return obj
else:
return Funverted(obj)
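# Example of the funverted call style (a sketch, not part of the original
# module): any callable visible in the caller's scope can be invoked as if it
# were a method on the wrapped value.
#
#     def double(x):
#         return x * 2
#
#     funvert(21).double()   # -> Funverted wrapping 42, prints as "42"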
if __name__ == '__main__':
from test_funvert import TestFunvert
import unittest
unittest.main()
| 23.022989 | 92 | 0.623565 |
e0eec29a839da29bca1406ca84ab0e37307335b7 | 4,308 | py | Python |
common/contrail_services.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | null | null | null |
common/contrail_services.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | 1 | 2021-06-01T22:18:29.000Z | 2021-06-01T22:18:29.000Z |
common/contrail_services.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | null | null | null |
CONTRAIL_SERVICES_CONTAINER_MAP = {
'api-server': ['config_api', 'contrail-config-api'],
'schema': ['config_schema', 'contrail-schema-transformer'],
'svc-monitor': ['config_svcmonitor', 'contrail-svcmonitor'],
'device-manager': ['config_devicemgr', 'contrail-devicemgr'],
'control': ['control_control', 'k8s_contrail-control'],
'dns': ['control_dns', 'contrail-dns'],
'named': ['control_named', 'contrail-named'],
'analytics-api': ['analytics_api', 'contrail-analytics-api'],
'alarm-gen': ['analytics_alarm-gen', 'contrail-alarm-gen'],
'query-engine': ['analytics_query-engine', 'contrail-query-engine'],
'topology': ['analytics_topology', 'contrail-topology'],
'collector': ['analytics_collector', 'contrail-collector'],
'snmp-collector': ['analytics_snmp-collector', 'contrail-snmp-collector'],
'agent': ['contrail-agent', 'vrouter-agent', 'contrail-vrouter-agent'],
'webui': ['webui_web', 'contrail-webui_'],
'webui-middleware': ['webui_job', 'contrail-webui-middleware'],
'config-rabbitmq': ['configdatabase_rabbitmq', 'rabbitmq'],
'config-zookeeper': ['configdatabase_zookeeper',
'contrail-config-zookeeper'],
'config-cassandra': ['configdatabase_cassandra', 'contrail-configdb'],
'analytics-kafka': ['analyticsdatabase_kafka', 'contrail-kafka'],
'analytics-zookeeper': ['analyticsdatabase_zookeeper',
'contrail-analytics-zookeeper'],
'analytics-cassandra': ['analyticsdatabase_cassandra',
'contrail-analyticsdb'],
'nova': ['nova_api', 'nova-api-osapi'],
'nova-compute': ['nova_compute', 'nova-compute'],
'nova-conductor': ['nova_conductor', 'nova-conductor'],
'nova-scheduler': ['nova_scheduler', 'nova-scheduler'],
'glance': ['glance_api', 'glance-api'],
'rabbitmq': ['rabbitmq'],
'haproxy': ['haproxy'],
'keystone': ['keystone-api', 'keystone'],
'neutron': ['neutron', 'neutron-server'],
'mysql': ['mariadb'],
    'redis': ['webui_redis', 'webui-redis', 'redis'],
'vrouter-nodemgr': ['vrouter_nodemgr', 'vrouter-nodemgr'],
'config-nodemgr': ['config_nodemgr', 'config-nodemgr'],
'analytics-nodemgr': ['analytics_nodemgr', 'analytics-nodemgr'],
'control-nodemgr': ['control_nodemgr', 'control-nodemgr'],
'analyticsdb-nodemgr': ['analyticsdatabase_nodemgr',
'analyticsdb-nodemgr'],
'contrail-kube-manager': ['contrail-kube-manager', 'kubemanager'],
'kube-apiserver': ['kube-apiserver']
}
CONTRAIL_PODS_SERVICES_MAP = {
'vrouter' : ['vrouter-nodemgr', 'agent'],
'control' : ['control-nodemgr',
'control',
'named',
'dns'],
'config' : ['config-nodemgr',
'api-server',
'schema',
'svc-monitor',
'device-manager'],
'config-database' : ['config-cassandra',
'config-zookeeper',
'config-rabbitmq'],
'analytics' : ['analytics-nodemgr',
'analytics-api',
'collector',
'query-engine',
'alarm-gen',
'snmp-collector',
'topology'],
'analytics-database' : ['analytics-cassandra',
'analyticsdb-nodemgr',
'analytics-zookeeper',
'analytics-kafka'],
'webui' : ['webui', 'webui-middleware', 'redis'],
'kubernetes' : ['contrail-kube-manager'],
}
BackupImplementedServices = ["schema",
"svc-monitor",
"device-manager",
"contrail-kube-manager"]
ServiceHttpPortMap = {
"agent" : 8085,
"control" : 8083,
"collector" : 8089,
"query-engine" : 8091,
"analytics-api" : 8090,
"dns" : 8092,
"api-server" : 8084,
"schema" : 8087,
"svc-monitor" : 8088,
"device-manager" : 8096,
"analytics-nodemgr" : 8104,
"vrouter-nodemgr" : 8102,
"control-nodemgr" : 8101,
"analyticsdb-nodemgr" : 8103,
"config-nodemgr" : 8100,
"alarm-gen" : 5995,
"snmp-collector" : 5920,
"topology" : 5921,
"contrail-kube-manager" : 8108,
}
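# Example lookups against the maps above (illustrative only, not part of the
# original module):
#
#     CONTRAIL_SERVICES_CONTAINER_MAP['api-server']
#         -> ['config_api', 'contrail-config-api']
#     CONTRAIL_PODS_SERVICES_MAP['webui']
#         -> ['webui', 'webui-middleware', 'redis']
#     ServiceHttpPortMap['api-server']
#         -> 8084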
| 41.825243 | 78 | 0.571495 |
e5a346c1a83c320475fb3fc8f4228dff05b1df34 | 1,312 | py | Python |
app/__init__.py | k-koech/flask-blog | 8bab442ef644b239e1d7e8bbff3eb4fc93e4cbdc | ["MIT"] | null | null | null |
app/__init__.py | k-koech/flask-blog | 8bab442ef644b239e1d7e8bbff3eb4fc93e4cbdc | ["MIT"] | null | null | null |
app/__init__.py | k-koech/flask-blog | 8bab442ef644b239e1d7e8bbff3eb4fc93e4cbdc | ["MIT"] | null | null | null |
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
bcrypt = Bcrypt()
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
def create_app(config_name):
app = Flask(__name__)
db.init_app(app)
login_manager.init_app(app)
bcrypt.init_app(app)
app.config.update(
MAIL_USE_SSL = True,
MAIL_SERVER = 'smtp.gmail.com',
MAIL_PORT = 465,
MAIL_USE_TLS = False,
MAIL_USERNAME = "kalambanidouglas@gmail.com",
MAIL_PASSWORD ="kalambani97!")
mail.init_app(app)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initializing flask extensions
bootstrap.init_app(app)
# authenticating
from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/authenticate')
# Will add the views and forms
# Registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
| 23.428571 | 71 | 0.743902 |
08871a96b177e2a92074c4dc27c0dc78d6f9d9b7 | 1,591 | py | Python |
elasticsearch/helpers/__init__.py | shub1095/elasticsearch-py | 778c7e4ac000b51ced7c9a1a588200ec395e40ca | ["Apache-2.0"] | 1 | 2021-09-02T14:50:53.000Z | 2021-09-02T14:50:53.000Z |
elasticsearch/helpers/__init__.py | shub1095/elasticsearch-py | 778c7e4ac000b51ced7c9a1a588200ec395e40ca | ["Apache-2.0"] | null | null | null |
elasticsearch/helpers/__init__.py | shub1095/elasticsearch-py | 778c7e4ac000b51ced7c9a1a588200ec395e40ca | ["Apache-2.0"] | 1 | 2020-08-04T11:42:43.000Z | 2020-08-04T11:42:43.000Z |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from .errors import BulkIndexError, ScanError
from .actions import expand_action, streaming_bulk, bulk, parallel_bulk
from .actions import scan, reindex
from .actions import _chunk_actions, _process_bulk_chunk
__all__ = [
"BulkIndexError",
"ScanError",
"expand_action",
"streaming_bulk",
"bulk",
"parallel_bulk",
"scan",
"reindex",
"_chunk_actions",
"_process_bulk_chunk",
]
try:
# Asyncio only supported on Python 3.6+
if sys.version_info < (3, 6):
raise ImportError
from .._async.helpers import (
async_scan,
async_bulk,
async_reindex,
async_streaming_bulk,
)
__all__ += ["async_scan", "async_bulk", "async_reindex", "async_streaming_bulk"]
except (ImportError, SyntaxError):
pass
| 30.018868 | 84 | 0.717788 |
3d6ae22389d206d8b0cfad1741c4df689a01752f | 8,304 | py | Python |
tests/core/full_node/test_transactions.py | zcomputerwiz/replaceme-blockchain | b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a | ["Apache-2.0"] | null | null | null |
tests/core/full_node/test_transactions.py | zcomputerwiz/replaceme-blockchain | b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a | ["Apache-2.0"] | null | null | null |
tests/core/full_node/test_transactions.py | zcomputerwiz/replaceme-blockchain | b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a | ["Apache-2.0"] | null | null | null |
import asyncio
from secrets import token_bytes
from typing import Optional
import pytest
from replaceme.consensus.block_record import BlockRecord
from replaceme.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from replaceme.full_node.full_node_api import FullNodeAPI
from replaceme.protocols import full_node_protocol
from replaceme.simulator.simulator_protocol import FarmNewBlockProtocol
from replaceme.types.peer_info import PeerInfo
from replaceme.util.ints import uint16, uint32
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestTransactions:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_nodes_two_wallets(self):
async for _ in setup_simulators_and_wallets(3, 2, {}):
yield _
@pytest.mark.asyncio
async def test_wallet_coinbase(self, wallet_node):
num_blocks = 5
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
for i in range(num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
# funds += calculate_base_farmer_reward(0)
await asyncio.sleep(2)
print(await wallet.get_confirmed_balance(), funds)
await time_out_assert(10, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_tx_propagation(self, three_nodes_two_wallets):
num_blocks = 5
full_nodes, wallets = three_nodes_two_wallets
wallet_0, wallet_server_0 = wallets[0]
wallet_1, wallet_server_1 = wallets[1]
full_node_api_0 = full_nodes[0]
server_0 = full_node_api_0.server
full_node_api_1 = full_nodes[1]
server_1 = full_node_api_1.server
full_node_api_2 = full_nodes[2]
server_2 = full_node_api_2.server
ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
ph1 = await wallet_1.wallet_state_manager.main_wallet.get_new_puzzlehash()
#
        # wallet0 <-> server0 <-> server1 <-> server2 <-> wallet1
#
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
await server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
await server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)
await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)
for i in range(num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(10, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)
async def peak_height(fna: FullNodeAPI):
peak: Optional[BlockRecord] = fna.full_node.blockchain.get_peak()
if peak is None:
return -1
peak_height = peak.height
return peak_height
await time_out_assert(10, peak_height, num_blocks, full_node_api_1)
await time_out_assert(10, peak_height, num_blocks, full_node_api_2)
tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, ph1, 0)
await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)
await time_out_assert(
10,
full_node_api_0.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_1.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_2.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
# Farm another block
for i in range(1, 8):
await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
print(f"Funds: {funds}")
await time_out_assert(
10,
wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance,
(funds - 10),
)
await time_out_assert(15, wallet_1.wallet_state_manager.main_wallet.get_confirmed_balance, 10)
@pytest.mark.asyncio
async def test_mempool_tx_sync(self, three_nodes_two_wallets):
num_blocks = 5
full_nodes, wallets = three_nodes_two_wallets
wallet_0, wallet_server_0 = wallets[0]
full_node_api_0 = full_nodes[0]
server_0 = full_node_api_0.server
full_node_api_1 = full_nodes[1]
server_1 = full_node_api_1.server
full_node_api_2 = full_nodes[2]
server_2 = full_node_api_2.server
ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
        # wallet0 <-> server0 <-> server1
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
await server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
all_blocks = await full_node_api_0.get_all_full_blocks()
for block in all_blocks:
await full_node_api_2.full_node.respond_block(full_node_protocol.RespondBlock(block))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(10, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)
tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, token_bytes(), 0)
await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)
await time_out_assert(
10,
full_node_api_0.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_1.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_2.full_node.mempool_manager.get_spendbundle,
None,
tx.name,
)
# make a final connection.
        # wallet0 <-> server0 <-> server1 <-> server2
await server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)
await time_out_assert(
10,
full_node_api_0.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_1.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_2.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
| 37.405405 | 118 | 0.672206 |
fb64a87cce04841bf26a33f35ea792cae66a26a1 | 602 | py | Python |
docstrings-to-markdown/regex.py | buddheshwarnathkeshari/Neural_Net_Using_NumPy | c83e4f8a752e8f5b76b09e8137a99dec13129b93 | ["MIT"] | null | null | null |
docstrings-to-markdown/regex.py | buddheshwarnathkeshari/Neural_Net_Using_NumPy | c83e4f8a752e8f5b76b09e8137a99dec13129b93 | ["MIT"] | null | null | null |
docstrings-to-markdown/regex.py | buddheshwarnathkeshari/Neural_Net_Using_NumPy | c83e4f8a752e8f5b76b09e8137a99dec13129b93 | ["MIT"] | null | null | null |
# Extract class or function along with its docstring
# Docstring will be captured in group 1 and group 0 contains the whole class or function along with its docstring.
DEF_WITH_DOCS_REGEX = r'((def|class).*((\s*->\s*.*)|):\n\s*"""(\n\s*.*?)*""")'
# Given a docstring, identify each part, i.e., parameters and return values.
# Note: This regex works only with numpy style docstrings.
# Part Captured Group
# Description 1
# Parameters 3
# Returns 6
IDENTIFY_EACH_PART_REGEX = r'"""\n\s*(.*\s*)*?(Parameters\s*-*\s*((.*\s*)*?))?(Returns\s*-*\s*(.*\s*)*?)?"""'
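# Minimal usage sketch (not part of the original file; assumes the `re` module
# and a string `source` holding Python code with numpy-style docstrings):
#
#     import re
#     for match in re.finditer(DEF_WITH_DOCS_REGEX, source):
#         block = match.group(0)          # full def/class plus its docstring
#         parts = re.search(IDENTIFY_EACH_PART_REGEX, block)
#         if parts:
#             description = parts.group(1)
#             parameters = parts.group(3)
#             returns = parts.group(6)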
| 50.166667 | 114 | 0.61794 |
227d61559daafd75ff56e86d9e37e2fe0ac56c1a | 3,019 | py | Python |
scripts/save_google_sheets.py | pydicom/sendit | cc5ef13a37525d73643885e35709d68a35881905 | ["MIT"] | 6 | 2017-07-16T03:21:08.000Z | 2021-03-07T11:27:44.000Z |
scripts/save_google_sheets.py | pydicom/sendit | cc5ef13a37525d73643885e35709d68a35881905 | ["MIT"] | 16 | 2017-06-06T00:19:17.000Z | 2019-03-25T15:34:21.000Z |
scripts/save_google_sheets.py | pydicom/sendit | cc5ef13a37525d73643885e35709d68a35881905 | ["MIT"] | 7 | 2017-11-05T19:03:01.000Z | 2019-07-21T19:29:13.000Z |
#!/bin/env python
# Command line script to get GB/day from manager, then save to google sheet.
from som.api.google.sheets import Client
from datetime import datetime, timedelta
import subprocess
import argparse
import json
import os
import sys
def get_parser():
parser = argparse.ArgumentParser(
description="Sendit: save GB-day to Google Sheets")
parser.add_argument("--sheet_id", dest='sheet_id',
help="alpha-numerical string that is id for sheet",
type=str, required=True)
parser.add_argument("--days", dest='days',
help="number of days to ask for metric (default is 1)",
type=int, default=1)
    # Flag to control whether the new row is actually saved (otherwise the sheet is only printed)
parser.add_argument('--save', dest='save',
help="required flag to save new row (otherwise prints sheet)",
default=False, action='store_true')
return parser
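# Example invocation (illustrative; the sheet id is a placeholder and
# GOOGLE_SHEETS_CREDENTIALS must point at the client secrets file):
#
#     python save_google_sheets.py --sheet_id <SHEET_ID> --days 7 --save
#
# Without --save the script only prints the current sheet plus the new row.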
def main():
parser = get_parser()
try:
args = parser.parse_args()
except:
sys.exit(0)
command = ["python", "manage.py", "summary_metrics", "--days", str(args.days)]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
    result, error = process.communicate()
if isinstance(result,bytes):
result = result.decode('utf-8')
result = json.loads(result)
gb_day = result["gb_per_day"]
secrets = os.environ.get('GOOGLE_SHEETS_CREDENTIALS')
if secrets is None:
print("Please export client secrets file name at GOOGLE_SHEETS_CREDENTIALS")
sys.exit(1)
cli = Client()
# Define date range for metric
start_date = (datetime.now() - timedelta(days=args.days)).strftime("%m/%d/%Y")
end_date = datetime.now().strftime("%m/%d/%Y")
# Get previous values
values = cli.read_spreadsheet(sheet_id=args.sheet_id, range_name="A:E")
# Only update if we are sure about values
required = ['pipeline',
'start_date',
'end_date',
'G/day GetIt',
'G/day SendIt']
for h in range(len(required)):
if required[h] != values[0][h]:
print("Warning, sheet is possibly changed.")
print("Required: %s" %",".join(required))
print("Found: %s" %",".join(values[0]))
sys.exit(0)
# Create row, append
# pipeline start_date end_date G/day GetIt G/day SendIt
# Define new row, add
row = [1, # pipeline
start_date, # start_date
end_date, # end_date
None, # G/day GetIt
gb_day] # G/day SendIt
values.append(row)
for row in values:
print(' '.join([str(x) for x in row]))
# Update sheet
if args.save is True:
print("Saving result to sheet %s" %args.sheet_id)
result = cli.write_spreadsheet(args.sheet_id, values, range_name="A:E")
if __name__ == '__main__':
main()
| 29.028846 | 87 | 0.585956 |
ce103a92585cc540f17388411e4c5182176dcea5 | 5,934 | py | Python |
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_01_02MasterChefCan_bop_test.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | ["Apache-2.0"] | 33 | 2021-12-15T07:11:47.000Z | 2022-03-29T08:58:32.000Z |
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_01_02MasterChefCan_bop_test.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | ["Apache-2.0"] | 3 | 2021-12-15T11:39:54.000Z | 2022-03-29T07:24:23.000Z |
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_01_02MasterChefCan_bop_test.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | ["Apache-2.0"] | null | null | null |
_base_ = ["../../../_base_/deepim_base.py"]
OUTPUT_DIR = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_ycbvPbr_SO/01_02MasterChefCan"
INPUT = dict(
COLOR_AUG_PROB=0.8,
COLOR_AUG_TYPE="code",
COLOR_AUG_CODE=(
"Sequential(["
# Sometimes(0.5, PerspectiveTransform(0.05)),
# Sometimes(0.5, CropAndPad(percent=(-0.05, 0.1))),
# Sometimes(0.5, Affine(scale=(1.0, 1.2))),
"Sometimes(0.5, CoarseDropout( p=0.2, size_percent=0.05) ),"
"Sometimes(0.4, GaussianBlur((0., 3.))),"
"Sometimes(0.3, pillike.EnhanceSharpness(factor=(0., 50.))),"
"Sometimes(0.3, pillike.EnhanceContrast(factor=(0.2, 50.))),"
"Sometimes(0.5, pillike.EnhanceBrightness(factor=(0.1, 6.))),"
"Sometimes(0.3, pillike.EnhanceColor(factor=(0., 20.))),"
"Sometimes(0.5, Add((-25, 25), per_channel=0.3)),"
"Sometimes(0.3, Invert(0.2, per_channel=True)),"
"Sometimes(0.5, Multiply((0.6, 1.4), per_channel=0.5)),"
"Sometimes(0.5, Multiply((0.6, 1.4))),"
"Sometimes(0.1, AdditiveGaussianNoise(scale=10, per_channel=True)),"
"Sometimes(0.5, iaa.contrast.LinearContrast((0.5, 2.2), per_channel=0.3)),"
"Sometimes(0.5, Grayscale(alpha=(0.0, 1.0)))," # maybe remove for det
"], random_order=True)"
# cosy+aae
),
ZOOM_ENLARGE_SCALE=1.5,
BBOX_TYPE_TEST="from_pose", # from_pose | est | gt | gt_aug (TODO)
INIT_POSE_TYPE_TRAIN=["gt_noise"], # gt_noise | random | canonical
NOISE_ROT_STD_TRAIN=(15, 10, 5, 2.5, 1.25), # randomly choose one
NOISE_TRANS_STD_TRAIN=[(0.01, 0.01, 0.05), (0.01, 0.01, 0.01), (0.005, 0.005, 0.01)],
INIT_POSE_TYPE_TEST="est", # gt_noise | est | canonical
)
SOLVER = dict(
IMS_PER_BATCH=32,
TOTAL_EPOCHS=80,
LR_SCHEDULER_NAME="flat_and_anneal",
ANNEAL_METHOD="cosine", # "cosine"
ANNEAL_POINT=0.72,
# REL_STEPS=(0.3125, 0.625, 0.9375),
OPTIMIZER_CFG=dict(_delete_=True, type="Ranger", lr=1e-4, weight_decay=0),
WEIGHT_DECAY=0.0,
WARMUP_FACTOR=0.001,
WARMUP_ITERS=1000,
)
DATASETS = dict(
TRAIN=("ycbv_002_master_chef_can_train_pbr",),
TEST=("ycbv_bop_test",),
INIT_POSE_FILES_TEST=(
"datasets/BOP_DATASETS/ycbv/test/init_poses/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_ycbv_test_GdrnPose_with_yolov4_pbr_bbox.json",
),
INIT_POSE_THR=0.3,
DET_THR=0.3,
SYM_OBJS=[
"002_master_chef_can",
"024_bowl",
"025_mug",
"036_wood_block",
"040_large_marker",
"051_large_clamp",
"052_extra_large_clamp",
"061_foam_brick",
], # ycbv_bop
)
DATALOADER = dict(
# Number of data loading threads
NUM_WORKERS=4,
FILTER_VISIB_THR=0.1,
FILTER_EMPTY_DETS=True, # filter images with empty detections
)
MODEL = dict(
LOAD_DETS_TEST=False,
LOAD_POSES_TEST=True,
PIXEL_MEAN=[0.0, 0.0, 0.0],
PIXEL_STD=[255.0, 255.0, 255.0],
DEEPIM=dict(
NAME="DeepIM_FlowNet", # used module file name (define different model types)
TASK="refine", # refine | init | init+refine
NUM_CLASSES=21, # only valid for class aware
N_ITER_TRAIN=4,
N_ITER_TRAIN_WARM_EPOCH=4, # linearly increase the refine iter from 1 to N_ITER_TRAIN until this epoch
N_ITER_TEST=4,
## backbone
BACKBONE=dict(
PRETRAINED="pretrained_models/flownet/flownets_EPE1.951.pth.tar",
INIT_CFG=dict(
type="FlowNetS",
# [im_ren, im_obs]
in_channels=6,
use_bn=False,
out_flow_level="flow4",
out_concat4=True,
),
INPUT_H=512, # use squared image to easily combined with gdrn
INPUT_W=512,
INPUT_MASK=False,
),
FLAT_OP="flatten",
## pose head for delta R/T
POSE_HEAD=dict(
ROT_TYPE="ego_rot6d", # {ego|allo}_{quat|rot6d}
INIT_CFG=dict(
type="FC_RotTransHead",
in_dim=1024 * 8 * 8, # should match FLAT_OP
num_layers=2,
feat_dim=256,
norm="GN", # BN | GN | none
num_gn_groups=32,
act="gelu", # relu | lrelu | silu (swish) | gelu | mish
),
),
# mask head
MASK_HEAD=dict(
INIT_CFG=dict(
type="ConvOutHead",
in_dim=770,
num_feat_layers=0, # only output layer
feat_dim=256,
feat_kernel_size=3,
norm="GN",
num_gn_groups=32,
act="gelu",
out_kernel_size=3,
),
),
LOSS_CFG=dict(
# point matching loss ----------------
PM_LOSS_SYM=True, # use symmetric PM loss
PM_NORM_BY_EXTENT=False, # 1. / extent.max(1, keepdim=True)[0]
# if False, the trans loss is in point matching loss
PM_R_ONLY=False, # only do R loss in PM
PM_DISENTANGLE_T=False, # disentangle R/T
PM_DISENTANGLE_Z=True, # disentangle R/xy/z
PM_T_USE_POINTS=True, #
PM_LW=1.0,
# mask loss --------------------
MASK_CLASS_AWARE=False,
MASK_LOSS_TYPE="BCE", # L1 | BCE | CE
MASK_LOSS_GT="trunc", # trunc | visib | obj (not supported yet)
MASK_LW=1.0,
# flow loss ------------------
FLOW_LOSS_TYPE="L1", # L1 | L2
FLOW_LW=0.1,
),
),
)
VAL = dict(
DATASET_NAME="ycbv",
SPLIT_TYPE="",
SCRIPT_PATH="lib/pysixd/scripts/eval_pose_results_more.py",
TARGETS_FILENAME="test_targets_bop19.json",
ERROR_TYPES="mspd,mssd,vsd,reS,teS,reteS,ad",
USE_BOP=True, # whether to use bop toolkit
)
| 36.62963 | 169 | 0.572801 |
212bd9e272226cf0bcae9d332c5c3dbeddf53ad9 | 19,780 | py | Python |
tests/components/lovelace/test_init.py | XRyu/home-assistant | c9c707e368be159f0138a40d21fdea7a2a650ffe | ["Apache-2.0"] | null | null | null |
tests/components/lovelace/test_init.py | XRyu/home-assistant | c9c707e368be159f0138a40d21fdea7a2a650ffe | ["Apache-2.0"] | null | null | null |
tests/components/lovelace/test_init.py | XRyu/home-assistant | c9c707e368be159f0138a40d21fdea7a2a650ffe | ["Apache-2.0"] | null | null | null |
"""Test the Lovelace initialization."""
import os
import unittest
from unittest.mock import patch
from tempfile import mkdtemp
import pytest
from ruamel.yaml import YAML
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.components.lovelace import (load_yaml, migrate_config,
save_yaml,
UnsupportedYamlError)
TEST_YAML_A = """\
title: My Awesome Home
# Include external resources
resources:
- url: /local/my-custom-card.js
type: js
- url: /local/my-webfont.css
type: css
# Exclude entities from "Unused entities" view
excluded_entities:
- weblink.router
views:
# View tab title.
- title: Example
# Optional unique id for direct access /lovelace/${id}
id: example
# Optional background (overwrites the global background).
background: radial-gradient(crimson, skyblue)
# Each view can have a different theme applied.
theme: dark-mode
# The cards to show on this view.
cards:
# The filter card will filter entities for their state
- type: entity-filter
entities:
- device_tracker.paulus
- device_tracker.anne_there
state_filter:
- 'home'
card:
type: glance
title: People that are home
# The picture entity card will represent an entity with a picture
- type: picture-entity
image: https://www.home-assistant.io/images/default-social.png
entity: light.bed_light
# Specify a tab icon if you want the view tab to be an icon.
- icon: mdi:home-assistant
# Title of the view. Will be used as the tooltip for tab icon
title: Second view
cards:
- id: test
type: entities
title: Test card
# Entities card will take a list of entities and show their state.
- type: entities
# Title of the entities card
title: Example
# The entities here will be shown in the same order as specified.
# Each entry is an entity ID or a map with extra options.
entities:
- light.kitchen
- switch.ac
- entity: light.living_room
# Override the name to use
name: LR Lights
# The markdown card will render markdown text.
- type: markdown
title: Lovelace
content: >
Welcome to your **Lovelace UI**.
"""
TEST_YAML_B = """\
title: Home
views:
- title: Dashboard
id: dashboard
icon: mdi:home
cards:
- id: testid
type: vertical-stack
cards:
- type: picture-entity
entity: group.sample
name: Sample
image: /local/images/sample.jpg
tap_action: toggle
"""
# Test data that can not be loaded as YAML
TEST_BAD_YAML = """\
title: Home
views:
- title: Dashboard
icon: mdi:home
cards:
- id: testid
type: vertical-stack
"""
# Test unsupported YAML
TEST_UNSUP_YAML = """\
title: Home
views:
- title: Dashboard
icon: mdi:home
cards: !include cards.yaml
"""
class TestYAML(unittest.TestCase):
"""Test lovelace.yaml save and load."""
def setUp(self):
"""Set up for tests."""
self.tmp_dir = mkdtemp()
self.yaml = YAML(typ='rt')
def tearDown(self):
"""Clean up after tests."""
for fname in os.listdir(self.tmp_dir):
os.remove(os.path.join(self.tmp_dir, fname))
os.rmdir(self.tmp_dir)
def _path_for(self, leaf_name):
return os.path.join(self.tmp_dir, leaf_name+".yaml")
def test_save_and_load(self):
"""Test saving and loading back."""
fname = self._path_for("test1")
save_yaml(fname, self.yaml.load(TEST_YAML_A))
data = load_yaml(fname)
assert data == self.yaml.load(TEST_YAML_A)
def test_overwrite_and_reload(self):
"""Test that we can overwrite an existing file and read back."""
fname = self._path_for("test3")
save_yaml(fname, self.yaml.load(TEST_YAML_A))
save_yaml(fname, self.yaml.load(TEST_YAML_B))
data = load_yaml(fname)
assert data == self.yaml.load(TEST_YAML_B)
def test_load_bad_data(self):
"""Test error from trying to load unserialisable data."""
fname = self._path_for("test5")
with open(fname, "w") as fh:
fh.write(TEST_BAD_YAML)
with pytest.raises(HomeAssistantError):
load_yaml(fname)
def test_add_id(self):
"""Test if id is added."""
fname = self._path_for("test6")
with patch('homeassistant.components.lovelace.load_yaml',
return_value=self.yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml'):
data = migrate_config(fname)
assert 'id' in data['views'][0]['cards'][0]
assert 'id' in data['views'][1]
def test_id_not_changed(self):
"""Test if id is not changed if already exists."""
fname = self._path_for("test7")
with patch('homeassistant.components.lovelace.load_yaml',
return_value=self.yaml.load(TEST_YAML_B)):
data = migrate_config(fname)
assert data == self.yaml.load(TEST_YAML_B)
async def test_deprecated_lovelace_ui(hass, hass_ws_client):
"""Test lovelace_ui command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_config',
return_value={'hello': 'world'}):
await client.send_json({
'id': 5,
'type': 'frontend/lovelace_config',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
assert msg['result'] == {'hello': 'world'}
async def test_deprecated_lovelace_ui_not_found(hass, hass_ws_client):
"""Test lovelace_ui command cannot find file."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_config',
side_effect=FileNotFoundError):
await client.send_json({
'id': 5,
'type': 'frontend/lovelace_config',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'file_not_found'
async def test_deprecated_lovelace_ui_load_err(hass, hass_ws_client):
"""Test lovelace_ui command cannot find file."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_config',
side_effect=HomeAssistantError):
await client.send_json({
'id': 5,
'type': 'frontend/lovelace_config',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'load_error'
async def test_lovelace_ui(hass, hass_ws_client):
"""Test lovelace_ui command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_config',
return_value={'hello': 'world'}):
await client.send_json({
'id': 5,
'type': 'lovelace/config',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
assert msg['result'] == {'hello': 'world'}
async def test_lovelace_ui_not_found(hass, hass_ws_client):
"""Test lovelace_ui command cannot find file."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_config',
side_effect=FileNotFoundError):
await client.send_json({
'id': 5,
'type': 'lovelace/config',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'file_not_found'
async def test_lovelace_ui_load_err(hass, hass_ws_client):
"""Test lovelace_ui command load error."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_config',
side_effect=HomeAssistantError):
await client.send_json({
'id': 5,
'type': 'lovelace/config',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'load_error'
async def test_lovelace_ui_load_json_err(hass, hass_ws_client):
"""Test lovelace_ui command load error."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_config',
side_effect=UnsupportedYamlError):
await client.send_json({
'id': 5,
'type': 'lovelace/config',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'unsupported_error'
async def test_lovelace_get_card(hass, hass_ws_client):
"""Test get_card command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)):
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/get',
'card_id': 'test',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
assert msg['result'] == 'id: test\ntype: entities\ntitle: Test card\n'
async def test_lovelace_get_card_not_found(hass, hass_ws_client):
"""Test get_card command cannot find card."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)):
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/get',
'card_id': 'not_found',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'card_not_found'
async def test_lovelace_get_card_bad_yaml(hass, hass_ws_client):
"""Test get_card command bad yaml."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
with patch('homeassistant.components.lovelace.load_yaml',
side_effect=HomeAssistantError):
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/get',
'card_id': 'testid',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'load_error'
async def test_lovelace_update_card(hass, hass_ws_client):
"""Test update_card command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml') \
as save_yaml_mock:
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/update',
'card_id': 'test',
'card_config': 'id: test\ntype: glance\n',
})
msg = await client.receive_json()
result = save_yaml_mock.call_args_list[0][0][1]
assert result.mlget(['views', 1, 'cards', 0, 'type'],
list_ok=True) == 'glance'
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
async def test_lovelace_update_card_not_found(hass, hass_ws_client):
"""Test update_card command cannot find card."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)):
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/update',
'card_id': 'not_found',
'card_config': 'id: test\ntype: glance\n',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'card_not_found'
async def test_lovelace_update_card_bad_yaml(hass, hass_ws_client):
"""Test update_card command bad yaml."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.yaml_to_object',
side_effect=HomeAssistantError):
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/update',
'card_id': 'test',
'card_config': 'id: test\ntype: glance\n',
})
msg = await client.receive_json()
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success'] is False
assert msg['error']['code'] == 'save_error'
async def test_lovelace_add_card(hass, hass_ws_client):
"""Test add_card command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml') \
as save_yaml_mock:
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/add',
'view_id': 'example',
'card_config': 'id: test\ntype: added\n',
})
msg = await client.receive_json()
result = save_yaml_mock.call_args_list[0][0][1]
assert result.mlget(['views', 0, 'cards', 2, 'type'],
list_ok=True) == 'added'
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
async def test_lovelace_add_card_position(hass, hass_ws_client):
"""Test add_card command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml') \
as save_yaml_mock:
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/add',
'view_id': 'example',
'position': 0,
'card_config': 'id: test\ntype: added\n',
})
msg = await client.receive_json()
result = save_yaml_mock.call_args_list[0][0][1]
assert result.mlget(['views', 0, 'cards', 0, 'type'],
list_ok=True) == 'added'
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
async def test_lovelace_move_card_position(hass, hass_ws_client):
"""Test move_card command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml') \
as save_yaml_mock:
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/move',
'card_id': 'test',
'new_position': 2,
})
msg = await client.receive_json()
result = save_yaml_mock.call_args_list[0][0][1]
assert result.mlget(['views', 1, 'cards', 2, 'title'],
list_ok=True) == 'Test card'
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
async def test_lovelace_move_card_view(hass, hass_ws_client):
"""Test move_card to view command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml') \
as save_yaml_mock:
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/move',
'card_id': 'test',
'new_view_id': 'example',
})
msg = await client.receive_json()
result = save_yaml_mock.call_args_list[0][0][1]
assert result.mlget(['views', 0, 'cards', 2, 'title'],
list_ok=True) == 'Test card'
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
async def test_lovelace_move_card_view_position(hass, hass_ws_client):
"""Test move_card to view with position command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml') \
as save_yaml_mock:
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/move',
'card_id': 'test',
'new_view_id': 'example',
'new_position': 1,
})
msg = await client.receive_json()
result = save_yaml_mock.call_args_list[0][0][1]
assert result.mlget(['views', 0, 'cards', 1, 'title'],
list_ok=True) == 'Test card'
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
async def test_lovelace_delete_card(hass, hass_ws_client):
"""Test delete_card command."""
await async_setup_component(hass, 'lovelace')
client = await hass_ws_client(hass)
yaml = YAML(typ='rt')
with patch('homeassistant.components.lovelace.load_yaml',
return_value=yaml.load(TEST_YAML_A)), \
patch('homeassistant.components.lovelace.save_yaml') \
as save_yaml_mock:
await client.send_json({
'id': 5,
'type': 'lovelace/config/card/delete',
'card_id': 'test',
})
msg = await client.receive_json()
result = save_yaml_mock.call_args_list[0][0][1]
cards = result.mlget(['views', 1, 'cards'], list_ok=True)
assert len(cards) == 2
assert cards[0]['title'] == 'Example'
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
| 32.802653 | 74 | 0.616936 |
f2651455888c2486a80df24399e5cd210f4e42c6 | 784 | py | Python |
qemu/ui/keycodemapdb/tests/test.py | WUSTL-CSPL/RT-TEE | aafb3e9ff6c6e744c6bce1e42bcb198e1063efcc | ["MIT"] | 60 | 2020-10-14T07:11:48.000Z | 2022-02-14T23:00:51.000Z |
qemu/ui/keycodemapdb/tests/test.py | WUSTL-CSPL/RT-TEE | aafb3e9ff6c6e744c6bce1e42bcb198e1063efcc | ["MIT"] | 8 | 2020-10-19T02:17:19.000Z | 2022-01-15T05:52:46.000Z |
qemu/ui/keycodemapdb/tests/test.py | WUSTL-CSPL/RT-TEE | aafb3e9ff6c6e744c6bce1e42bcb198e1063efcc | ["MIT"] | 18 | 2022-03-19T04:41:04.000Z | 2022-03-31T03:32:12.000Z |
# Keycode Map Generator Python Tests
#
# Copyright 2017 Pierre Ossman for Cendio AB
#
# This file is dual license under the terms of the GPLv2 or later
# and 3-clause BSD licenses.
import osx2win32
import osx2win32_name
import osx2xkb
import osx2xkb_name
import html2win32
import html2win32_name
import osx
import osx_name
assert osx2win32.code_map_osx_to_win32[0x1d] == 0x30
assert osx2win32_name.name_map_osx_to_win32[0x1d] == "VK_0"
assert osx2xkb.code_map_osx_to_xkb[0x1d] == "AE10"
assert osx2xkb_name.name_map_osx_to_xkb[0x1d] == "AE10"
assert html2win32.code_map_html_to_win32["ControlLeft"] == 0x11
assert html2win32_name.name_map_html_to_win32["ControlLeft"] == "VK_CONTROL"
assert osx.code_table_osx[0x1d] == 0x3b
assert osx_name.name_table_osx[0x1d] == "Control"
| 25.290323
| 76
| 0.802296
|
914c8f84150041fe47b5a9ab2a6dbd6cdedcdcbf
| 1,435
|
bzl
|
Python
|
ThirdParty/MicrosoftML/Microsoft.Ml.bzl
|
andwak/sentiment-analysis
|
26b3d6988982238808e8acb2f7fd7eb1c0fda3d1
|
[
"MIT"
] | null | null | null |
ThirdParty/MicrosoftML/Microsoft.Ml.bzl
|
andwak/sentiment-analysis
|
26b3d6988982238808e8acb2f7fd7eb1c0fda3d1
|
[
"MIT"
] | null | null | null |
ThirdParty/MicrosoftML/Microsoft.Ml.bzl
|
andwak/sentiment-analysis
|
26b3d6988982238808e8acb2f7fd7eb1c0fda3d1
|
[
"MIT"
] | null | null | null |
package(default_visibility = [ "//visibility:public" ])
load("@io_bazel_rules_dotnet//dotnet:defs.bzl", "core_import_library")
core_import_library(
name = "MicrosoftMLAssemblies",
src = "lib/netstandard2.0/Microsoft.ML.dll",
version = "1.7.1",
deps = [
"MicrosoftMLAssembliesCore",
"MicrosoftMLAssembliesData",
"MicrosoftMLAssembliesKMeansClustering",
"MicrosoftMLAssembliesPCA",
"MicrosoftMLAssembliesStandardTrainers",
"MicrosoftMLAssembliesTransforms",
]
)
core_import_library(
name = "MicrosoftMLAssembliesCore",
src = "lib/netstandard2.0/Microsoft.ML.Core.dll",
version = "1.7.1",
)
core_import_library(
name = "MicrosoftMLAssembliesData",
src = "lib/netstandard2.0/Microsoft.ML.Data.dll",
version = "1.7.1",
)
core_import_library(
name = "MicrosoftMLAssembliesKMeansClustering",
src = "lib/netstandard2.0/Microsoft.ML.KMeansClustering.dll",
version = "1.7.1",
)
core_import_library(
name = "MicrosoftMLAssembliesPCA",
src = "lib/netstandard2.0/Microsoft.ML.PCA.dll",
version = "1.7.1",
)
core_import_library(
name = "MicrosoftMLAssembliesStandardTrainers",
src = "lib/netstandard2.0/Microsoft.ML.StandardTrainers.dll",
version = "1.7.1",
)
core_import_library(
name = "MicrosoftMLAssembliesTransforms",
src = "lib/netstandard2.0/Microsoft.ML.Transforms.dll",
version = "1.7.1",
)
| 27.596154 | 70 | 0.694077 |
212ffa7e30ec5c42606a96845c98c9ba2bc6563b | 1,966 | py | Python |
venv1/Lib/site-packages/tensorflow/contrib/kernel_methods/python/mappers/dense_kernel_mapper.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | ["Apache-2.0", "MIT"] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z |
Lib/site-packages/tensorflow/contrib/kernel_methods/python/mappers/dense_kernel_mapper.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | ["bzip2-1.0.6"] | 1 | 2021-05-20T00:58:04.000Z | 2021-05-20T00:58:04.000Z |
Lib/site-packages/tensorflow/contrib/kernel_methods/python/mappers/dense_kernel_mapper.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | ["bzip2-1.0.6"] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API class for dense (approximate) kernel mappers.
See ./random_fourier_features.py for a concrete instantiation of this class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
class InvalidShapeError(Exception):
"""Exception thrown when a tensor's shape deviates from an expected shape."""
@six.add_metaclass(abc.ABCMeta)
class DenseKernelMapper(object):
"""Abstract class for a kernel mapper that maps dense inputs to dense outputs.
This class is abstract. Users should not create instances of this class.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def map(self, input_tensor):
"""Main Dense-Tensor-In-Dense-Tensor-Out (DTIDTO) map method.
Should be implemented by subclasses.
Args:
input_tensor: The dense input tensor to be mapped using the (approximate)
kernel mapper.
"""
raise NotImplementedError('map is not implemented for {}.'.format(self))
@abc.abstractproperty
def name(self):
"""Returns the name of the kernel mapper."""
pass
@abc.abstractproperty
def output_dim(self):
"""Returns the output dimension of the mapping."""
pass
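# Minimal sketch of a concrete mapper (illustrative only, not part of this
# module): an "identity" mapping that passes the input tensor through unchanged.
#
#     class IdentityKernelMapper(DenseKernelMapper):
#
#         def __init__(self, dim):
#             self._dim = dim
#
#         def map(self, input_tensor):
#             return input_tensor
#
#         @property
#         def name(self):
#             return 'identity_kernel_mapper'
#
#         @property
#         def output_dim(self):
#             return self._dim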
| 32.766667 | 81 | 0.693286 |
26f123430f1fe4ff13e1b1add4abc3c4fc4cf87a | 6,918 | py | Python |
tensorflow_datasets/core/features/class_label_feature.py | atksh/datasets | 814058b31ebd99e418114016d60ab4d6f8f82070 | ["Apache-2.0"] | null | null | null |
tensorflow_datasets/core/features/class_label_feature.py | atksh/datasets | 814058b31ebd99e418114016d60ab4d6f8f82070 | ["Apache-2.0"] | null | null | null |
tensorflow_datasets/core/features/class_label_feature.py | atksh/datasets | 814058b31ebd99e418114016d60ab4d6f8f82070 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ClassLabel feature."""
import os
import six
import tensorflow as tf
from tensorflow_datasets.core import api_utils
from tensorflow_datasets.core.features import feature
class ClassLabel(feature.Tensor):
"""`FeatureConnector` for integer class labels."""
@api_utils.disallow_positional_args
def __init__(self, num_classes=None, names=None, names_file=None):
"""Constructs a ClassLabel FeatureConnector.
There are 3 ways to define a ClassLabel, which correspond to the 3
arguments:
* `num_classes`: create 0 to (num_classes-1) labels
* `names`: a list of label strings
* `names_file`: a file containing the list of labels.
Note: On python2, the strings are encoded as utf-8.
Args:
num_classes: `int`, number of classes. All labels must be < num_classes.
names: `list<str>`, string names for the integer classes. The
order in which the names are provided is kept.
names_file: `str`, path to a file with names for the integer
classes, one per line.
"""
super(ClassLabel, self).__init__(shape=(), dtype=tf.int64)
self._num_classes = None
self._str2int = None
self._int2str = None
# The label is explicitly set as undefined (no label defined)
if not sum(bool(a) for a in (num_classes, names, names_file)):
return
if sum(bool(a) for a in (num_classes, names, names_file)) != 1:
raise ValueError(
"Only a single argument of ClassLabel() should be provided."
)
if num_classes:
self._num_classes = num_classes
else:
self.names = names or _load_names_from_file(names_file)
@property
def num_classes(self):
return self._num_classes
@property
def names(self):
if not self._int2str:
return [tf.compat.as_text(str(i)) for i in range(self._num_classes)]
return list(self._int2str)
@names.setter
def names(self, new_names):
int2str = [tf.compat.as_text(name) for name in new_names]
# Names can only be defined once
if self._int2str is not None and self._int2str != int2str:
raise ValueError(
"Trying to overwrite already defined ClassLabel names. Previous: {} "
", new: {}".format(self._int2str, int2str)
)
# Set-up [new] names
self._int2str = int2str
self._str2int = {name: i for i, name in enumerate(self._int2str)}
if len(self._int2str) != len(self._str2int):
raise ValueError(
"Some label names are duplicated. Each label name should be unique."
)
# If num_classes has been defined, ensure that num_classes and names match
num_classes = len(self._str2int)
if self._num_classes is None:
self._num_classes = num_classes
elif self._num_classes != num_classes:
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
"Got {} names VS {} num_classes".format(num_classes, self._num_classes)
)
def str2int(self, str_value):
"""Conversion class name string => integer."""
str_value = tf.compat.as_text(str_value)
if self._str2int:
return self._str2int[str_value]
# No names provided, try to integerize
failed_parse = False
try:
int_value = int(str_value)
except ValueError:
failed_parse = True
if failed_parse or not 0 <= int_value < self._num_classes:
raise ValueError("Invalid string class label %s" % str_value)
return int_value
def int2str(self, int_value):
"""Conversion integer => class name string."""
if self._int2str:
# Maybe should support batched np array/eager tensors, to allow things
# like
# out_ids = model(inputs)
# labels = cifar10.info.features['label'].int2str(out_ids)
return self._int2str[int_value]
# No names provided, return str(int)
if not 0 <= int_value < self._num_classes:
raise ValueError("Invalid integer class label %d" % int_value)
return tf.compat.as_text(str(int_value))
def encode_example(self, example_data):
if self._num_classes is None:
raise ValueError(
"Trying to use ClassLabel feature with undefined number of class. "
"Please set ClassLabel.names or num_classes."
)
# If a string is given, convert to associated integer
if isinstance(example_data, six.string_types):
example_data = self.str2int(example_data)
# Allowing -1 to mean no label.
if not -1 <= example_data < self._num_classes:
raise ValueError(
"Class label %d greater than configured num_classes %d"
% (example_data, self._num_classes)
)
return example_data
def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Save names if defined
if self._str2int is not None:
names_filepath = _get_names_filepath(data_dir, feature_name)
_write_names_to_file(names_filepath, self.names)
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
names_filepath = _get_names_filepath(data_dir, feature_name)
if tf.io.gfile.exists(names_filepath):
self.names = _load_names_from_file(names_filepath)
def _additional_repr_info(self):
return {"num_classes": self.num_classes}
def _get_names_filepath(data_dir, feature_name):
return os.path.join(data_dir, "{}.labels.txt".format(feature_name))
def _load_names_from_file(names_filepath):
with tf.io.gfile.GFile(names_filepath, "r") as f:
return [
name.strip()
for name in tf.compat.as_text(f.read()).split("\n")
if name.strip() # Filter empty names
]
def _write_names_to_file(names_filepath, names):
with tf.io.gfile.GFile(names_filepath, "w") as f:
f.write("\n".join(names) + "\n")
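# A short usage sketch with hypothetical labels (not tied to any real dataset),
# exercising two of the three constructor forms documented above and the
# str2int/int2str round trip.
def _class_label_demo():
  labels = ClassLabel(names=["cat", "dog", "bird"])
  assert labels.num_classes == 3
  assert labels.str2int("dog") == 1
  assert labels.int2str(2) == "bird"
  # With only num_classes, labels are just stringified integers.
  anonymous = ClassLabel(num_classes=10)
  assert anonymous.str2int("7") == 7
  assert anonymous.int2str(7) == "7"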
| 36.603175
| 87
| 0.640214
|
84944a045e580baa3c9462c745933c74f30b2632
| 118
|
py
|
Python
|
code/exampleStrats/random60.py
|
SeanLossef/carykh-prisoners-dilemma-tournament
|
5da4571aa684edc780c950db32da4896d0118870
|
[
"MIT"
] | null | null | null |
code/exampleStrats/random60.py
|
SeanLossef/carykh-prisoners-dilemma-tournament
|
5da4571aa684edc780c950db32da4896d0118870
|
[
"MIT"
] | null | null | null |
code/exampleStrats/random60.py
|
SeanLossef/carykh-prisoners-dilemma-tournament
|
5da4571aa684edc780c950db32da4896d0118870
|
[
"MIT"
] | null | null | null |
import random
def strategy(history, memory):
    # Play move 0 with probability 0.60 and move 1 otherwise; the memory slot is unused.
    if random.random() < 0.60:
return 0, None
return 1, None
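# A quick illustrative check (not part of the tournament harness): estimate how
# often this strategy plays move 0 across many independent calls.
if __name__ == "__main__":
    rounds = 10000
    zeros = sum(1 for _ in range(rounds) if strategy([], None)[0] == 0)
    print("played move 0 in %.1f%% of %d rounds" % (100.0 * zeros / rounds, rounds))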
| 19.666667
| 30
| 0.635593
|
d73b9af0a3d698a69beeca9bca5960d7505c0675
| 200
|
py
|
Python
|
mpf/exceptions/driver_limits_error.py
|
Scottacus64/mpf
|
fcfb6c5698b9c7d8bf0eb64b021aaa389ea6478a
|
[
"MIT"
] | 163
|
2015-01-25T02:19:50.000Z
|
2022-03-26T12:00:28.000Z
|
mpf/exceptions/driver_limits_error.py
|
Scottacus64/mpf
|
fcfb6c5698b9c7d8bf0eb64b021aaa389ea6478a
|
[
"MIT"
] | 1,086
|
2015-03-23T19:53:17.000Z
|
2022-03-24T20:46:11.000Z
|
mpf/exceptions/driver_limits_error.py
|
Scottacus64/mpf
|
fcfb6c5698b9c7d8bf0eb64b021aaa389ea6478a
|
[
"MIT"
] | 148
|
2015-01-28T02:31:39.000Z
|
2022-03-22T13:54:01.000Z
|
"""A request was made to drive the driver outside its configured limits."""
class DriverLimitsError(AssertionError):
"""A request was made to drive the driver outside its configured limits."""
| 28.571429
| 79
| 0.75
|
f43d2b85476f625317394c36615df0d6ddb70db1
| 3,371
|
py
|
Python
|
alerts/webhooks.py
|
rh-cssre/glitchtip-backend
|
ae12fbd54532cff5fd3d7a72631ba18625bbf1de
|
[
"MIT"
] | null | null | null |
alerts/webhooks.py
|
rh-cssre/glitchtip-backend
|
ae12fbd54532cff5fd3d7a72631ba18625bbf1de
|
[
"MIT"
] | null | null | null |
alerts/webhooks.py
|
rh-cssre/glitchtip-backend
|
ae12fbd54532cff5fd3d7a72631ba18625bbf1de
|
[
"MIT"
] | null | null | null |
from typing import List, TYPE_CHECKING, Optional
from dataclasses import dataclass, asdict
import requests
if TYPE_CHECKING:
from issues.models import Issue
from .models import Notification
@dataclass
class WebhookAttachmentField:
title: str
value: str
short: bool
@dataclass
class WebhookAttachment:
title: str
title_link: str
text: str
image_url: Optional[str] = None
color: Optional[str] = None
fields: Optional[List[WebhookAttachmentField]] = None
mrkdown_in: Optional[List[str]] = None
@dataclass
class MSTeamsSection:
"""
Similar to WebhookAttachment but for MS Teams
https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using?tabs=cURL
"""
activityTitle: str
activitySubtitle: str
@dataclass
class WebhookPayload:
alias: str
text: str
attachments: List[WebhookAttachment]
sections: List[MSTeamsSection]
def send_webhook(
url: str,
message: str,
attachments: List[WebhookAttachment] = [],
sections: List[MSTeamsSection] = [],
):
data = WebhookPayload(
alias="GlitchTip", text=message, attachments=attachments, sections=sections
)
response = requests.post(url, json=asdict(data))
return response
def send_issue_as_webhook(url, issues: List["Issue"], issue_count: int = 1):
"""
Notification about issues via webhook.
url: Webhook URL
issues: This should be only the issues to send as attachment
issue_count - total issues, may be greater than len(issues)
"""
attachments: List[WebhookAttachment] = []
sections: List[MSTeamsSection] = []
for issue in issues:
fields = [
WebhookAttachmentField(
title="Project",
value=issue.project.name,
short=True,
)
]
environment = issue.tags.get("environment")
if environment:
fields.append(
WebhookAttachmentField(
title="Environment",
value=environment[0],
short=True,
)
)
release = issue.tags.get("release")
if release:
fields.append(
WebhookAttachmentField(
title="Release",
value=release[0],
short=False,
)
)
attachments.append(
WebhookAttachment(
mrkdown_in=["text"],
title=str(issue),
title_link=issue.get_detail_url(),
text=issue.culprit,
color=issue.get_hex_color(),
fields=fields,
)
)
sections.append(
MSTeamsSection(
activityTitle=str(issue),
activitySubtitle=f"[View Issue {issue.short_id_display}]({issue.get_detail_url()})",
)
)
message = "GlitchTip Alert"
if issue_count > 1:
message += f" ({issue_count} issues)"
return send_webhook(url, message, attachments, sections)
def send_webhook_notification(notification: "Notification", url: str):
issue_count = notification.issues.count()
issues = notification.issues.all()[:3] # Show no more than three
send_issue_as_webhook(url, issues, issue_count)
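# A minimal usage sketch with a hypothetical webhook URL: it builds one attachment
# and one MS Teams section by hand and posts them through send_webhook() above.
def send_demo_alert(url: str = "https://example.com/hooks/incoming"):
    attachment = WebhookAttachment(
        title="Example issue",
        title_link="https://example.com/issues/1",
        text="NullPointerException in views.py",
        color="#e52b50",
        fields=[WebhookAttachmentField(title="Project", value="demo", short=True)],
    )
    section = MSTeamsSection(
        activityTitle="Example issue",
        activitySubtitle="[View Issue DEMO-1](https://example.com/issues/1)",
    )
    # Returns the requests.Response from the webhook endpoint.
    return send_webhook(url, "GlitchTip Alert (demo)", [attachment], [section])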
| 27.631148
| 118
| 0.602492
|
5ab37bfba31cca4a20ba456e47ff3749cf973674
| 3,229
|
py
|
Python
|
examples/basic_operations/update_expanded_text_ad.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
examples/basic_operations/update_expanded_text_ad.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
examples/basic_operations/update_expanded_text_ad.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates an expanded text ad.
To get expanded text ads, run get_expanded_text_ads.py.
"""
import argparse
import sys
import uuid
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
from google.api_core import protobuf_helpers
# [START update_expanded_text_ad]
def main(client, customer_id, ad_id):
ad_service = client.get_service("AdService", version="v6")
ad_operation = client.get_type("AdOperation", version="v6")
# Update ad operation.
ad = ad_operation.update
ad.resource_name = ad_service.ad_path(customer_id, ad_id)
ad.expanded_text_ad.headline_part1 = (
f"Cruise to Pluto {str(uuid.uuid4())[:8]}"
)
ad.expanded_text_ad.headline_part2 = "Tickets on sale now"
ad.expanded_text_ad.description = "Best space cruise ever."
ad.final_urls.append("http://www.example.com")
ad.final_mobile_urls.append("http://www.example.com/mobile")
fm = protobuf_helpers.field_mask(None, ad)
ad_operation.update_mask.CopyFrom(fm)
# Updates the ad.
try:
ad_response = ad_service.mutate_ads(customer_id, [ad_operation])
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
print(
f'Ad with resource name "{ad_response.results[0].resource_name}" '
"was updated."
)
# [END update_expanded_text_ad]
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = GoogleAdsClient.load_from_storage()
parser = argparse.ArgumentParser(
description=(
"Updates the specified expanded text ad, "
"for the given customer ID."
)
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-i", "--ad_id", type=str, required=True, help="The ad ID."
)
args = parser.parse_args()
main(google_ads_client, args.customer_id, args.ad_id)
| 33.28866
| 77
| 0.683493
|
addb316947f31f9ea20843e3b293d324a45781e5
| 1,347
|
py
|
Python
|
college-predictor/data_prep.py
|
paulliwali/basketball-stats
|
d73a12eb7944665225d5ca2a3b41be8856c08114
|
[
"MIT"
] | null | null | null |
college-predictor/data_prep.py
|
paulliwali/basketball-stats
|
d73a12eb7944665225d5ca2a3b41be8856c08114
|
[
"MIT"
] | 1
|
2019-11-28T16:54:17.000Z
|
2019-11-28T16:54:17.000Z
|
college-predictor/data_prep.py
|
paulliwali/Basketball-Stats
|
d73a12eb7944665225d5ca2a3b41be8856c08114
|
[
"MIT"
] | null | null | null |
import pandas
import scipy
import numpy
import csv
import os
import sklearn.preprocessing
def rescaleData(X, Y):
# Rescale the input data to a range between 0 to 1
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0,1))
rescaledX = scaler.fit_transform(X)
return rescaledX
def stardardizeData(X, Y):
    # Standardize the input data to a Gaussian distribution with mean of 0
# and std.dev of 1
scaler = sklearn.preprocessing.StandardScaler().fit(X)
standardX = scaler.transform(X)
return standardX
def commentHighSchoolPlayers(datafile):
    # Run through the input data files and comment out lines where players have 0s in
    # all the statistical categories
dir = os.path.dirname(__file__)
datafilepath = os.path.join(dir, 'data', datafile)
newdatafilepath = os.path.join(dir, 'data', 'working', datafile)
r = csv.reader(open(datafilepath, 'r'))
lines = list(r)
for rows in lines:
if rows.count('0') > 10:
rows[0] = "#" + str(rows[0])
elif "Did Not Attend College" in rows:
rows[0] = '#' + str(rows[0])
elif "College Not Found" in rows:
rows[0] = '#' + str(rows[0])
else:
            pass
print(rows)
writer = csv.writer(open(newdatafilepath, 'w', newline=''))
writer.writerows(lines)
return
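# A small usage sketch under stated assumptions: X is a plain numeric feature matrix,
# Y is unused by the two scaling helpers (mirroring their signatures), and
# 'example.csv' is a hypothetical file inside the data/ folder.
if __name__ == "__main__":
    X = numpy.array([[10.0, 200.0], [20.0, 400.0], [30.0, 800.0]])
    Y = numpy.array([0, 1, 1])
    print(rescaleData(X, Y))        # each column squeezed into [0, 1]
    print(stardardizeData(X, Y))    # each column centered to mean 0, std.dev 1
    # Comment out high-school players in data/example.csv and write the cleaned
    # copy to data/working/example.csv:
    # commentHighSchoolPlayers('example.csv')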
| 31.325581
| 88
| 0.650334
|
edb94dcd99f9ec9d34c1b2511b6ba708b5924519
| 10,701
|
py
|
Python
|
functions/framework.py
|
aswad32/fbscrapper
|
3c5a198c4b4a9ed5d52b9fcf9d71ecb74fc58ad0
|
[
"Apache-2.0"
] | 1
|
2020-02-02T18:05:03.000Z
|
2020-02-02T18:05:03.000Z
|
functions/framework.py
|
aswad32/fbscrapper
|
3c5a198c4b4a9ed5d52b9fcf9d71ecb74fc58ad0
|
[
"Apache-2.0"
] | null | null | null |
functions/framework.py
|
aswad32/fbscrapper
|
3c5a198c4b4a9ed5d52b9fcf9d71ecb74fc58ad0
|
[
"Apache-2.0"
] | null | null | null |
from bs4 import BeautifulSoup
from urlparse import parse_qs, urlparse
import moment
import database
import utility
img_skip_regex = ['-PAXP-deijE.gif', 'p32x32', '-pz5JhcNQ9P.png']
'''
Params:
@based_postid: not applicable for search query results; can be left as an empty '' string
@content: base content element for each post
@validateExists: set to False when you want to skip the duplicate-post validation
@postToGroupMethod: 1 for the normal post layout, 2 for the shared-post layout
@query_string: search keyword
'''
def loadPost(based_postid, content, validateExists, postToGroupMethod, query_string):
profile_element = content.find('a', { 'class': '_5pb8' })
    # when scraping a personal account feed, the profile element can be missing on a post;
    # skip the post if that occurs
if profile_element is None:
return 404
fbid_url = profile_element['data-hovercard']
fbid = utility.getFbId(fbid_url)
profile_link = profile_element['href']
profile_picture = profile_element.find('img')['src']
profile_name_element = content.find('span', { 'class': 'fwb'})
profile_name = profile_name_element.find('a').getText().encode('utf-8')
link_date_post_element = content.find('span', { 'class': 'fsm' })
post_link = link_date_post_element.find('a')['href']
    # when a user shares an image or video there are usually two post ids;
    # we need to use the post id originally captured by the system rather than
    # the image / video post id to get the number of shares and likes
if not based_postid == '':
post_id = based_postid
else:
post_id = utility.getPostId(post_link)
print post_id
if validateExists:
# make sure skip the rest of the process if the data already existed
# if more than 10 data existed on straight loop break the loop end the process
if database.postExists(post_id):
return None
date_post = link_date_post_element.find('abbr')['data-utime']
if not date_post == "":
post_created_time = moment.unix(float(date_post), utc=True).date
else:
post_created_time = ""
posted_to_name = ""
posted_to_link = ""
if postToGroupMethod == 1:
posted_to_element = profile_name_element.find('a', { 'class': '_wpv'})
posted_to_name, posted_to_link = loadPostToGroupLayoutNormal(posted_to_element)
elif postToGroupMethod == 2:
posted_to_main_element = content.find('span', { 'class': 'fcg'})
posted_to_name, posted_to_link = loadPostToGroupLayoutShared(posted_to_main_element)
user_content = content.find('div', { 'class': 'userContent' })
if user_content is None:
return 404
status_message = user_content.find_all('p')
post_message = ""
if not status_message is None:
for msg in status_message:
post_message += msg.getText().encode('ascii', 'ignore')
# facebook will hide extra message and it's located inside div(class='text_exposed_show')
text_exposed = user_content.find('div', { 'class': 'text_exposed_show'})
if not text_exposed is None:
status_message_extra = text_exposed.find_all('p')
if not status_message_extra is None:
for msg in status_message_extra:
post_message += msg.getText().encode('ascii', 'ignore')
post_message = post_message.replace('...', '')
# retrieve facebook video post
video = {
"direct_url": "",
"embed_url": ""
}
if 'videos' in post_link:
post_video_direct_url, post_video_embed_url = utility.buildEmbedVideoUrl(post_id)
video = {
"direct_url": post_video_direct_url,
"embed_url": post_video_embed_url
}
    # Start loading attachments.
    # Here we scrape information about:
# 1. A post that sharing another profile post message
# 2. A post that sharing another profile post message with external link
# 3. A post that sharing external link
based_attachment_element = content.find('div', { 'class': '_3x-2'})
post_personal_attach, post_img_attachment = loadAttachment(based_attachment_element)
textContent = post_message + ' ' + post_personal_attach['message'] + ' ' + post_personal_attach['attachment']['description']
hashtag = utility.buildHashtag(textContent)
user_post = {
"fbid": str(fbid),
"profile_name": str(profile_name),
"profile_link": str(profile_link),
"profile_picture": str(profile_picture),
"postid": str(post_id),
"link": str(post_link),
"posted_to": {
"name": str(posted_to_name),
"link": str(posted_to_link)
},
"created_time": post_created_time,
"message": post_message,
"attachment_shared": post_personal_attach,
"attachment_img": post_img_attachment,
"attachment_video": video,
"connections": {
"likes": {
"count": 0,
"profiles": []
},
"shares": {
"count": 0,
"profiles": []
},
"comments": {
"count": 0,
"story": []
}
},
"query_string": query_string,
"hashtag": hashtag
}
return user_post
def loadPostToGroupLayoutShared(element):
posted_to_name = ""
posted_to_link = ""
if not element is None:
posted_to_element = element.find_all('a', { 'class': 'profileLink'})
posted_to_element_text = element.getText().encode('ascii', 'ignore')
        # this is a workaround to find out whether this is
        # a facebook post to a group or not
if 'group' in posted_to_element_text:
if len(posted_to_element) == 2:
posted_to_name = posted_to_element[1].getText().encode('ascii', 'ignore')
posted_to_link = posted_to_element[1]['href']
return posted_to_name, posted_to_link
def loadPostToGroupLayoutNormal(element):
posted_to_name = ""
posted_to_link = ""
if not element is None:
posted_to_name = element.getText().encode('utf-8')
posted_to_link = element['href']
return posted_to_name, posted_to_link
def loadAttachment(based_attachment_element):
if not based_attachment_element is None:
based_element = based_attachment_element.find('div', {'class': '_5r69'})
if not based_element is None:
owner_element = based_element.find('span', { 'class': 'fwb'})
if not owner_element is None:
owner_name = owner_element.getText().encode('ascii', 'ignore').strip()
owner_account = (owner_element.find('a')['href']).encode('ascii', 'ignore').strip()
owner_fbid_url = owner_element.find('a')['data-hovercard']
owner_fbid = utility.getFbId(owner_fbid_url)
else:
owner_name = ""
owner_account = ""
owner_fbid = ""
time_element = based_element.find('div', {'class': '_5pcp'})
if not time_element is None:
time_post = str(time_element.find('abbr')['data-utime'])
#update time epoch to utc datetime
created_time = moment.unix(float(time_post), utc=True).date
else:
created_time = ""
#post_link_element
post_link_element = based_element.find('a', {'class': '_5pcq'})
if not post_link_element is None:
share_post_link = post_link_element['href'].encode('ascii', 'ignore').strip()
share_post_id = utility.getPostId(share_post_link)
else:
share_post_link = ""
share_post_id = ""
post_element = based_element.find('div', { 'class': '_5pco' })
if not post_element is None:
post_element_paragraph = post_element.find_all('p')
posts = ""
for msg in post_element_paragraph:
posts += msg.getText().encode('ascii', 'ignore').strip()
# facebook will hide extra message and it's located inside div(class='text_exposed_show')
text_exposed = based_element.find('div', { 'class': 'text_exposed_show'})
if not text_exposed is None:
posts_extra = text_exposed.find_all('p')
for msg in posts_extra:
posts += msg.getText().encode('ascii', 'ignore').strip()
posts = posts.replace('...', '')
else:
posts = ""
# retrieve facebook video post
video = {
"direct_url": "",
"embed_url": ""
}
if 'videos' in share_post_link:
print "share post id", share_post_id
post_video_direct_url, post_video_embed_url = utility.buildEmbedVideoUrl(share_post_id)
video = {
"direct_url": post_video_direct_url,
"embed_url": post_video_embed_url
}
# retrieve attachment link
external_attachment = externalAttachment(based_element)
post_personal_attach = {
"owner" : {
"fbid": str(owner_fbid),
"name": str(owner_name),
"account": str(owner_account)
},
"postid": str(share_post_id),
"created_time": created_time,
"message": str(posts),
"link": str(share_post_link),
"attachment_video": video,
"attachment": external_attachment
}
else:
#in case the post only contains external web link attachment
external_attachment = externalAttachment(based_attachment_element)
post_personal_attach = {
"owner" : {
"fbid": "",
"name": "",
"account": ""
},
"created_time": "",
"message": "",
"link": "",
"postid": "",
"attachment_video": {
"direct_url": "",
"embed_url": ""
},
"attachment": external_attachment
}
#get the share picture
post_img_element = based_attachment_element.find_all('img', {'class': 'img'})
post_img_attachment = imgAttachment(post_img_element)
return post_personal_attach, post_img_attachment
def externalAttachment(basedElement):
attachment_title = ""
attachment_link = ""
attachment_desc = ""
attachment_source = ""
externalElement = basedElement.find('div', { 'class': '_6m3' })
if not externalElement is None:
title = externalElement.find('div', { 'class': '_6m6'})
attachment_title = title.getText().encode('ascii', 'ignore').strip()
attachment_link_element = externalElement.findNext('a', {'class': '_52c6'})
if not attachment_link_element is None:
attachment_link = attachment_link_element['href'].encode('ascii', 'ignore').strip()
attachment_desc_element = externalElement.find('div', {'class': '_6m7'})
if not attachment_desc_element is None:
attachment_desc = attachment_desc_element.getText().encode('ascii', 'ignore').strip()
attachment_source = externalElement.find('div', {'class': '_59tj'}).getText().encode('ascii', 'ignore').strip()
return {
"title": str(attachment_title) ,
"description": str(attachment_desc),
"link": str(attachment_link) ,
"source": str(attachment_source)
}
def imgAttachment(imgElement):
post_img_attachment = []
if not imgElement is None:
for img in imgElement:
img_src = str(img['src'])
if not any(x in img_src for x in img_skip_regex):
post_img_attachment.append(str(img['src']))
return post_img_attachment
'''
Check the layout for type of account that do the post
permalinkPost = pages
stream_pagelet = personal
pagelet_group_mall = group
'''
def checkLayoutAccount(based):
postElement = None
element = [
{'selector': 'class', 'name': 'permalinkPost'},
{'selector': 'id', 'name': 'stream_pagelet'},
{'selector': 'id', 'name':'pagelet_group_mall'}
]
for el in element:
postElement = based.find('div', { el['selector'] : el['name'] })
if not postElement is None:
break;
return postElement
| 30.838617
| 125
| 0.704514
|
fb2b18842dfb1e82da76bdada64b0e997ac999e3
| 10,393
|
py
|
Python
|
adafruit_dht.py
|
michaellass/Adafruit_CircuitPython_DHT
|
35cc6b451822e3a0f77f21a6f109cb843d660887
|
[
"MIT"
] | 73
|
2020-05-02T13:48:27.000Z
|
2022-03-26T13:15:10.000Z
|
adafruit_dht.py
|
michaellass/Adafruit_CircuitPython_DHT
|
35cc6b451822e3a0f77f21a6f109cb843d660887
|
[
"MIT"
] | null | null | null |
adafruit_dht.py
|
michaellass/Adafruit_CircuitPython_DHT
|
35cc6b451822e3a0f77f21a6f109cb843d660887
|
[
"MIT"
] | 50
|
2020-05-15T13:57:28.000Z
|
2022-03-30T14:03:33.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2017 Mike McWethy for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:mod:`adafruit_dhtlib`
======================
CircuitPython support for the DHT11 and DHT22 temperature and humidity devices.
* Author(s): Mike McWethy
"""
import array
import time
from digitalio import DigitalInOut, Pull, Direction
_USE_PULSEIO = False
try:
from pulseio import PulseIn
_USE_PULSEIO = True
except ImportError:
pass # This is OK, we'll try to bitbang it!
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_DHT.git"
class DHTBase:
""" base support for DHT11 and DHT22 devices
"""
__hiLevel = 51
def __init__(self, dht11, pin, trig_wait):
"""
:param boolean dht11: True if device is DHT11, otherwise DHT22.
:param ~board.Pin pin: digital pin used for communication
:param int trig_wait: length of time to hold trigger in LOW state (microseconds)
"""
self._dht11 = dht11
self._pin = pin
self._trig_wait = trig_wait
self._last_called = 0
self._humidity = None
self._temperature = None
# We don't use a context because linux-based systems are sluggish
# and we're better off having a running process
if _USE_PULSEIO:
self.pulse_in = PulseIn(self._pin, 81, True)
def _pulses_to_binary(self, pulses, start, stop):
"""Takes pulses, a list of transition times, and converts
        them to 1's and 0's. The pulses array contains the transition times.
        pulses starts with a low transition time followed by a high transition time,
        then a low followed by a high and so on. The low transition times are
        ignored. Only the high transition times are used. If the high
        transition time is greater than __hiLevel, that counts as a bit=1; if the
        high transition time is less than __hiLevel, that counts as a bit=0.
start is the starting index in pulses to start converting
stop is the index to convert upto but not including
Returns an integer containing the converted 1 and 0 bits
"""
binary = 0
hi_sig = False
for bit_inx in range(start, stop):
if hi_sig:
bit = 0
if pulses[bit_inx] > self.__hiLevel:
bit = 1
binary = binary << 1 | bit
hi_sig = not hi_sig
return binary
def _get_pulses_pulseio(self):
""" _get_pulses implements the communication protcol for
DHT11 and DHT22 type devices. It sends a start signal
of a specific length and listens and measures the
return signal lengths.
return pulses (array.array uint16) contains alternating high and low
transition times starting with a low transition time. Normally
pulses will have 81 elements for the DHT11/22 type devices.
"""
pulses = array.array("H")
if _USE_PULSEIO:
            # The DHT type devices use a specialized 1-wire protocol
# The microprocessor first sends a LOW signal for a
# specific length of time. Then the device sends back a
            # series of HIGH and LOW signals. The length of the HIGH signals
            # represents the device values.
self.pulse_in.pause()
self.pulse_in.clear()
self.pulse_in.resume(self._trig_wait)
# loop until we get the return pulse we need or
# time out after 1/4 second
time.sleep(0.25)
self.pulse_in.pause()
while self.pulse_in:
pulses.append(self.pulse_in.popleft())
return pulses
def _get_pulses_bitbang(self):
""" _get_pulses implements the communication protcol for
DHT11 and DHT22 type devices. It sends a start signal
of a specific length and listens and measures the
return signal lengths.
return pulses (array.array uint16) contains alternating high and low
transition times starting with a low transition time. Normally
pulses will have 81 elements for the DHT11/22 type devices.
"""
pulses = array.array("H")
with DigitalInOut(self._pin) as dhtpin:
# we will bitbang if no pulsein capability
transitions = []
# Signal by setting pin high, then low, and releasing
dhtpin.direction = Direction.OUTPUT
dhtpin.value = True
time.sleep(0.1)
dhtpin.value = False
time.sleep(0.001)
timestamp = time.monotonic() # take timestamp
dhtval = True # start with dht pin true because its pulled up
dhtpin.direction = Direction.INPUT
dhtpin.pull = Pull.UP
while time.monotonic() - timestamp < 0.25:
if dhtval != dhtpin.value:
dhtval = not dhtval # we toggled
transitions.append(time.monotonic()) # save the timestamp
            # convert transitions to microsecond delta pulses:
# use last 81 pulses
transition_start = max(1, len(transitions) - 81)
for i in range(transition_start, len(transitions)):
pulses_micro_sec = int(1000000 * (transitions[i] - transitions[i - 1]))
pulses.append(min(pulses_micro_sec, 65535))
return pulses
def measure(self):
""" measure runs the communications to the DHT11/22 type device.
if successful, the class properties temperature and humidity will
return the reading returned from the device.
        Raises RuntimeError exception for checksum failure and for insufficient
data returned from the device (try again)
"""
delay_between_readings = 2 # 2 seconds per read according to datasheet
# Initiate new reading if this is the first call or if sufficient delay
# If delay not sufficient - return previous reading.
# This allows back to back access for temperature and humidity for same reading
if (
self._last_called == 0
or (time.monotonic() - self._last_called) > delay_between_readings
):
self._last_called = time.monotonic()
new_temperature = 0
new_humidity = 0
if _USE_PULSEIO:
pulses = self._get_pulses_pulseio()
else:
pulses = self._get_pulses_bitbang()
# print(len(pulses), "pulses:", [x for x in pulses])
if len(pulses) < 10:
# Probably a connection issue!
raise RuntimeError("DHT sensor not found, check wiring")
if len(pulses) < 80:
# We got *some* data just not 81 bits
raise RuntimeError("A full buffer was not returned. Try again.")
buf = array.array("B")
for byte_start in range(0, 80, 16):
buf.append(self._pulses_to_binary(pulses, byte_start, byte_start + 16))
if self._dht11:
# humidity is 1 byte
new_humidity = buf[0]
# temperature is 1 byte
new_temperature = buf[2]
else:
# humidity is 2 bytes
new_humidity = ((buf[0] << 8) | buf[1]) / 10
# temperature is 2 bytes
# MSB is sign, bits 0-14 are magnitude)
new_temperature = (((buf[2] & 0x7F) << 8) | buf[3]) / 10
# set sign
if buf[2] & 0x80:
new_temperature = -new_temperature
# calc checksum
chk_sum = 0
for b in buf[0:4]:
chk_sum += b
# checksum is the last byte
if chk_sum & 0xFF != buf[4]:
# check sum failed to validate
raise RuntimeError("Checksum did not validate. Try again.")
if new_humidity < 0 or new_humidity > 100:
                # We received implausible data
                raise RuntimeError("Received implausible data. Try again.")
self._temperature = new_temperature
self._humidity = new_humidity
@property
def temperature(self):
""" temperature current reading. It makes sure a reading is available
        Raises RuntimeError exception for checksum failure and for insufficient
data returned from the device (try again)
"""
self.measure()
return self._temperature
@property
def humidity(self):
""" humidity current reading. It makes sure a reading is available
        Raises RuntimeError exception for checksum failure and for insufficient
data returned from the device (try again)
"""
self.measure()
return self._humidity
class DHT11(DHTBase):
""" Support for DHT11 device.
:param ~board.Pin pin: digital pin used for communication
"""
def __init__(self, pin):
super().__init__(True, pin, 18000)
class DHT22(DHTBase):
""" Support for DHT22 device.
:param ~board.Pin pin: digital pin used for communication
"""
def __init__(self, pin):
super().__init__(False, pin, 1000)
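# A minimal usage sketch. It assumes CircuitPython/Blinka with a DHT22 wired to
# pin D4; both the board pin and the sensor model are assumptions, not part of
# this module.
if __name__ == "__main__":
    import board

    dht = DHT22(board.D4)
    while True:
        try:
            print("Temp: {:.1f} C  Humidity: {:.1f} %".format(dht.temperature, dht.humidity))
        except RuntimeError as error:
            # DHT reads fail fairly often; just report and retry on the next pass.
            print(error.args[0])
        time.sleep(2.0)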
| 37.930657
| 88
| 0.617531
|
b6cced92ce364ebca69881dafc073db4f18cce7f
| 538
|
py
|
Python
|
setup.py
|
stephleighton/placentagen
|
968685955fce7ca5503a85713113f62c1e5c74a4
|
[
"Apache-2.0"
] | 2
|
2018-04-04T21:53:45.000Z
|
2019-05-28T22:13:26.000Z
|
setup.py
|
stephleighton/placentagen
|
968685955fce7ca5503a85713113f62c1e5c74a4
|
[
"Apache-2.0"
] | 31
|
2018-03-14T01:43:19.000Z
|
2020-07-23T21:23:27.000Z
|
setup.py
|
stephleighton/placentagen
|
968685955fce7ca5503a85713113f62c1e5c74a4
|
[
"Apache-2.0"
] | 6
|
2018-04-29T23:42:48.000Z
|
2021-09-14T01:33:53.000Z
|
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='placentagen',
version='0.1.0',
packages=find_packages('source', exclude=['tests', 'tests.*', 'docs']),
package_dir={'': 'source'},
url='https://github.com/alysclark/placentagen.git',
license=license,
author='Alys Clark',
author_email='alys.clark@auckland.ac.nz',
test_suite='nose.collector',
tests_require=['nose'],
description=''
)
| 24.454545
| 75
| 0.643123
|
04709cefc48ed91ca43a65e304bf53e03fc7d435
| 4,486
|
py
|
Python
|
face_lib/detection.py
|
cyy0523xc/face_lib
|
cc873b8ca466e06424c36e4712e8cae80d9f11c9
|
[
"MIT"
] | null | null | null |
face_lib/detection.py
|
cyy0523xc/face_lib
|
cc873b8ca466e06424c36e4712e8cae80d9f11c9
|
[
"MIT"
] | null | null | null |
face_lib/detection.py
|
cyy0523xc/face_lib
|
cc873b8ca466e06424c36e4712e8cae80d9f11c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# face detetion
# Author: alex
# Created Time: Sunday, 2018-12-09 11:19:47
import numpy as np
import cv2
from cv2 import dnn
import dlib
from .resource import predictor_5_point_model_location, \
predictor_68_point_model_location, \
cnn_face_detector_model_location, \
face_recognition_model_location, \
dnn_prototxt_location, dnn_caffemodel_location, \
haarcascade_frontalface_location
class conf:
"""config for dnn"""
in_width = 300
in_height = 300
    threshold = 0.6  # confidence threshold
# support algo
hog_detector = dlib.get_frontal_face_detector()
haar_detector = cv2.CascadeClassifier(haarcascade_frontalface_location())
cnn_detector = None
dnn_detector = None
#
predictor = dlib.shape_predictor(predictor_68_point_model_location())
face_encoder = dlib.face_recognition_model_v1(face_recognition_model_location())
def set_threshold(threshold):
conf.threshold = threshold
def set_cnn_model(model_path: str = None):
global cnn_detector
    if model_path is None:
model_path = cnn_face_detector_model_location()
cnn_detector = dlib.cnn_face_detection_model_v1(model_path)
def set_dnn_model(model_path: str = None, prototxt_path: str = None):
global dnn_detector
if model_path is None:
model_path = dnn_caffemodel_location()
prototxt_path = dnn_prototxt_location()
dnn_detector = dnn.readNetFromCaffe(prototxt_path, model_path)
def set_predictor(use_small=False):
global predictor
if use_small:
predictor = dlib.shape_predictor(predictor_5_point_model_location())
else:
predictor = dlib.shape_predictor(predictor_68_point_model_location())
def detect(img, model='dnn', number_of_times_to_upsample=1):
"""face detection, 人脸检测
Args:
img: 图片对象
model: 支持的识别算法: hog, cnn, haar, dnn
Returns:
[(left, top, right, bottom)]: 每个人脸的左上角坐标和右下角坐标
[confidence]: 每个人脸的置信度,注意该参数对于cnn和dnn才有意义
see: https://www.learnopencv.com/face-detection-opencv-dlib-and-deep-learning-c-python/
"""
if model == 'hog':
rects = hog_detector(img, number_of_times_to_upsample)
rects = [(r.left(), r.top(), r.right(), r.bottom())
for r in rects]
return rects, [1]*len(rects)
elif model == 'cnn':
if cnn_detector is None:
set_cnn_model()
tmp_rects = cnn_detector(img, number_of_times_to_upsample)
rects = [(r.rect.left(), r.rect.top(), r.rect.right(), r.rect.bottom())
for r in tmp_rects]
confidences = [r.confidence for r in tmp_rects]
return rects, confidences
elif model == 'haar':
rects = haar_detector.detectMultiScale(img)
rects = [(x1, y1, x1+w, y1+h) for x1, y1, w, h in rects]
return rects, [1]*len(rects)
    # default: use the dnn detector
if dnn_detector is None:
set_dnn_model()
cols = img.shape[1]
rows = img.shape[0]
blob = dnn.blobFromImage(img, 1.0, (conf.in_width, conf.in_height),
(104, 177, 123), False, False)
rects, confidences = [], []
dnn_detector.setInput(blob)
detections = dnn_detector.forward()
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence < conf.threshold:
continue
left = int(detections[0, 0, i, 3] * cols)
top = int(detections[0, 0, i, 4] * rows)
right = int(detections[0, 0, i, 5] * cols)
bottom = int(detections[0, 0, i, 6] * rows)
rects.append((left, top, right, bottom))
confidences.append(confidence)
return rects, confidences
def landmarks(img, rects):
rects = [format_dlib_rect(rect) for rect in rects]
return [predictor(img, rect) for rect in rects]
def encode(img, rects, num_jitters=1):
rects = [format_dlib_rect(rect) for rect in rects]
landmarks = [predictor(img, rect) for rect in rects]
return [np.array(face_encoder.compute_face_descriptor(img, landmark, num_jitters))
for landmark in landmarks]
def distance(face_encodings, face_to_compare):
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
def format_dlib_rect(rect):
left, top, right, bottom = rect
return dlib.rectangle(left, top, right, bottom)
def format_out_rect(rects):
return [(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2))
for x1, y1, x2, y2 in rects]
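# A minimal usage sketch; 'test.jpg' is a hypothetical local image path.
def _demo(image_path='test.jpg'):
    image = cv2.imread(image_path)
    boxes, scores = detect(image, model='dnn')
    for (left, top, right, bottom), score in zip(format_out_rect(boxes), scores):
        print('face at (%d, %d, %d, %d), confidence %.2f'
              % (left, top, right, bottom, score))
    # 128-d embeddings of the detected faces, one vector per box
    return encode(image, boxes)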
| 30.937931
| 91
| 0.669193
|
92e236b451053aefc37bae946dc56093dca0280d
| 1,195
|
py
|
Python
|
genomics_geek/messages/migrations/0001_initial.py
|
genomics-geek/genomics-geek.com
|
ba24be4a0e3d569859a5378d4e7054d58c88728e
|
[
"MIT"
] | null | null | null |
genomics_geek/messages/migrations/0001_initial.py
|
genomics-geek/genomics-geek.com
|
ba24be4a0e3d569859a5378d4e7054d58c88728e
|
[
"MIT"
] | null | null | null |
genomics_geek/messages/migrations/0001_initial.py
|
genomics-geek/genomics-geek.com
|
ba24be4a0e3d569859a5378d4e7054d58c88728e
|
[
"MIT"
] | 1
|
2019-05-16T03:54:21.000Z
|
2019-05-16T03:54:21.000Z
|
# Generated by Django 2.0.8 on 2018-09-16 16:00
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=255)),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=255)),
('text', models.TextField()),
('read', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Message',
'verbose_name_plural': 'Messages',
},
),
]
| 35.147059
| 147
| 0.595816
|
f222f9caced948d34bc17cfda16b2bf7c0f65d31
| 6,485
|
py
|
Python
|
predict.py
|
saileshpuranam/DSND----Image-Classifier-Deep-Learning
|
ea472161670a1a2586f55be4f766be9b287ee761
|
[
"FTL"
] | null | null | null |
predict.py
|
saileshpuranam/DSND----Image-Classifier-Deep-Learning
|
ea472161670a1a2586f55be4f766be9b287ee761
|
[
"FTL"
] | null | null | null |
predict.py
|
saileshpuranam/DSND----Image-Classifier-Deep-Learning
|
ea472161670a1a2586f55be4f766be9b287ee761
|
[
"FTL"
] | null | null | null |
import argparse
import image_utils
import network_utils
import torch
from math import ceil
from train import check_gpu
from torchvision import models
import json
import numpy as np
import PIL
# Function arg_parser() parses keyword arguments from the command line
def arg_parser():
# Define a parser
parser = argparse.ArgumentParser(description="Neural Network Settings")
# Point towards image for prediction
parser.add_argument('--image',
type=str,
help='Point to impage file for prediction.',
required=True)
# Load checkpoint created by train.py
parser.add_argument('--checkpoint',
type=str,
help='Point to checkpoint file as str.',
required=True)
# Specify top-k
parser.add_argument('--top_k',
type=int,
help='Choose top K matches as int.')
# Import category names
parser.add_argument('--category_names',
type=str,
help='Mapping from categories to real names.')
# Add GPU Option to parser
parser.add_argument('--gpu',
action="store_true",
help='Use GPU + Cuda for calculations')
# Parse args
args = parser.parse_args()
return args
# Function load_checkpoint(checkpoint_path) loads our saved deep learning model from checkpoint
def load_checkpoint(checkpoint_path):
# Load the saved file
    checkpoint = torch.load(checkpoint_path)
# Load Defaults if none specified
if checkpoint['architecture'] == 'vgg16':
model = models.vgg16(pretrained=True)
model.name = "vgg16"
else:
exec("model = models.{}(pretrained=True)".checkpoint['architecture'])
model.name = checkpoint['architecture']
# Freeze parameters so we don't backprop through them
for param in model.parameters(): param.requires_grad = False
# Load stuff from checkpoint
model.class_to_idx = checkpoint['class_to_idx']
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
return model
# Function process_image(image_path) performs cropping, scaling of image for our model
def process_image(image_path):
test_image = PIL.Image.open(image_path)
# Get original dimensions
orig_width, orig_height = test_image.size
# Find shorter size and create settings to crop shortest side to 256
if orig_width < orig_height: resize_size=[256, 256**600]
else: resize_size=[256**600, 256]
test_image.thumbnail(size=resize_size)
    # Find pixels to crop on to create a 224x224 center crop of the resized image
    new_width, new_height = test_image.size
    left, top = (new_width - 224) / 2, (new_height - 224) / 2
    right, bottom = left + 224, top + 224
    test_image = test_image.crop((left, top, right, bottom))
    # Convert to numpy - 224x224 image w/ 3 channels (RGB)
    np_image = np.array(test_image)/255 # Divided by 255 because imshow() expects floats in (0:1)!!
# Normalize each color channel
normalise_means = [0.485, 0.456, 0.406]
normalise_std = [0.229, 0.224, 0.225]
np_image = (np_image-normalise_means)/normalise_std
# Set the color to the first channel
np_image = np_image.transpose(2, 0, 1)
return np_image
def predict(image_tensor, model, device, cat_to_name, top_k):
''' Predict the class (or classes) of an image using a trained deep learning model.
image_path: string. Path to image, directly to image and not to folder.
model: pytorch neural network.
top_k: integer. The top K classes to be calculated
outputs top k probabilities and corresponding top_labels
'''
# check top_k
    if top_k is None:
top_k = 5
print("Top K not specified, assuming K=5.")
# Set model to evaluate
model.to('cpu')
model.eval();
# Convert image from numpy to torch
torch_image = torch.from_numpy(np.expand_dims(image_tensor,
axis=0)).type(torch.FloatTensor)
# Find probabilities (results) by passing through the function (note the log softmax means that its on a log scale)
log_probs = model.forward(torch_image)
# Convert to linear scale
linear_probs = torch.exp(log_probs)
# Find the top 5 results
top_probs, top_labels = linear_probs.topk(top_k)
    # Detach all of the details
    top_probs = np.array(top_probs.detach())[0] # This is not the correct way to do it but the correct way isn't working thanks to cpu/gpu issues so I don't care.
top_labels = np.array(top_labels.detach())[0]
# Convert to classes
idx_to_class = {val: key for key, val in
model.class_to_idx.items()}
top_labels = [idx_to_class[lab] for lab in top_labels]
top_flowers = [cat_to_name[lab] for lab in top_labels]
return top_probs, top_labels, top_flowers
def print_probability(probs, flowers):
"""
Converts two lists into a dictionary to print on screen
"""
for i, j in enumerate(zip(flowers, probs)):
print ("Rank {}:".format(i+1),
"Flower: {}, liklihood: {}%".format(j[1], ceil(j[0]*100)))
# =============================================================================
# Main Function
# =============================================================================
def main():
"""
Executing relevant functions
"""
# Get Keyword Args for Prediction
args = arg_parser()
# Load categories to names json file
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
# Load model trained with train.py
model = load_checkpoint(args.checkpoint)
# Process Image
image_tensor = process_image(args.image)
# Check for GPU
device = check_gpu(gpu_arg=args.gpu);
# Use `processed_image` to predict the top K most likely classes
top_probs, top_labels, top_flowers = predict(image_tensor, model,
device, cat_to_name,
args.top_k)
# Print out probabilities
print_probability(top_flowers, top_probs)
# Run Program
if __name__ == '__main__':
main()
| 32.752525
| 161
| 0.614649
|
6586eeee94092ce9f1feb1e8bc2282caaa308c7a
| 4,650
|
py
|
Python
|
anyway/widgets/urban_widgets/injured_accidents_with_pedestrians_widget.py
|
atalyaalon/anyway
|
0ddcd1d587de3bb65c528affcef5b6bd1dcaca71
|
[
"MIT"
] | null | null | null |
anyway/widgets/urban_widgets/injured_accidents_with_pedestrians_widget.py
|
atalyaalon/anyway
|
0ddcd1d587de3bb65c528affcef5b6bd1dcaca71
|
[
"MIT"
] | 78
|
2017-06-20T09:25:11.000Z
|
2021-08-01T05:48:08.000Z
|
anyway/widgets/urban_widgets/injured_accidents_with_pedestrians_widget.py
|
atalyaalon/anyway
|
0ddcd1d587de3bb65c528affcef5b6bd1dcaca71
|
[
"MIT"
] | null | null | null |
import logging
from typing import Dict
from sqlalchemy import func, or_
from sqlalchemy.sql.elements import and_
from flask_babel import _
from anyway.request_params import RequestParams
from anyway.app_and_db import db
from anyway.backend_constants import InjurySeverity, InjuredType
from anyway.widgets.widget_utils import add_empty_keys_to_gen_two_level_dict, gen_entity_labels
from anyway.models import NewsFlash, InvolvedMarkerView
from anyway.widgets.widget import register
from anyway.widgets.urban_widgets.urban_widget import UrbanWidget
@register
class InjuredAccidentsWithPedestriansWidget(UrbanWidget):
name: str = "injured_accidents_with_pedestrians"
def validate_parameters(self, yishuv_name, street1_hebrew):
# TODO: validate each parameter and display message accordingly
return (
yishuv_name is not None
and street1_hebrew is not None
and self.request_params.years_ago is not None
)
def convert_to_dict(self, query_results):
res = {}
for query_result in query_results:
if query_result.injury_severity not in res:
res[query_result.injury_severity] = {}
if query_result.accident_year not in res[query_result.injury_severity]:
res[query_result.injury_severity][query_result.accident_year] = 0
res[query_result.injury_severity][query_result.accident_year] += query_result.count
return res
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.rank = 18
self.information = "Injured and killed pedestrians by severity and year"
def generate_items(self) -> None:
try:
yishuv_name = self.request_params.location_info.get("yishuv_name")
street1_hebrew = self.request_params.location_info.get("street1_hebrew")
if not self.validate_parameters(yishuv_name, street1_hebrew):
logging.exception(
f"Could not validate parameters for {NewsFlash} : {self.request_params.news_flash_obj.id}"
)
return None
query = (
db.session.query(InvolvedMarkerView)
.with_entities(
InvolvedMarkerView.accident_year,
InvolvedMarkerView.injury_severity,
func.count().label("count"),
)
.filter(InvolvedMarkerView.accident_yishuv_name == yishuv_name)
.filter(
InvolvedMarkerView.injury_severity.in_(
[
InjurySeverity.KILLED.value,
InjurySeverity.SEVERE_INJURED.value,
InjurySeverity.LIGHT_INJURED.value,
]
)
)
.filter(InvolvedMarkerView.injured_type == InjuredType.PEDESTRIAN.value)
.filter(
or_(
InvolvedMarkerView.street1_hebrew == street1_hebrew,
InvolvedMarkerView.street2_hebrew == street1_hebrew,
)
)
.filter(
and_(
InvolvedMarkerView.accident_timestamp >= self.request_params.start_time,
InvolvedMarkerView.accident_timestamp <= self.request_params.end_time,
)
)
.group_by(InvolvedMarkerView.accident_year, InvolvedMarkerView.injury_severity)
)
self.items = add_empty_keys_to_gen_two_level_dict(
self.convert_to_dict(query.all()),
InjurySeverity.codes(),
list(
range(
self.request_params.start_time.year, self.request_params.end_time.year + 1
)
),
)
except Exception as e:
logging.error(f"InjuredAccidentsWithPedestriansWidget.generate_items(): {e}")
raise Exception(e)
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
items["data"]["text"] = {
"title": f"נפגעים הולכי רגל ב- {request_params.location_text}",
"labels": gen_entity_labels(InjurySeverity),
}
return items
# adding calls to _() for pybabel extraction
_("Injured and killed pedestrians by severity and year")
| 40.434783
| 111
| 0.595054
|
2ce649a0eb2d5b6cb6464fcd822bfae1b9ef666a
| 856
|
py
|
Python
|
utils/insertionsort.py
|
enlighter/algorithms
|
e8ecf918235933dd1bb384a5fa9ecc995c5bc73d
|
[
"MIT"
] | 1
|
2015-05-11T13:54:10.000Z
|
2015-05-11T13:54:10.000Z
|
utils/insertionsort.py
|
enlighter/algorithms
|
e8ecf918235933dd1bb384a5fa9ecc995c5bc73d
|
[
"MIT"
] | null | null | null |
utils/insertionsort.py
|
enlighter/algorithms
|
e8ecf918235933dd1bb384a5fa9ecc995c5bc73d
|
[
"MIT"
] | null | null | null |
from operator import lt as less_than_operator
from operator import gt as greater_than_operator
def insertion_sort(array: list, desc: bool = False):
n = len(array)
array = array.copy()
if desc:
comparison_operator = less_than_operator
else:
comparison_operator = greater_than_operator
# array[0:1] subsection is always sorted by default, so 0th iteration is redundant here
for i in range(1, n):
item = array[i]
j = i - 1
while j >= 0 and comparison_operator(array[j], item):
# print('Looping, j=', j)
array[j + 1] = array[j]
j = j - 1
# print(array)
array[j+1] = item
return array
if __name__ == '__main__':
test_list = [3, 6, 8, 24, 7, 11, 16]
sorted_list = insertion_sort(test_list, desc=True)
print(sorted_list)
| 27.612903
| 91
| 0.610981
|
583146f48d2c1ef1de943d6ec1f48af23a358ab5
| 4,229
|
py
|
Python
|
airflow/contrib/operators/jira_operator.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 1
|
2021-11-04T20:11:58.000Z
|
2021-11-04T20:11:58.000Z
|
airflow/contrib/operators/jira_operator.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 20
|
2017-04-18T19:47:46.000Z
|
2020-01-13T04:19:24.000Z
|
airflow/contrib/operators/jira_operator.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 2
|
2018-09-15T07:13:01.000Z
|
2021-03-26T07:27:38.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.jira_hook import JIRAError, JiraHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class JiraOperator(BaseOperator):
"""
JiraOperator to interact and perform action on Jira issue tracking system.
This operator is designed to use Jira Python SDK: http://jira.readthedocs.io
:param jira_conn_id: reference to a pre-defined Jira Connection
:type jira_conn_id: str
:param jira_method: method name from Jira Python SDK to be called
:type jira_method: str
:param jira_method_args: required method parameters for the jira_method. (templated)
:type jira_method_args: dict
:param result_processor: function to further process the response from Jira
:type result_processor: function
:param get_jira_resource_method: function or operator to get jira resource
on which the provided jira_method will be executed
:type get_jira_resource_method: function
"""
template_fields = ("jira_method_args",)
@apply_defaults
def __init__(self,
jira_conn_id='jira_default',
jira_method=None,
jira_method_args=None,
result_processor=None,
get_jira_resource_method=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.jira_conn_id = jira_conn_id
self.method_name = jira_method
self.jira_method_args = jira_method_args
self.result_processor = result_processor
self.get_jira_resource_method = get_jira_resource_method
def execute(self, context):
try:
if self.get_jira_resource_method is not None:
# if get_jira_resource_method is provided, jira_method will be executed on
# resource returned by executing the get_jira_resource_method.
# This makes all the provided methods of JIRA sdk accessible and usable
# directly at the JiraOperator without additional wrappers.
# ref: http://jira.readthedocs.io/en/latest/api.html
if isinstance(self.get_jira_resource_method, JiraOperator):
resource = self.get_jira_resource_method.execute(**context)
else:
resource = self.get_jira_resource_method(**context)
else:
# Default method execution is on the top level jira client resource
hook = JiraHook(jira_conn_id=self.jira_conn_id)
resource = hook.client
# Current Jira-Python SDK (1.0.7) has issue with pickling the jira response.
# ex: self.xcom_push(context, key='operator_response', value=jira_response)
# This could potentially throw error if jira_result is not picklable
jira_result = getattr(resource, self.method_name)(**self.jira_method_args)
if self.result_processor:
return self.result_processor(context, jira_result)
return jira_result
except JIRAError as jira_error:
raise AirflowException("Failed to execute jiraOperator, error: %s"
% str(jira_error))
except Exception as e:
raise AirflowException("Jira operator error: %s" % str(e))
| 44.989362
| 90
| 0.677938
|
324007e0012b6f3eba8dbc0445f0671c212d9283
| 957
|
py
|
Python
|
ci/travis/build-multinode-image.py
|
mickelliu/ray
|
9cec28d439aca3458d9207379447251aa7b97e4f
|
[
"Apache-2.0"
] | 2
|
2021-11-02T19:00:37.000Z
|
2022-01-22T01:33:01.000Z
|
ci/travis/build-multinode-image.py
|
mickelliu/ray
|
9cec28d439aca3458d9207379447251aa7b97e4f
|
[
"Apache-2.0"
] | null | null | null |
ci/travis/build-multinode-image.py
|
mickelliu/ray
|
9cec28d439aca3458d9207379447251aa7b97e4f
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import shutil
import subprocess
import tempfile
def build_multinode_image(source_image: str, target_image: str):
"""Build docker image from source_image.
This docker image will contain packages needed for the fake multinode
docker cluster to work.
"""
tempdir = tempfile.mkdtemp()
dockerfile = os.path.join(tempdir, "Dockerfile")
with open(dockerfile, "wt") as f:
f.write(f"FROM {source_image}\n")
f.write("RUN sudo apt update\n")
f.write("RUN sudo apt install -y openssh-server\n")
subprocess.check_output(
f"docker build -t {target_image} .", shell=True, cwd=tempdir)
shutil.rmtree(tempdir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("source_image", type=str)
parser.add_argument("target_image", type=str)
args = parser.parse_args()
build_multinode_image(args.source_image, args.target_image)
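# Example invocation (editor's sketch; the image tags below are hypothetical):
#
#   python ci/travis/build-multinode-image.py rayproject/ray:nightly ray-multinode:latest
#
# The resulting image is a copy of the source image with openssh-server
# installed, as needed by the fake multinode docker cluster.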
| 27.342857
| 73
| 0.703239
|
9ef57793a6f5e66651cc0bc4f01a3448c56dd3ae
| 2,518
|
py
|
Python
|
mysql/__init__.py
|
renxinqiang/SpiderComment
|
30df130e77a2dfca5d8fc219082386603f1f11ec
|
[
"MIT"
] | 1
|
2021-07-28T13:37:55.000Z
|
2021-07-28T13:37:55.000Z
|
mysql/__init__.py
|
renxinqiang/SpiderComment
|
30df130e77a2dfca5d8fc219082386603f1f11ec
|
[
"MIT"
] | null | null | null |
mysql/__init__.py
|
renxinqiang/SpiderComment
|
30df130e77a2dfca5d8fc219082386603f1f11ec
|
[
"MIT"
] | 1
|
2018-07-03T08:04:57.000Z
|
2018-07-03T08:04:57.000Z
|
#!/usr/local/bin/python3
import config
import log
import pymysql
class MySql:
__host = ''
__user = ''
__pass = ''
__db = ''
__port = ''
__charset = ''
__connect = ''
__cursor = ''
def __init__(self):
self.__host = config.MYSQL['host']
self.__user = config.MYSQL['user']
self.__pass = config.MYSQL['password']
self.__db = config.MYSQL['db']
self.__port = config.MYSQL['port']
self.__charset = config.MYSQL['charset']
self.__mysql_connect()
pass
    # Establish the MySQL connection
def __mysql_connect(self):
if not self.__connect:
try:
self.__connect = pymysql.connect(self.__host, self.__user, self.__pass, self.__db, charset = self.__charset)
self.__cursor = self.__connect.cursor()
            except Exception:
                log.Log().write_log(__class__, 'Connect MySql Failed!!!')
pass
    # Run a statement and return the cursor
def __exec(self, sql=''):
if not sql:
return False
try:
self.__cursor.execute(sql)
return self.__cursor
        except Exception:
            log.Log().write_log(__class__, 'Cursor Sql Failed!!!')
    # Fetch all rows
def find_all(self, sql=''):
if not sql:
return False
cursor = self.__exec(sql)
return cursor.fetchall()
    # Fetch a single row
def find_one(self, sql=''):
if not sql:
return False
cursor = self.__exec(sql)
return cursor.fetchone()
    # Fetch a limited number of rows
    def find_mony(self, sql='', size=0):
        if not sql or size == 0:
            return False
        cursor = self.__exec(sql)
        return cursor.fetchmany(size)
    # Insert
def insert_sql(self,sql=''):
if not sql:
return False
self.__exec(sql)
self.__commit_sql()
return True
    # Update
def update_sql(self,sql=''):
if not sql:
return False
self.__exec(sql)
self.__commit_sql()
return True
    # Delete
def delete_sql(self,sql=''):
if not sql:
return False
self.__exec(sql)
self.__commit_sql()
return True
    # Commit the current transaction, rolling back on failure
def __commit_sql(self):
try:
self.__connect.commit()
        except Exception:
            self.__connect.rollback()
            log.Log().write_log(__class__, 'Commit Sql Failed!!!')
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Completes the context manager protocol so `with MySql() as db:` works
        self.close_connect()
    # Close the cursor and the connection
def close_connect(self):
self.__cursor.close()
self.__connect.close()
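# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Assumes config.MYSQL is filled in and that a `comments` table exists; both
# are hypothetical here.
#
# db = MySql()
# rows = db.find_all("SELECT id, content FROM comments LIMIT 10")
# db.insert_sql("INSERT INTO comments (content) VALUES ('hello')")
# db.close_connect()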
| 22.684685
| 124
| 0.533757
|
21f248a799930f423bafd6a552c83b59266ccee4
| 1,767
|
py
|
Python
|
google-datacatalog-rdbms-connector/setup.py
|
jason-h-35/datacatalog-connectors-rdbms
|
226f29f8d5568da6f588ffd09ba85a4daf73bc10
|
[
"Apache-2.0"
] | 46
|
2020-04-27T21:55:50.000Z
|
2022-02-06T04:34:06.000Z
|
google-datacatalog-rdbms-connector/setup.py
|
jason-h-35/datacatalog-connectors-rdbms
|
226f29f8d5568da6f588ffd09ba85a4daf73bc10
|
[
"Apache-2.0"
] | 45
|
2020-05-20T21:09:04.000Z
|
2022-03-24T00:14:30.000Z
|
google-datacatalog-rdbms-connector/setup.py
|
mesmacosta/datacatalog-connectors-rdbms
|
226f29f8d5568da6f588ffd09ba85a4daf73bc10
|
[
"Apache-2.0"
] | 47
|
2020-05-02T14:48:06.000Z
|
2022-03-28T22:12:22.000Z
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
release_status = 'Development Status :: 4 - Beta'
with open('README.md') as readme_file:
readme = readme_file.read()
setuptools.setup(
name='google-datacatalog-rdbms-connector',
version='0.12.1',
author='Google LLC',
description=
'Commons library for ingesting RDBMS metadata into Google Cloud Data Catalog',
packages=setuptools.find_packages(where='./src'),
namespace_packages=['google', 'google.datacatalog_connectors'],
package_dir={'': 'src'},
include_package_data=True,
    install_requires=('pandas>=1.1.4,<1.2.0', 'gcsfs',
                      'google-datacatalog-connectors-commons>=0.6.0,<0.7.0',
                      'schema'),
    setup_requires=('pytest-runner',),
    tests_require=('mock==3.0.5', 'pytest', 'pytest-cov',
                   'google-datacatalog-connectors-commons-test>=0.7.0,<0.8.0'),
classifiers=[
release_status,
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Topic :: Internet',
],
long_description=readme,
long_description_content_type='text/markdown',
platforms='Posix; MacOS X; Windows',
)
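# --- Editor's note (illustrative, not part of the original file) ---
# Installing the built distribution would look roughly like this; the version
# pin simply mirrors the `version=` field above:
#
#   pip install google-datacatalog-rdbms-connector==0.12.1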
| 35.34
| 82
| 0.679117
|
bb0ff5f54547ccdf63a205d4ee9f376634014c23
| 6,138
|
py
|
Python
|
tests/test_normal_compute_add_prob.py
|
broadinstitute/millipede
|
4b6a61027e559a6953fabee138b074afc4164489
|
[
"Apache-2.0"
] | 12
|
2021-11-09T17:19:32.000Z
|
2022-02-27T17:41:59.000Z
|
tests/test_normal_compute_add_prob.py
|
broadinstitute/millipede
|
4b6a61027e559a6953fabee138b074afc4164489
|
[
"Apache-2.0"
] | 1
|
2021-11-19T18:26:34.000Z
|
2021-11-29T18:12:21.000Z
|
tests/test_normal_compute_add_prob.py
|
broadinstitute/millipede
|
4b6a61027e559a6953fabee138b074afc4164489
|
[
"Apache-2.0"
] | 1
|
2022-02-28T15:18:53.000Z
|
2022-02-28T15:18:53.000Z
|
import math
from types import SimpleNamespace
import pytest
import torch
from common import assert_close
from torch import zeros
from millipede import NormalLikelihoodSampler
def get_sample(gamma, included_covariates, log_h_ratio):
P = len(gamma)
sample = SimpleNamespace(gamma=torch.tensor(gamma).bool(),
add_prob=zeros(P), _i_prob=zeros(P),
_idx=0, weight=0.0)
sample._active = torch.nonzero(sample.gamma).squeeze(-1)
if len(included_covariates) > 0:
sample._activeb = torch.cat([sample._active, torch.tensor(included_covariates)])
sample._log_h_ratio = log_h_ratio
return sample
def check_gammas(sampler, included_covariates, P, compute_log_factor_ratio):
# TEST GAMMA = 0 0 0
sample = get_sample([0] * P, included_covariates, sampler.log_h_ratio)
log_odds = sampler._compute_add_prob(sample)
for p in range(P):
assert_close(compute_log_factor_ratio([p], []), log_odds[p], atol=1.0e-7)
# TEST GAMMA = 1 0 0
sample = get_sample([1] + [0] * (P - 1), included_covariates, sampler.log_h_ratio)
log_odds = sampler._compute_add_prob(sample)
assert_close(compute_log_factor_ratio([0], []), log_odds[0], atol=1.0e-7)
for p in range(1, P):
assert_close(compute_log_factor_ratio([0, p], [0]), log_odds[p], atol=1.0e-7)
# TEST GAMMA = 1 1 0
sample = get_sample([1, 1] + [0] * (P - 2), included_covariates, sampler.log_h_ratio)
log_odds = sampler._compute_add_prob(sample)
assert_close(compute_log_factor_ratio([0, 1], [1]), log_odds[0], atol=1.0e-7)
assert_close(compute_log_factor_ratio([0, 1], [0]), log_odds[1], atol=1.0e-7)
for p in range(2, P):
assert_close(compute_log_factor_ratio([0, 1, p], [0, 1]), log_odds[p], atol=1.0e-7)
# TEST GAMMA = 1 1 1
sample = get_sample([1, 1, 1] + [0] * (P - 3), included_covariates, sampler.log_h_ratio)
log_odds = sampler._compute_add_prob(sample)
assert_close(compute_log_factor_ratio([0, 1, 2], [1, 2]), log_odds[0], atol=1.0e-7)
assert_close(compute_log_factor_ratio([0, 1, 2], [0, 2]), log_odds[1], atol=1.0e-7)
assert_close(compute_log_factor_ratio([0, 1, 2], [0, 1]), log_odds[2], atol=1.0e-7)
for p in range(3, P):
assert_close(compute_log_factor_ratio([0, 1, 2, p], [0, 1, 2]), log_odds[p], atol=1.0e-7)
@pytest.mark.parametrize("P", [4, 7])
@pytest.mark.parametrize("P_assumed", [0, 1, 2])
@pytest.mark.parametrize("precompute_XX", [False, True])
@pytest.mark.parametrize("include_intercept", [False, True])
def test_isotropic_compute_add_log_prob(P, P_assumed, precompute_XX, include_intercept,
N=11, tau=0.47, tau_intercept=0.11):
X = torch.randn(N, P).double()
X_assumed = torch.randn(N, P_assumed).double() if P_assumed > 0 else None
Y = X[:, 0] + 0.2 * torch.randn(N).double()
S = 1.0 if include_intercept else (torch.randn(P) / 100).exp().double() / P
sampler = NormalLikelihoodSampler(X, Y, X_assumed=X_assumed, S=S, c=0.0,
tau=tau, tau_intercept=tau_intercept, include_intercept=include_intercept,
precompute_XX=precompute_XX, prior="isotropic")
included_covariates = []
if P_assumed > 0:
X = torch.cat([X, X_assumed], dim=-1)
included_covariates = list(range(P, P + P_assumed))
if include_intercept:
included_covariates.append(P + P_assumed)
else:
if include_intercept:
included_covariates.append(P)
if include_intercept:
X = torch.cat([X, X.new_ones(X.size(0), 1)], dim=-1)
YY = sampler.YY
Z = sampler.Z
def compute_log_factor(ind):
ind.extend(included_covariates)
precision = tau * torch.eye(len(ind))
if include_intercept:
precision[-1, -1] = tau_intercept
F = torch.inverse(X[:, ind].t() @ X[:, ind] + precision)
ZFZ = (torch.mv(F, Z[ind]) * Z[ind]).sum(0)
return -0.5 * N * (YY - ZFZ).log() + 0.5 * F.logdet()
def compute_log_factor_ratio(ind1, ind0):
added_idx = list(set(ind1) - set(ind0))[0]
log_h_ratio = sampler.log_h_ratio[added_idx] if isinstance(sampler.log_h_ratio, torch.Tensor) \
else sampler.log_h_ratio
return compute_log_factor(ind1) - compute_log_factor(ind0) + log_h_ratio + 0.5 * math.log(tau)
check_gammas(sampler, included_covariates, P, compute_log_factor_ratio)
@pytest.mark.parametrize("P", [4, 7])
@pytest.mark.parametrize("P_assumed", [0, 1, 2])
@pytest.mark.parametrize("precompute_XX", [False, True])
@pytest.mark.parametrize("include_intercept", [True, False])
def test_gprior_compute_add_log_prob(P, P_assumed, precompute_XX, include_intercept, N=11):
X = torch.randn(N, P).double()
X_assumed = torch.randn(N, P_assumed).double() if P_assumed > 0 else None
Y = X[:, 0] + 0.2 * torch.randn(N).double()
sampler = NormalLikelihoodSampler(X, Y, X_assumed=X_assumed, S=1.0,
tau=0.0, c=0.73, include_intercept=include_intercept,
precompute_XX=precompute_XX, prior="gprior")
included_covariates = []
if P_assumed > 0:
X = torch.cat([X, X_assumed], dim=-1)
included_covariates = list(range(P, P + P_assumed))
if include_intercept:
included_covariates.append(P + P_assumed)
else:
if include_intercept:
included_covariates.append(P)
if include_intercept:
X = torch.cat([X, X.new_ones(X.size(0), 1)], dim=-1)
YY = sampler.YY
Z = sampler.Z
def compute_log_factor(ind):
ind.extend(included_covariates)
F = torch.inverse(X[:, ind].t() @ X[:, ind])
ZFZ = (torch.mv(F, Z[ind]) * Z[ind]).sum(0)
return -0.5 * N * (YY - sampler.c_one_c * ZFZ).log()
def compute_log_factor_ratio(ind1, ind0):
return compute_log_factor(ind1) - compute_log_factor(ind0) + sampler.log_h_ratio - sampler.log_one_c_sqrt
check_gammas(sampler, included_covariates, P, compute_log_factor_ratio)
| 42.041096
| 113
| 0.642229
|
442647ddbc4fe4b81c93f24276b50135d79f0b32
| 794
|
py
|
Python
|
examples/server/python/config.py
|
jeremykenedy/satellizer
|
6b0c6bde4eead23b8479bea3970bdbb08600dc07
|
[
"MIT"
] | null | null | null |
examples/server/python/config.py
|
jeremykenedy/satellizer
|
6b0c6bde4eead23b8479bea3970bdbb08600dc07
|
[
"MIT"
] | null | null | null |
examples/server/python/config.py
|
jeremykenedy/satellizer
|
6b0c6bde4eead23b8479bea3970bdbb08600dc07
|
[
"MIT"
] | null | null | null |
import os
DEBUG = True
TOKEN_SECRET = os.environ.get('SECRET_KEY') or 'hard to guess string'
FACEBOOK_SECRET = os.environ.get('FACEBOOK_SECRET') or 'Facebook Client Secret'
FOURSQUARE_SECRET = os.environ.get('FOURSQUARE_SECRET') or 'Foursquare Client Secret'
GOOGLE_SECRET = os.environ.get('GOOGLE_SECRET') or 'Google Client Secret'
LINKEDIN_SECRET = os.environ.get('LINKEDIN_SECRET') or 'LinkedIn Client Secret'
TWITTER_CONSUMER_KEY = os.environ.get('TWITTER_CONSUMER_KEY') or 'Twitter Consumer Key'
TWITTER_CONSUMER_SECRET = os.environ.get('TWITTER_CONSUMER_SECRET') or 'Twitter Consumer Secret'
TWITTER_CALLBACK_URL = os.environ.get('TWITTER_CALLBACK_URL') or 'http://localhost:3000'
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or 'sqlite:///app.db'
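# --- Illustrative usage (editor's sketch, not part of the original file) ---
# The example server would typically load this module into its Flask app; the
# app object below is an assumption, `from_object` is the standard Flask API.
#
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object('config')
# print(app.config['TOKEN_SECRET'])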
| 66.166667
| 96
| 0.799748
|
2a59b74197f64db3e9f069af49c52e8decab9d27
| 538
|
py
|
Python
|
.circleci/build_count.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | 6
|
2021-02-09T05:58:53.000Z
|
2021-11-01T03:28:40.000Z
|
.circleci/build_count.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | null | null | null |
.circleci/build_count.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | 2
|
2021-03-12T07:00:39.000Z
|
2021-04-12T09:47:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
Print the number of nightly builds
"""
from collections import Counter
import yaml
conf = yaml.safe_load(open("config.yml"))
jobs = conf["workflows"]["build_and_test"]["jobs"]
def jobtype(job):
if isinstance(job, str):
return job
if len(job) == 1:
[name] = job.keys()
return name
return "MULTIPLE PARTS"
for i, j in Counter(map(jobtype, jobs)).items():
print(i, j)
print()
print(len(jobs))
| 17.933333
| 71
| 0.64684
|
0ba1f88167566b39f5a9035a5a28f217e6de0ba9
| 23,332
|
py
|
Python
|
bth5/dataset.py
|
Quansight/bitemporal-h5
|
faa323e25521381e3d770f48aa089ede1c7406fe
|
[
"BSD-3-Clause"
] | 3
|
2019-09-13T18:41:09.000Z
|
2019-09-14T02:58:49.000Z
|
bth5/dataset.py
|
Quansight/bitemporal-h5
|
faa323e25521381e3d770f48aa089ede1c7406fe
|
[
"BSD-3-Clause"
] | 5
|
2019-09-05T14:21:59.000Z
|
2019-10-10T18:41:52.000Z
|
bth5/dataset.py
|
Quansight/bitemporal-h5
|
faa323e25521381e3d770f48aa089ede1c7406fe
|
[
"BSD-3-Clause"
] | 2
|
2020-02-11T18:52:58.000Z
|
2021-04-17T15:39:04.000Z
|
"""The main bitemporal data set interface"""
import datetime
import posixpath
from collections.abc import Iterable
import functools
import h5py
import numpy as np
import numba as nb
import json
class DatasetView(h5py.Dataset):
r"""
Views a ``h5py.Dataset`` as a dtype of your choice.
Examples
--------
>>> with h5py.File(temp_h5, 'w') as f:
... orig_dset = f['/'].create_dataset('example', shape=(), dtype=np.dtype('V8'))
... id = orig_dset.id
... dset = DatasetView(id, dtype='<M8[D]')
... dset[...] = np.datetime64("2019-09-18")
... orig_dset[...]
array(b'\xED\x46\x00\x00\x00\x00\x00\x00', dtype='|V8')
"""
def __init__(self, id, dtype=None):
super().__init__(id)
file_dtype = (
None
if "NUMPY_DTYPE" not in self.attrs
else _dtype_from_descr(json.loads(self.attrs["NUMPY_DTYPE"]))
)
if (
(file_dtype is not None)
and (dtype is not None)
and np.dtype(dtype) != file_dtype
):
raise ValueError("Dtype in file doesn't match specified dtype.")
elif ("NUMPY_DTYPE" not in self.attrs) and (dtype is None):
raise ValueError("dtype not specified and not in file.")
if file_dtype is not None:
dtype = file_dtype
else:
self.attrs["NUMPY_DTYPE"] = json.dumps(np.dtype(dtype).descr)
self._actual_dtype = np.dtype(dtype)
@property
def dtype(self):
return self._actual_dtype
def __getitem__(self, k):
if isinstance(k, str):
dt = np.dtype(self.dtype.fields[k][0])
return super().__getitem__(k).view(dt)
else:
return super().__getitem__(k).view(self.dtype)
def __setitem__(self, k, v):
if isinstance(k, str):
dt1 = np.dtype(self.dtype.fields[k][0])
dt2 = np.dtype(super().dtype.fields[k][0])
v = np.asarray(v, dtype=dt1)
super().__setitem__(k, v.view(dt2))
else:
v = np.asarray(v, dtype=self.dtype)
super().__setitem__(k, v.view(super().dtype))
def _dtype_from_descr(dtype):
if len(dtype) == 1 and dtype[0][0] == "":
dtype = np.dtype(dtype)
else:
dtype = np.lib.format.descr_to_dtype(dtype)
return dtype
@nb.jit(nopython=True, nogil=True)
def _argunique_last(keys):
"""
Deduplicates values w.r.t keys passed in.
Does the same as ``np.unique``, but keeping the last element instead of the
first, and returning the index.
Parameters
----------
    keys: numpy.ndarray
        The keys with which to deduplicate; the last occurrence of each key is kept.
Examples
--------
>>> keys = np.array([1, 2, 1], dtype=np.intp)
>>> _argunique_last(keys)
array([1, 2])
"""
a = {}
b = []
j = np.intp(0)
for i in range(len(keys)):
k = keys[i]
        if k in a:
            # A repeated key overwrites the slot it was originally assigned;
            # a[k] keeps pointing at that slot, so it is not reassigned here
            # (reassigning it to j would point past the end of b).
            old_idx = a[k]
            b[old_idx] = i
else:
b.append(i)
a[k] = j
j += 1
ret = np.array(b, dtype=np.intp)
ret.sort()
return ret
def _wrap_deduplicate(f):
"""
    Wraps a function so it de-duplicates the data with respect to the valid times.
"""
@functools.wraps(f)
def wrapped(*a, **kw):
ret = f(*a, **kw)
if ret.ndim > 0:
# A view is okay here since we only care about the hash.
# Plus, Numba doesn't support datetime64 hashes.
dedup_ids = _argunique_last(ret["valid_time"].view(np.int64))
ret = ret[dedup_ids]
return ret
return wrapped
NAT = np.datetime64("nat")
TIME_DTYPE = np.dtype("<M8[us]")
TIDX_DTYPE = np.dtype(
[
("transaction_time", TIME_DTYPE),
("start_valid_time", TIME_DTYPE),
("end_valid_time", TIME_DTYPE),
("start_idx", "<u8"),
("end_idx", "<u8"),
]
)
def _ensure_groups(handle, path):
"""
Makes sure a path exists, returning the final group object.
Parameters
----------
handle : h5py.File
The file handle in which to ensure the group.
path : str
The group to ensure inside the file.
Examples
--------
>>> with h5py.File(temp_h5, 'w') as f:
... _ensure_groups(f, '/potato')
... '/potato' in f
<HDF5 group "/potato" (0 members)>
True
"""
# this assumes the path is an abspath
group = handle["/"] # the file handle is the root group.
heirarchy = path[1:].split("/")
for name in heirarchy:
if not name:
# handle double slashes, //
continue
elif name in group:
group = group[name]
else:
group = group.create_group(name)
return group
def _transform_dt(dt):
"""
Replaces all datetime64s inside a dtype with ``|V8``, an opaque
8-byte bitfield.
Parameters
----------
dt: np.dtype
The dtype to transform
Examples
--------
>>> _transform_dt(np.dtype('int8'))
dtype('int8')
>>> _transform_dt(np.dtype('<M8'))
dtype('V8')
>>> _transform_dt(np.dtype(('<M8', (5, 5))))
dtype(('V8', (5, 5)))
>>> _transform_dt(np.dtype([('a', '<M8'), ('b', 'int8')]))
dtype([('a', 'V8'), ('b', 'i1')])
"""
if dt.fields is not None:
dt_out = {"names": [], "formats": [], "offsets": []}
for field, dt_inner in dt.fields.items():
dt_out["names"].append(field)
dt_out["formats"].append(_transform_dt(dt_inner[0]))
dt_out["offsets"].append(dt_inner[1])
return np.dtype(dt_out)
if dt.subdtype is not None:
return np.dtype((_transform_dt(dt.subdtype[0]), dt.subdtype[1]))
return np.dtype("V8") if dt.kind in "Mm" else dt
def _check_index_dtype(k):
"""
Check the dtype of the index.
Parameters
----------
k: slice or array_like
Index into an array
Examples
--------
>>> _check_index_dtype(0)
dtype('int64')
>>> _check_index_dtype(np.datetime64(0, 'ms'))
dtype('<M8[ms]')
>>> _check_index_dtype(slice(5, 8))
dtype('int64')
"""
if not isinstance(k, slice):
if hasattr(k, "__len__") and len(k) == 0:
return np.intp
return np.asarray(k).dtype
arr = [v for v in (k.start, k.stop, k.step) if v is not None]
return _check_index_dtype(arr)
class _Indexer:
"""
Turns a function or method into an indexer.
Examples
--------
>>> def f(k):
... return k
>>> i = _Indexer(f)
>>> i[1]
1
>>> i[5:8]
slice(5, 8, None)
"""
def __init__(self, reader):
self._reader = reader
if hasattr(reader, "__doc__"):
self.__doc__ = reader.__doc__
def __get__(self, obj, otype=None):
if obj is not None:
reader = self._reader.__get__(obj, otype)
return type(self)(reader)
return self
def __getitem__(self, k):
return self._reader(k)
class Dataset:
"""
Represents a bitemporal dataset as a memory-mapped structure
stored in HDF5.
Examples
--------
>>> ds = bth5.Dataset(temp_h5, '/path/to/group', mode='a', value_dtype=np.float64)
>>> with ds:
... ds.write(np.datetime64("2018-06-21 12:26:47"), 2.0)
>>> # Write happens here.
>>> with ds:
... ds.valid_times[:]
array([(0, '2018-06-21T12:26:47.000000', 2.)],
dtype=[('transaction_id', '<u8'), ('valid_time', '<M8[us]'), ('value', '<f8')])
"""
def __init__(self, filename, path, mode="r", value_dtype=None):
"""
Creates a :obj:`Dataset`.
Parameters
----------
filename : str
The path to the h5 file, on disk.
path : str
The path to the group within the HDF5 file.
mode : str
The mode to open a file with.
value_dtype: str, optional
            The dtype of the value that is attached to each valid time entry.
"""
if not posixpath.isabs(path):
raise ValueError(
path + "must be a posix absolute path, i.e. "
"start with a leading '/'."
)
self.filename = filename
self.path = path
self.closed = True
self._mode = mode
self._handle = None
self._staged_data = None
if value_dtype is not None:
self._dtype = np.dtype(
[
("transaction_id", "<u8"),
("valid_time", TIME_DTYPE),
("value", value_dtype),
]
)
else:
with self:
self._dtype = self._dtype_from_file()
self._file_dtype = None
if self._dtype is None:
raise ValueError("Must specify dtype on first transaction.")
def _dtype_from_file(self):
if self._dataset_name not in self._group:
return None
ds = DatasetView(self._group[self._dataset_name].id)
return ds.dtype
@property
def dtype(self):
"""
The dtype of this dataset.
"""
return self._dtype
@property
def file_dtype(self):
"""
The dtype stored in the file.
"""
if self._file_dtype is None:
self._file_dtype = _transform_dt(self.dtype)
return self._file_dtype
@dtype.setter
def dtype(self, value):
self._dtype = value
def open(self, mode="r", **kwargs):
"""Opens the file for various operations"""
# check that we should open the dataset
if not self.closed:
if self._mode == mode:
return # already open in the same mode!
else:
raise IOError("attempted to reopen dataset in new mode")
# open the dataset and return
self._handle = h5py.File(self.filename, mode)
self.closed = False
self._mode = mode
self._group_name = self.path
self._dataset_name = "dataset"
self._transaction_idx_name = "transaction_index"
self._group = _ensure_groups(self._handle, self._group_name)
if "w" in mode or "a" in mode:
self._staged_data = []
@property
def _dataset(self):
if self._dataset_name not in self._group:
self._group.create_dataset(
self._dataset_name,
dtype=_transform_dt(self.dtype),
maxshape=(None,),
shape=(0,),
)
id = self._group[self._dataset_name].id
return DatasetView(id, dtype=self.dtype)
@property
def _transaction_index(self):
if self._transaction_idx_name not in self._group:
self._group.create_dataset(
self._transaction_idx_name,
dtype=_transform_dt(TIDX_DTYPE),
maxshape=(None,),
shape=(0,),
)
id = self._group[self._transaction_idx_name].id
return DatasetView(id, dtype=TIDX_DTYPE)
def close(self):
"""Close the current file handle."""
ds = self._dataset
tidx = self._transaction_index
# write the staged data
if self._staged_data:
n = len(self._staged_data)
data = np.empty(n, dtype=self.dtype)
data[:] = self._staged_data
# 1. Mergesort is stable
# 2. Faster on almost sorted data
sorted_idx = np.argsort(data["valid_time"], kind="mergesort")
# set transaction id
tid = len(tidx)
data["transaction_id"][:] = tid
# write dataset
m = ds.len()
ds.resize((m + n,))
ds[m:] = data[sorted_idx]
tidx.resize((tid + 1,))
now = np.datetime64(datetime.datetime.utcnow())
tidx[-1] = (now, data["valid_time"][0], data["valid_time"][-1], m, m + n)
# now close the file
self._handle.close()
self._handle = None
self.closed = True
# self._mode does not need to be reset, so that the file can be reopened
self._group_name = self._group = None
self._dataset_name = None
self._staged_data = None
def __enter__(self):
if self.closed:
mode = self._mode or "r"
self.open(mode=mode)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def write(self, valid_time, value):
"""
Appends data to a dataset.
Examples
--------
>>> with bth5.open(temp_h5, '/', mode='w', value_dtype=np.int64) as ds:
... ds.write(np.datetime64("2018-06-21 12:26:47"), 1.0)
... ds.write(np.datetime64("2018-06-21 12:26:49"), 2.0)
... ds.write([
... np.datetime64("2018-06-21 12:26:51"),
... np.datetime64("2018-06-21 12:26:53"),
... ], [3.0, 4.0])
>>> with bth5.open(temp_h5, '/', mode='r', value_dtype=np.int64) as ds:
... ds.records[:]
array([(0, '2018-06-21T12:26:47.000000', 1),
(0, '2018-06-21T12:26:49.000000', 2),
(0, '2018-06-21T12:26:51.000000', 3),
(0, '2018-06-21T12:26:53.000000', 4)],
dtype=[('transaction_id', '<u8'), ('valid_time', '<M8[us]'), ('value', '<i8')])
"""
if self.closed or self._mode not in ("w", "a"):
raise RuntimeError("dataset must be open to write data to it.")
if isinstance(valid_time, Iterable):
for v, d in zip(valid_time, value):
self.write(v, d)
return
data = (-1, valid_time, value)
self._staged_data.append(data)
def interpolate_values(self, interp_times):
"""Interpolates the values at the given valid times."""
interp_times = np.asarray(interp_times).astype(TIME_DTYPE)
min_time, max_time = np.min(interp_times), np.max(interp_times)
considered_records = self._extend_valid_times[min_time:max_time]
        sorted_idx = np.argsort(considered_records["valid_time"], kind="mergesort")
considered_records = considered_records[sorted_idx]
x = considered_records["valid_time"].view(np.int64)
y = considered_records["value"]
return np.interp(interp_times.view(np.int64), x, y)
def _search_valid_transactions(self, k):
if not isinstance(k, slice):
k = slice(k, k, None)
ds = self._dataset
tidx = self._transaction_index
        # Keep only transactions whose valid-time range overlaps the requested
        # window.
        mask = np.ones(tidx.shape, dtype=np.bool_)
        if k.start is not None:
            mask &= tidx["end_valid_time"] >= k.start
        if k.stop is not None:
            mask &= tidx["start_valid_time"] <= k.stop
idxs = np.nonzero(mask)
return self.transaction_idx[
np.min(idxs, initial=0) : np.max(idxs, initial=0) + 1
]
@_wrap_deduplicate
def _index_valid_time(self, k, extend=False):
"""
Indexes into the dataset by valid time.
Examples
--------
>>> with bth5.open(temp_h5, '/', mode='w', value_dtype=np.int64) as ds:
... ds.write(np.datetime64("2018-06-21 12:26:47"), 2.0)
... ds.write(np.datetime64("2018-06-21 12:26:49"), 2.0)
>>> with bth5.open(temp_h5, '/', mode='r', value_dtype=np.int64) as ds:
... ds.valid_times[:]
... ds.valid_times[np.datetime64("2018-06-21 12:26:47"):np.datetime64("2018-06-21 12:26:48")]
... ds.valid_times[np.datetime64("2018-06-21 12:26:48"):]
... ds.valid_times[:np.datetime64("2018-06-21 12:26:48")]
... ds.valid_times[np.datetime64("2018-06-21 12:26:49")]
array([(0, '2018-06-21T12:26:47.000000', 2),
(0, '2018-06-21T12:26:49.000000', 2)],
dtype=[('transaction_id', '<u8'), ('valid_time', '<M8[us]'), ('value', '<i8')])
array([(0, '2018-06-21T12:26:47.000000', 2)],
dtype=[('transaction_id', '<u8'), ('valid_time', '<M8[us]'), ('value', '<i8')])
array([(0, '2018-06-21T12:26:49.000000', 2)],
dtype=[('transaction_id', '<u8'), ('valid_time', '<M8[us]'), ('value', '<i8')])
array([(0, '2018-06-21T12:26:47.000000', 2)],
dtype=[('transaction_id', '<u8'), ('valid_time', '<M8[us]'), ('value', '<i8')])
(0, '2018-06-21T12:26:49.000000', 2)
>>> with bth5.open(temp_h5, '/', mode='r', value_dtype=np.int64) as ds:
... ds.valid_times[np.datetime64("2018-06-21 12:26:48")]
Traceback (most recent call last):
...
ValueError: The specified date was not found in the dataset, use interpolate_value.
"""
ds = self._search_valid_transactions(k)
ds = ds[np.argsort(ds["valid_time"], kind="mergesort")]
sort_field = ds["valid_time"]
if isinstance(k, slice):
if k.step is not None:
raise ValueError(
"Stepping is not supported with indexing, use interpolate_values."
)
if not extend:
start_idx, end_idx = (
(
np.searchsorted(sort_field, k.start)
if k.start is not None
else None
),
(
np.searchsorted(sort_field, k.stop)
if k.stop is not None
else None
),
)
else:
start_idx, end_idx = (
(
np.searchsorted(sort_field, k.start, side="right") - 1
if k.start is not None
else None
),
(
np.searchsorted(sort_field, k.stop, side="left") + 1
if k.stop is not None
else None
),
)
return ds[start_idx:end_idx]
else:
possible_idx = np.searchsorted(sort_field, k)
if sort_field[possible_idx] == k:
return ds[possible_idx]
else:
raise ValueError(
"The specified date was not found in the dataset, use interpolate_value."
)
@_wrap_deduplicate
def _index_by(self, field, k, multi=False):
sort_field = self._dataset[field]
if multi and not isinstance(k, slice):
k = slice(k, k, None)
if k.step is not None:
raise ValueError(
"Stepping is not supported with indexing, use interpolate_values."
)
start_idx = (
np.searchsorted(sort_field, k.start) if k.start is not None else None
)
end_idx = (
np.searchsorted(sort_field, k.stop, side="right")
if k.stop is not None
else None
)
return self._dataset[start_idx:end_idx]
def _construct_indexer(key, multi=False):
def reader(self, k):
return self._index_by(key, k, multi=multi)
return _Indexer(reader)
def _index_extended_valid_time(self, k):
return self._index_valid_time(k, extend=True)
def _transaction_times(self, k):
"""
Index into the transaction index by transaction time.
Examples
--------
>>> with bth5.open(temp_h5, '/', mode='w', value_dtype=np.int64) as ds:
... ds.write(np.datetime64("2018-06-21 12:26:47"), 2.0)
... ds.write(np.datetime64("2018-06-21 12:26:49"), 2.0)
>>> with bth5.open(temp_h5, '/', mode='r', value_dtype=np.int64) as ds:
... ds.transaction_times[:] # doctest: +SKIP
array([('2019-09-19T10:32:00.210817', '2018-06-21T12:26:47.000000', '2018-06-21T12:26:49.000000', 0, 2)],
dtype=[('transaction_time', '<M8[us]'), ('start_valid_time', '<M8[us]'), ('end_valid_time', '<M8[us]'), ('start_idx', '<u8'), ('end_idx', '<u8')])
"""
tidx = self._transaction_index
sort_field = tidx["transaction_time"]
if isinstance(k, slice):
if k.step is not None:
raise ValueError(
"Stepping is not supported with indexing, use interpolate_values."
)
start_idx, end_idx = (
(np.searchsorted(sort_field, k.start) if k.start is not None else None),
(np.searchsorted(sort_field, k.stop) if k.stop is not None else None),
)
return tidx[start_idx:end_idx]
else:
possible_idx = np.searchsorted(sort_field, k)
if sort_field[possible_idx] == k:
return tidx[possible_idx]
else:
raise ValueError(
"The specified date was not found in the dataset, use interpolate_value."
)
valid_times = _Indexer(_index_valid_time)
_extend_valid_times = _Indexer(_index_extended_valid_time)
transaction_times = _Indexer(_transaction_times)
transaction_idx = _construct_indexer("transaction_id", multi=True)
transaction_idx.__doc__ = """Indexes into the dataset by transaction ID."""
def _records(self, k):
"""
Index into the dataset by record ID.
Examples
--------
>>> with bth5.open(temp_h5, '/', mode='w', value_dtype=np.int64) as ds:
... ds.write(np.datetime64("2018-06-21 12:26:47"), 2.0)
... ds.write(np.datetime64("2018-06-21 12:26:49"), 2.0)
>>> with bth5.open(temp_h5, '/', mode='r', value_dtype=np.int64) as ds:
... ds.records[:] # doctest: +SKIP
        array([(0, '2018-06-21T12:26:47.000000', 2),
               (0, '2018-06-21T12:26:49.000000', 2)],
              dtype=[('transaction_id', '<u8'), ('valid_time', '<M8[us]'), ('value', '<i8')])
"""
return self._dataset[k]
def _transactions(self, k):
"""
Index into the transaction index by transaction ID.
Examples
--------
>>> with bth5.open(temp_h5, '/', mode='w', value_dtype=np.int64) as ds:
... ds.write(np.datetime64("2018-06-21 12:26:47"), 2.0)
... ds.write(np.datetime64("2018-06-21 12:26:49"), 2.0)
>>> with bth5.open(temp_h5, '/', mode='r', value_dtype=np.int64) as ds:
... ds.transactions[:] # doctest: +SKIP
array([('2019-09-30T13:52:44.216755', '2018-06-21T12:26:47.000000', '2018-06-21T12:26:49.000000', 0, 2)],
dtype=[('transaction_time', '<M8[us]'), ('start_valid_time', '<M8[us]'), ('end_valid_time', '<M8[us]'), ('start_idx', '<u8'), ('end_idx', '<u8')])
"""
return self._transaction_index[k]
records = _Indexer(_records)
transactions = _Indexer(_transactions)
def open(filename, path, mode="r", value_dtype=None, **kwargs):
"""Opens a bitemporal HDF5 dataset."""
ds = Dataset(filename, path, value_dtype=value_dtype)
ds.open(mode, **kwargs)
return ds
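# --- Illustrative usage (editor's sketch, not part of the original module) ---
# `interpolate_values` has no doctest above; this sketch shows how it might be
# called. The file name "example.h5" and the group "/data" are hypothetical,
# and `open` refers to the module-level helper defined above.
#
# import numpy as np
# with open("example.h5", "/data", mode="a", value_dtype=np.float64) as ds:
#     ds.write(np.datetime64("2018-06-21 12:26:47"), 1.0)
#     ds.write(np.datetime64("2018-06-21 12:26:49"), 3.0)
# with open("example.h5", "/data", mode="r") as ds:
#     ds.interpolate_values([np.datetime64("2018-06-21 12:26:48")])
# # expected: array([2.]) -- linear interpolation between the two written values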
| 32.815752
| 160
| 0.537888
|
6cad27739adaf7a08393c070871bbb785a61ae5b
| 12,577
|
py
|
Python
|
tests/tools/assigner/actions/balancemodules/test_rackaware.py
|
akashvacher/kafka-tools
|
0d98bbefc1105851b7b7203de4f6c68d9c097730
|
[
"Apache-2.0"
] | 578
|
2016-05-05T05:18:15.000Z
|
2022-03-23T07:18:07.000Z
|
tests/tools/assigner/actions/balancemodules/test_rackaware.py
|
akashvacher/kafka-tools
|
0d98bbefc1105851b7b7203de4f6c68d9c097730
|
[
"Apache-2.0"
] | 94
|
2016-04-29T23:25:38.000Z
|
2022-02-07T17:16:16.000Z
|
tests/tools/assigner/actions/balancemodules/test_rackaware.py
|
akashvacher/kafka-tools
|
0d98bbefc1105851b7b7203de4f6c68d9c097730
|
[
"Apache-2.0"
] | 150
|
2016-04-29T16:33:20.000Z
|
2022-03-14T10:05:48.000Z
|
import sys
import unittest
from argparse import Namespace
from collections import deque
from mock import call, patch
from ..fixtures import set_up_cluster, set_up_subparser
from kafka.tools.exceptions import BalanceException
from kafka.tools.models.broker import Broker
from kafka.tools.assigner.actions.balance import ActionBalance
from kafka.tools.assigner.actions.balancemodules.rackaware import (ActionBalanceRackAware, check_partition_swappable, racks_for_replica_list,
difference_in_size_to_last_partition)
class ActionBalanceRackAwareTests(unittest.TestCase):
def setUp(self):
self.cluster = set_up_cluster()
self.cluster.topics['testTopic1'].partitions[0].size = 1000
self.cluster.topics['testTopic1'].partitions[1].size = 1000
self.cluster.topics['testTopic2'].partitions[0].size = 2000
self.cluster.topics['testTopic2'].partitions[1].size = 2000
(self.parser, self.subparsers) = set_up_subparser()
self.args = Namespace(exclude_topics=[])
def test_configure_args(self):
ActionBalance.configure_args(self.subparsers)
sys.argv = ['kafka-assigner', 'balance', '-t', 'rackaware']
parsed_args = self.parser.parse_args()
assert parsed_args.action == 'balance'
def test_init_broker_deque(self):
action = ActionBalanceRackAware(self.args, self.cluster)
assert set(action._random_brokers) == set(self.cluster.brokers.values())
def test_check_partition_swappable_already_exists(self):
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
replicas_a = [b1, b2]
replicas_b = [b2, b1]
assert check_partition_swappable(replicas_a, replicas_b, 0) is False
assert check_partition_swappable(replicas_b, replicas_a, 0) is False
def test_check_partition_swappable_racks_collide(self):
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
b3 = Broker('brokerhost3.example.com', id=3)
b4 = Broker('brokerhost4.example.com', id=4)
b2.rack = "a"
b3.rack = "a"
b4.rack = "b"
replicas_a = [b1, b2]
replicas_b = [b3, b4]
assert check_partition_swappable(replicas_a, replicas_b, 0) is False
assert check_partition_swappable(replicas_b, replicas_a, 0) is False
def test_check_partition_swappable_racks_ok(self):
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
b3 = Broker('brokerhost3.example.com', id=3)
b4 = Broker('brokerhost4.example.com', id=4)
b2.rack = "a"
b3.rack = "b"
b4.rack = "b"
replicas_a = [b1, b2]
replicas_b = [b3, b4]
assert check_partition_swappable(replicas_a, replicas_b, 0) is True
assert check_partition_swappable(replicas_b, replicas_a, 0) is True
def test_racks_for_replica_list_nopos(self):
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
assert racks_for_replica_list([b1, b2]) == ["a", "b"]
def test_racks_for_replica_list_pos(self):
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
assert racks_for_replica_list([b1, b2], 1) == ["a"]
def test_difference_in_size_larger(self):
partitions = [self.cluster.topics['testTopic1'].partitions[0],
self.cluster.topics['testTopic1'].partitions[1]]
partitions[0].size = 1000
partitions[1].size = 2000
p = self.cluster.topics['testTopic2'].partitions[0]
p.size = 3000
assert difference_in_size_to_last_partition(p, partitions) == 1000
def test_difference_in_size_smaller(self):
partitions = [self.cluster.topics['testTopic1'].partitions[0],
self.cluster.topics['testTopic1'].partitions[1]]
partitions[0].size = 1000
partitions[1].size = 2000
p = self.cluster.topics['testTopic2'].partitions[0]
p.size = 1000
assert difference_in_size_to_last_partition(p, partitions) == 1000
def test_difference_in_size_empty_list(self):
partitions = []
p = self.cluster.topics['testTopic2'].partitions[0]
p.size = 1000
assert difference_in_size_to_last_partition(p, partitions) == float("inf")
def test_try_pick_new_broker_skipself(self):
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
b3 = Broker('brokerhost3.example.com', id=3)
self.cluster.add_broker(b3)
b3.rack = "a"
action = ActionBalanceRackAware(self.args, self.cluster)
# Firmly order the deque
action._random_brokers = deque([b1, b2, b3])
newbroker = action._try_pick_new_broker(self.cluster.topics['testTopic1'].partitions[0], 0)
assert newbroker == b3
assert action._random_brokers == deque([b1, b2, b3])
def test_try_pick_new_broker_failed(self):
action = ActionBalanceRackAware(self.args, self.cluster)
self.assertRaises(BalanceException, action._try_pick_new_broker, self.cluster.topics['testTopic1'].partitions[0], 0)
def test_try_pick_new_broker(self):
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
b3 = Broker('brokerhost3.example.com', id=3)
self.cluster.add_broker(b3)
b3.rack = "b"
action = ActionBalanceRackAware(self.args, self.cluster)
# Firmly order the deque
action._random_brokers = deque([b3, b1, b2])
newbroker = action._try_pick_new_broker(self.cluster.topics['testTopic1'].partitions[0], 1)
assert newbroker == b3
assert action._random_brokers == deque([b1, b2, b3])
def test_try_pick_swap_partition_none(self):
action = ActionBalanceRackAware(self.args, self.cluster)
small_partitions = [self.cluster.topics['testTopic1'].partitions[0],
self.cluster.topics['testTopic1'].partitions[1]]
large_partitions = [self.cluster.topics['testTopic2'].partitions[0]]
p = self.cluster.topics['testTopic2'].partitions[1]
small_partitions[0].size = 1000
small_partitions[1].size = 2000
large_partitions[0].size = 4000
p.size = 3000
assert action._try_pick_swap_partition(p, 0, small_partitions, large_partitions) is None
@patch('kafka.tools.assigner.actions.balancemodules.rackaware.check_partition_swappable')
def test_try_pick_swap_partitions_nosmall(self, mock_check):
action = ActionBalanceRackAware(self.args, self.cluster)
small_partitions = []
large_partitions = [self.cluster.topics['testTopic2'].partitions[0]]
p = self.cluster.topics['testTopic2'].partitions[1]
large_partitions[0].size = 4000
p.size = 3000
mock_check.return_value = True
assert action._try_pick_swap_partition(p, 0, small_partitions, large_partitions) == self.cluster.topics['testTopic2'].partitions[0]
@patch('kafka.tools.assigner.actions.balancemodules.rackaware.check_partition_swappable')
def test_try_pick_swap_partitions_nolarge(self, mock_check):
action = ActionBalanceRackAware(self.args, self.cluster)
large_partitions = []
small_partitions = [self.cluster.topics['testTopic2'].partitions[0]]
p = self.cluster.topics['testTopic2'].partitions[1]
small_partitions[0].size = 1000
p.size = 3000
mock_check.return_value = True
assert action._try_pick_swap_partition(p, 0, small_partitions, large_partitions) == self.cluster.topics['testTopic2'].partitions[0]
@patch('kafka.tools.assigner.actions.balancemodules.rackaware.check_partition_swappable')
def test_try_pick_swap_partition_full(self, mock_check):
action = ActionBalanceRackAware(self.args, self.cluster)
small_partitions = [self.cluster.topics['testTopic1'].partitions[0],
self.cluster.topics['testTopic1'].partitions[1]]
large_partitions = [self.cluster.topics['testTopic2'].partitions[0]]
p = self.cluster.topics['testTopic2'].partitions[1]
small_partitions[0].size = 1000
small_partitions[1].size = 2000
large_partitions[0].size = 4000
p.size = 3000
mock_check.side_effect = [False, False, True]
target = action._try_pick_swap_partition(p, 0, small_partitions, large_partitions)
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
mock_check.assert_has_calls([call([b1, b2], [b2, b1], 0), call([b1, b2], [b2, b1], 0), call([b1, b2], [b1, b2], 0)])
assert target == self.cluster.topics['testTopic1'].partitions[0]
def test_get_sorted_list(self):
action = ActionBalanceRackAware(self.args, self.cluster)
p1 = self.cluster.topics['testTopic1'].partitions[0]
p2 = self.cluster.topics['testTopic1'].partitions[1]
p3 = self.cluster.topics['testTopic2'].partitions[0]
p4 = self.cluster.topics['testTopic2'].partitions[1]
p1.size = 1000
p2.size = 6000
p3.size = 3000
p4.size = 4000
assert action._get_sorted_partition_list_at_pos(0) == [p1, p3, p4, p2]
def test_get_sorted_list_missing_replica(self):
action = ActionBalanceRackAware(self.args, self.cluster)
p1 = self.cluster.topics['testTopic1'].partitions[0]
p2 = self.cluster.topics['testTopic1'].partitions[1]
p3 = self.cluster.topics['testTopic2'].partitions[0]
p4 = self.cluster.topics['testTopic2'].partitions[1]
p1.size = 1000
p2.size = 6000
p3.size = 3000
p4.size = 4000
p1.replicas = [self.cluster.brokers[1]]
assert action._get_sorted_partition_list_at_pos(0) == [p3, p4, p2]
@patch.object(ActionBalanceRackAware, '_try_pick_swap_partition')
def test_process_partitions_at_pos_nochange(self, mock_pick):
action = ActionBalanceRackAware(self.args, self.cluster)
action._process_partitions_at_pos(0)
mock_pick.assert_not_called()
@patch.object(ActionBalanceRackAware, '_try_pick_swap_partition')
def test_process_partitions_at_pos_swap_partition(self, mock_pick):
action = ActionBalanceRackAware(self.args, self.cluster)
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
b3 = Broker('brokerhost3.example.com', id=3)
b4 = Broker('brokerhost4.example.com', id=4)
self.cluster.add_broker(b3)
self.cluster.add_broker(b4)
b3.rack = "a"
b4.rack = "c"
self.cluster.topics['testTopic2'].partitions[0].swap_replicas(b2, b3)
self.cluster.topics['testTopic1'].partitions[1].swap_replicas(b1, b4)
mock_pick.return_value = self.cluster.topics['testTopic1'].partitions[1]
action._process_partitions_at_pos(0)
assert self.cluster.topics['testTopic1'].partitions[1].replicas == [b3, b4]
assert self.cluster.topics['testTopic2'].partitions[0].replicas == [b2, b1]
@patch.object(ActionBalanceRackAware, '_try_pick_swap_partition')
@patch.object(ActionBalanceRackAware, '_try_pick_new_broker')
def test_process_partitions_at_pos_swap_broker(self, mock_broker, mock_pick):
action = ActionBalanceRackAware(self.args, self.cluster)
b1 = self.cluster.brokers[1]
b2 = self.cluster.brokers[2]
b3 = Broker('brokerhost3.example.com', id=3)
b4 = Broker('brokerhost4.example.com', id=4)
self.cluster.add_broker(b3)
self.cluster.add_broker(b4)
b3.rack = "a"
b4.rack = "c"
self.cluster.topics['testTopic2'].partitions[0].swap_replicas(b2, b3)
self.cluster.topics['testTopic1'].partitions[1].swap_replicas(b1, b4)
mock_pick.return_value = None
mock_broker.return_value = b2
action._process_partitions_at_pos(0)
assert self.cluster.topics['testTopic1'].partitions[1].replicas == [b2, b4]
assert self.cluster.topics['testTopic2'].partitions[0].replicas == [b2, b1]
def test_process_cluster_single_rack(self):
action = ActionBalanceRackAware(self.args, self.cluster)
self.cluster.brokers[2].rack = "a"
self.assertRaises(BalanceException, action.process_cluster)
@patch.object(ActionBalanceRackAware, '_process_partitions_at_pos')
def test_process_cluster(self, mock_process):
action = ActionBalanceRackAware(self.args, self.cluster)
action.process_cluster()
mock_process.assert_has_calls([call(0), call(1)])
| 45.90146
| 141
| 0.674406
|
52cabc43bb488be047e0b39249cb019bf620677f
| 1,690
|
py
|
Python
|
brax/tests/dto_test.py
|
teruyuki-yamasaki/brax
|
9e9a782523e94a792cda6c655de6df7f0d3b72ad
|
[
"Apache-2.0"
] | 1
|
2021-09-30T06:23:26.000Z
|
2021-09-30T06:23:26.000Z
|
brax/tests/dto_test.py
|
teruyuki-yamasaki/brax
|
9e9a782523e94a792cda6c655de6df7f0d3b72ad
|
[
"Apache-2.0"
] | 1
|
2021-06-18T14:57:49.000Z
|
2021-06-18T14:57:49.000Z
|
brax/tests/dto_test.py
|
isabella232/brax
|
02682dee8be5476121d5629eaf411ecbad1aa2da
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training tests dor direct trajectory optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
# Dependency imports
from absl.testing import absltest
from absl.testing import parameterized
from flax import serialization
import jax
from brax import envs
from brax.training import dto
def run_test(seed):
env_name = 'ant'
eval_frequency = 10
episode_length = 1000
action_repeat = 1
learning_rate = 1
num_envs = 1
max_gradient_norm = 0.2
env_fn = envs.create_fn(env_name)
inference, params, metrics = dto.train(
environment_fn=env_fn,
log_frequency=eval_frequency,
episode_length=episode_length,
action_repeat=action_repeat,
learning_rate=learning_rate,
num_envs=num_envs,
max_gradient_norm=max_gradient_norm,
seed=seed)
return inference, params, metrics, env_fn
class TrainingTest(parameterized.TestCase):
def testTraining(self):
_, _, metrics, _ = run_test(seed=0)
logging.info(metrics)
if __name__ == '__main__':
absltest.main()
| 26
| 74
| 0.75503
|
63cc66ee53e09d5a3a884f56812a7db47f081a74
| 1,500
|
py
|
Python
|
grr/server/grr_response_server/flows/general/artifact_fallbacks_test.py
|
4ndygu/grr
|
cfc725b5ee3a2626ac4cdae7fb14471612da4522
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/flows/general/artifact_fallbacks_test.py
|
4ndygu/grr
|
cfc725b5ee3a2626ac4cdae7fb14471612da4522
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/flows/general/artifact_fallbacks_test.py
|
4ndygu/grr
|
cfc725b5ee3a2626ac4cdae7fb14471612da4522
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Tests for grr.server.flows.general.artifact_fallbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import flags
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server.flows.general import artifact_fallbacks
from grr.test_lib import action_mocks
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
@db_test_lib.DualDBTest
class TestSystemRootSystemDriveFallbackFlow(flow_test_lib.FlowTestsBaseclass):
def testSystemRootFallback(self):
client_id = self.SetupClient(0)
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.ClientVFSHandlerFixture):
client_mock = action_mocks.ListDirectoryClientMock()
session_id = flow_test_lib.TestFlowHelper(
artifact_fallbacks.SystemRootSystemDriveFallbackFlow.__name__,
client_mock,
client_id=client_id,
token=self.token,
artifact_name="WindowsEnvironmentVariableSystemRoot")
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertEqual(str(results[0].registry_data.GetValue()), r"C:\WINDOWS")
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 32.608696
| 79
| 0.77
|
6f7044ac254193cc664728c584d90e555365ff70
| 1,358
|
py
|
Python
|
onlineDFS.py
|
shubham-11700069/Python
|
9ea7dcbe6d9050453357a0c56815ce2371f9364e
|
[
"Apache-2.0"
] | null | null | null |
onlineDFS.py
|
shubham-11700069/Python
|
9ea7dcbe6d9050453357a0c56815ce2371f9364e
|
[
"Apache-2.0"
] | null | null | null |
onlineDFS.py
|
shubham-11700069/Python
|
9ea7dcbe6d9050453357a0c56815ce2371f9364e
|
[
"Apache-2.0"
] | null | null | null |
# Python program to print DFS traversal from a
# given graph
from collections import defaultdict
# This class represents a directed graph using
# adjacency list representation
class Graph:
# Constructor
def __init__(self):
# default dictionary to store graph
self.graph = defaultdict(list)
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
# A function used by DFS
def DFSUtil(self,v,visited):
# Mark the current node as visited and print it
        visited[v] = True
        print(v, end=' ')
# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.DFSUtil(i, visited)
# The function to do DFS traversal. It uses
# recursive DFSUtil()
def DFS(self,v):
# Mark all the vertices as not visited
visited = [False]*(len(self.graph))
# Call the recursive helper function to print
# DFS traversal
self.DFSUtil(v,visited)
# Driver code
# Create a graph given in the above diagram
g = Graph()
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
print "Following is DFS from (starting from vertex 2)"
g.DFS(2)
| 24.25
| 61
| 0.606038
|
9ccca357f65c4009afc748454e59cb90d1e653c1
| 1,460
|
py
|
Python
|
alembic/versions/bc442d63d7d3_move_forward_current_value_for_heron.py
|
sanger/baracoda
|
27fbba64293d2d8f952dc35a32f47597b9f4908f
|
[
"MIT"
] | null | null | null |
alembic/versions/bc442d63d7d3_move_forward_current_value_for_heron.py
|
sanger/baracoda
|
27fbba64293d2d8f952dc35a32f47597b9f4908f
|
[
"MIT"
] | 225
|
2020-04-17T15:20:29.000Z
|
2022-03-31T06:13:44.000Z
|
alembic/versions/bc442d63d7d3_move_forward_current_value_for_heron.py
|
sanger/baracoda
|
27fbba64293d2d8f952dc35a32f47597b9f4908f
|
[
"MIT"
] | 1
|
2021-03-01T10:19:15.000Z
|
2021-03-01T10:19:15.000Z
|
"""Move forward current value for heron
Revision ID: bc442d63d7d3
Revises: a32c725ae353
Create Date: 2021-10-13 14:07:48.425000
"""
from alembic import op
import os
# revision identifiers, used by Alembic.
revision = "bc442d63d7d3"
down_revision = "a32c725ae353"
branch_labels = None
depends_on = None
def __DANGER_restart_sequence_with(value):
if os.environ.get("CONFIRM_HERON_SEQUENCE_RESTART") is None:
description = (
"'This migration is potentially destructive. Update the value for RESTART WITH"
"to a known unused value and set the environment variable "
"CONFIRM_HERON_SEQUENCE_RESTART to confirm you want to apply this migration.'"
)
raise ValueError(description)
# Last value in database is 2564197, we want to move forward past this value
op.execute(f"ALTER SEQUENCE heron RESTART WITH { value };")
def upgrade():
# Last value in database is 2564197, we want to move forward past this value
__DANGER_restart_sequence_with(2564300)
def downgrade():
# Actual value in Baracoda is 2156143, but if we rollback we can leave a bit of space
# Please retrieve a sensible value and edit if you plan to downgrade
description = (
"\nPlease substitute this exception with a line in python like: \n"
"__DANGER_restart_sequence_with(<newvalue>)\n"
"where <newvalue> is a SAFE value you estimate.\n"
)
raise Exception(description)
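# --- Editor's note (illustrative, not part of the original migration) ---
# Applying this migration requires the guard variable to be present, e.g. from
# a shell (the value is arbitrary; only its presence is checked):
#
#   CONFIRM_HERON_SEQUENCE_RESTART=1 alembic upgrade head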
| 33.181818
| 91
| 0.717808
|
632a3fa753d43551674690c2e70078b3fe48cf14
| 16
|
py
|
Python
|
doodah/__init__.py
|
Scaremonger/doodah
|
f4d8c4aaa7b3f46e183b1a661ecac6f35d83ae30
|
[
"MIT"
] | null | null | null |
doodah/__init__.py
|
Scaremonger/doodah
|
f4d8c4aaa7b3f46e183b1a661ecac6f35d83ae30
|
[
"MIT"
] | null | null | null |
doodah/__init__.py
|
Scaremonger/doodah
|
f4d8c4aaa7b3f46e183b1a661ecac6f35d83ae30
|
[
"MIT"
] | null | null | null |
name = "doodah"
| 8
| 15
| 0.625
|
f9722d2c0c2fc1d26a1f270368ee7124add9618f
| 1,391
|
py
|
Python
|
config.py
|
gumato/Personal-Blog
|
d65e328f1aaa484eb2f560908d57265452f4b62d
|
[
"Unlicense",
"MIT"
] | null | null | null |
config.py
|
gumato/Personal-Blog
|
d65e328f1aaa484eb2f560908d57265452f4b62d
|
[
"Unlicense",
"MIT"
] | null | null | null |
config.py
|
gumato/Personal-Blog
|
d65e328f1aaa484eb2f560908d57265452f4b62d
|
[
"Unlicense",
"MIT"
] | null | null | null |
import os
class Config:
'''
General configuration parent class
'''
SECRET_KEY = 'gumeshi1'
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://pricilla:1234@localhost/blog'
UPLOADED_PHOTOS_DEST ='app/static/photos'
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
# SUBJECT_PREFIX = 'Personal Blog'
# SENDER_EMAIL = 'gumatopricilla22@gmail.com'
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
class TestConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://pricilla:1234@localhost/blog_test'
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
pass
# SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://pricilla:1234@localhost/blog'
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
}
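# --- Illustrative usage (editor's sketch, not part of the original file) ---
# config_options maps an environment name to a configuration class; a Flask
# app factory would typically pick one like this (create_app and the use of
# the 'development' key are assumptions about the surrounding project):
#
# from flask import Flask
#
# def create_app(config_name='development'):
#     app = Flask(__name__)
#     app.config.from_object(config_options[config_name])
#     return app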
| 26.245283
| 87
| 0.70381
|
5ef54b38f3c13b0b374d2169da6d6bdc0d69f889
| 59,361
|
py
|
Python
|
snpdb/views/views.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
snpdb/views/views.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
snpdb/views/views.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
import itertools
import json
import logging
from collections import OrderedDict, defaultdict
from typing import Iterable
import pandas as pd
from celery.result import AsyncResult
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from django.db.utils import IntegrityError
from django.forms.models import inlineformset_factory, ALL_FIELDS
from django.forms.widgets import TextInput
from django.http.response import HttpResponse, HttpResponseRedirect, HttpResponseServerError, JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.urls.base import reverse
from django.views.decorators.cache import cache_page
from django.views.decorators.http import require_POST
from django.views.decorators.vary import vary_on_cookie
from django_messages.models import Message
from global_login_required import login_not_required
from guardian.shortcuts import get_objects_for_group, get_objects_for_user
from termsandconditions.decorators import terms_required
from analysis.analysis_templates import get_sample_analysis
from analysis.forms import AnalysisOutputNodeChoiceForm
from analysis.models import AnalysisTemplate, SampleStats, SampleStatsPassingFilter
from annotation.forms import GeneCountTypeChoiceForm
from annotation.manual_variant_entry import create_manual_variants, can_create_variants
from annotation.models import AnnotationVersion
from annotation.models.models import ManualVariantEntryCollection, VariantAnnotationVersion
from annotation.models.models_gene_counts import GeneValueCountCollection, \
GeneCountType, SampleAnnotationVersionVariantSource, CohortGeneCounts
from classification.classification_stats import get_grouped_classification_counts
from classification.models.clinvar_export_sync import clinvar_export_sync
from classification.views.classification_datatables import ClassificationColumns
from genes.custom_text_gene_list import create_custom_text_gene_list
from genes.forms import CustomGeneListForm, UserGeneListForm, GeneAndTranscriptForm
from genes.models import GeneListCategory, CustomTextGeneList, GeneList
from library.constants import WEEK_SECS, HOUR_SECS
from library.django_utils import add_save_message, get_model_fields, set_form_read_only
from library.guardian_utils import assign_permission_to_user_and_groups, DjangoPermission
from library.keycloak import Keycloak
from library.utils import full_class_name, import_class, rgb_invert
from ontology.models import OntologyTerm
from patients.forms import PatientForm
from patients.models import Patient, Clinician
from patients.views import get_patient_upload_csv
from snpdb import forms
from snpdb.bam_file_path import get_example_replacements
from snpdb.forms import SampleChoiceForm, VCFChoiceForm, \
UserSettingsOverrideForm, UserForm, UserContactForm, SampleForm, TagForm, SettingsInitialGroupPermissionForm, \
OrganizationForm, LabForm, LabUserSettingsOverrideForm, OrganizationUserSettingsOverrideForm
from snpdb.graphs import graphcache
from snpdb.graphs.allele_frequency_graph import AlleleFrequencyHistogramGraph
from snpdb.graphs.chromosome_density_graph import SampleChromosomeDensityGraph
from snpdb.graphs.chromosome_intervals_graph import ChromosomeIntervalsGraph
from snpdb.graphs.homozygosity_percent_graph import HomozygosityPercentGraph
from snpdb.import_status import set_vcf_and_samples_import_status
from snpdb.models import CachedGeneratedFile, VariantGridColumn, UserSettings, \
VCF, UserTagColors, CustomColumnsCollection, CustomColumn, Cohort, \
CohortSample, GenomicIntervalsCollection, Sample, UserDataPrefix, UserGridConfig, \
get_igv_data, SampleLocusCount, UserContact, Tag, Wiki, Organization, GenomeBuild, \
Trio, AbstractNodeCountSettings, CohortGenotypeCollection, UserSettingsOverride, NodeCountSettingsCollection, Lab, \
LabUserSettingsOverride, OrganizationUserSettingsOverride, LabHead, SomalierRelatePairs, \
VariantZygosityCountCollection, VariantZygosityCountForVCF, ClinVarKey, AvatarDetails
from snpdb.models.models_enums import ProcessingStatus, ImportStatus, BuiltInFilters
from snpdb.tasks.soft_delete_tasks import soft_delete_vcfs
from snpdb.utils import LabNotificationBuilder
from upload.uploaded_file_type import retry_upload_pipeline
@terms_required
def index(request):
if Clinician.user_is_clinician(request.user):
return redirect('clinician_login')
return render(request, 'index.html')
def data(request):
return render(request, 'snpdb/data/data.html')
def maps(request):
return render(request, 'maps.html')
def get_writable_class_object(user, class_name, primary_key):
klass = import_class(class_name)
name = klass.__name__
obj = klass.objects.get(pk=primary_key)
if not obj.can_write(user):
write_perm = DjangoPermission.perm(obj, DjangoPermission.WRITE)
msg = f"You do not have permission {write_perm} needed to modify {name}"
raise PermissionDenied(msg)
return obj, name
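# Example (illustrative): get_writable_class_object(request.user, "snpdb.models.VCF", vcf_id)
# returns (vcf_instance, "VCF"), or raises PermissionDenied if the user lacks write access.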
def get_writable_class_objects(user, class_name):
klass = import_class(class_name)
name = klass.__name__
write_perm = DjangoPermission.perm(klass, DjangoPermission.WRITE)
qs = get_objects_for_user(user, write_perm, klass=klass, accept_global_perms=False)
return qs, name
def group_permissions(request, class_name, primary_key):
obj, name = get_writable_class_object(request.user, class_name, primary_key)
try:
# If object has "get_permission_object" it can delegate it.
permission_obj = obj.get_permission_object()
perm_obj_name = permission_obj.__class__.__name__
except AttributeError:
# Default is use itself
permission_obj = obj
perm_obj_name = name
permission_forms = get_group_permission_forms(request, permission_obj)
if request.method == 'POST':
valid = all([pf.is_valid() for pf in permission_forms])
if valid:
for pf in permission_forms:
pf.save()
add_save_message(request, valid, f"{perm_obj_name} group permissions")
get_listing_url = getattr(obj, "get_listing_url", None)
if get_listing_url:
delete_redirect_url = get_listing_url()
else:
delete_redirect_url = "/"
context = {'permission_forms': permission_forms,
'class_name': class_name,
'name': name,
'perm_obj_name': perm_obj_name,
'permission_obj': permission_obj,
'instance': obj,
'delete_redirect_url': delete_redirect_url}
return render(request, 'snpdb/data/group_permissions.html', context)
@require_POST
def group_permissions_object_delete(request, class_name, primary_key):
if class_name == 'snpdb.models.VCF': # TODO: Hack? Make some class object?
soft_delete_vcfs(request.user, primary_key)
else:
obj, _ = get_writable_class_object(request.user, class_name, primary_key)
try:
obj.delete()
except IntegrityError as ie:
pks = ", ".join(str(o.pk) for o in ie.args[1])
error_message = f"{ie.args[0]}: {pks}"
return HttpResponseServerError(content=error_message)
return HttpResponse()
def bulk_group_permissions(request, class_name):
qs, name = get_writable_class_objects(request.user, class_name)
groups = list(request.user.groups.all().order_by("name"))
objects_and_forms = []
for obj in qs:
permission_forms = get_group_permission_forms(request, obj, groups=groups)
objects_and_forms.append((obj, permission_forms))
if request.method == 'POST':
all_forms = []
for _, permission_forms in objects_and_forms:
all_forms.extend(permission_forms)
valid = all([pf.is_valid() for pf in all_forms])
if valid:
for pf in all_forms:
pf.save()
add_save_message(request, valid, f"{name} group permissions")
context = {"name": name,
"groups": groups,
"objects_and_forms": objects_and_forms}
return render(request, 'snpdb/data/bulk_group_permissions.html', context)
def _get_vcf_sample_stats(vcf, klass):
""" Count is het + hom """
ss_fields = ("sample_id", "sample__name", "variant_count", "ref_count", "het_count", "hom_count", "unk_count")
ss_values_qs = klass.objects.filter(sample__vcf=vcf).order_by("sample").values(*ss_fields)
sample_stats_het_hom_count = {}
sample_names = []
sample_zygosities = defaultdict(list)
for value_dict in ss_values_qs:
sample_id = value_dict.pop("sample_id")
sample_names.append(value_dict.pop("sample__name"))
value_dict.pop("variant_count")
sample_stats_het_hom_count[sample_id] = value_dict["het_count"] + value_dict["hom_count"]
for k, v in value_dict.items():
sample_zygosities[k].append(v)
return sample_stats_het_hom_count, sample_names, tuple(sample_zygosities.items())
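# Shape of the return value (illustrative):
#   ({sample_id: het_count + hom_count, ...},
#    [sample names in sample order],
#    (("ref_count", [...]), ("het_count", [...]), ("hom_count", [...]), ("unk_count", [...])))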
def view_vcf(request, vcf_id):
vcf = VCF.get_for_user(request.user, vcf_id)
# I couldn't get prefetch_related_objects([vcf], "sample_set__samplestats") to work - so storing in a dict
sample_stats_het_hom_count, sample_names, sample_zygosities = _get_vcf_sample_stats(vcf, SampleStats)
sample_stats_pass_het_hom_count, _, sample_zygosities_pass = _get_vcf_sample_stats(vcf, SampleStatsPassingFilter)
VCFSampleFormSet = inlineformset_factory(VCF, Sample, extra=0, can_delete=False,
fields=["vcf_sample_name", "name", "patient", "specimen", "bam_file_path"],
widgets=SampleForm.Meta.widgets)
post = request.POST or None
vcf_form = forms.VCFForm(post, instance=vcf)
samples_form = VCFSampleFormSet(post, instance=vcf)
for form in samples_form.forms:
form.fields["vcf_sample_name"].disabled = True
requires_user_input = vcf.import_status == ImportStatus.REQUIRES_USER_INPUT
reload_vcf = False
if request.method == 'POST':
valid = all(f.is_valid() for f in [vcf_form, samples_form])
if valid:
vcf = vcf_form.save()
reload_vcf = requires_user_input and vcf.genome_build
samples_form.save()
add_save_message(request, valid, "VCF")
try:
# Some legacy data was too hard to fix and relies on being re-imported
_ = vcf.cohort
_ = vcf.cohort.cohort_genotype_collection
except (Cohort.DoesNotExist, CohortGenotypeCollection.DoesNotExist):
messages.add_message(request, messages.ERROR, "This legacy VCF is missing data and needs to be reloaded.")
if reload_vcf:
set_vcf_and_samples_import_status(vcf, ImportStatus.IMPORTING)
retry_upload_pipeline(vcf.uploadedvcf.uploaded_file.uploadpipeline)
vcf_form = forms.VCFForm(post, instance=vcf) # Reload as import status has changed
messages.add_message(request, messages.INFO, "Reloading VCF")
for warning, _ in vcf.get_warnings():
messages.add_message(request, messages.WARNING, warning, extra_tags='import-message')
has_write_permission = vcf.can_write(request.user)
if not has_write_permission:
messages.add_message(request, messages.WARNING, "You can view but not modify this data.")
variant_zygosity_count_collections = {}
for vzcc in VariantZygosityCountCollection.objects.all():
vzc_vcf = VariantZygosityCountForVCF.objects.filter(vcf=vcf, collection=vzcc).first()
variant_zygosity_count_collections[vzcc] = vzc_vcf
context = {
'vcf': vcf,
'sample_stats_het_hom_count': sample_stats_het_hom_count,
'sample_stats_pass_het_hom_count': sample_stats_pass_het_hom_count,
'sample_names': sample_names,
'sample_zygosities': sample_zygosities,
'vcf_form': vcf_form,
'samples_form': samples_form,
'patient_form': PatientForm(user=request.user), # blank
'has_write_permission': has_write_permission,
'can_download_vcf': (not settings.VCF_DOWNLOAD_ADMIN_ONLY) or request.user.is_superuser,
"variant_zygosity_count_collections": variant_zygosity_count_collections,
}
return render(request, 'snpdb/data/view_vcf.html', context)
def get_patient_upload_csv_for_vcf(request, pk):
vcf = VCF.get_for_user(request.user, pk)
sample_qs = vcf.sample_set.all()
filename = f"vcf_{pk}_patient_upload"
return get_patient_upload_csv(filename, sample_qs)
def view_sample(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
has_write_permission = sample.can_write(request.user)
form = forms.SampleForm(request.POST or None, instance=sample)
if not has_write_permission:
set_form_read_only(form)
messages.add_message(request, messages.WARNING, "You can view but not modify this data.")
if request.method == 'POST':
if not has_write_permission:
raise PermissionDenied("Can't modify public data")
valid = form.is_valid()
if valid:
form.save()
add_save_message(request, valid, "Sample")
sample_locus_count = list(SampleLocusCount.objects.filter(sample=sample).order_by("locus_count"))
igv_data = get_igv_data(request.user, genome_build=sample.genome_build)
patient_form = PatientForm(user=request.user) # blank
related_samples = None
if settings.SOMALIER.get("enabled"):
related_samples = SomalierRelatePairs.get_for_sample(sample).order_by("relate")
    # Cohorts containing this sample (assumed reverse lookup through CohortSample)
    cohorts = Cohort.objects.filter(cohortsample__sample=sample)
    context = {'sample': sample,
'samples': [sample],
'sample_locus_count': sample_locus_count,
'form': form,
'patient_form': patient_form,
'cohorts': cohorts,
'has_write_permission': has_write_permission,
'igv_data': igv_data,
"related_samples": related_samples}
return render(request, 'snpdb/data/view_sample.html', context)
def sample_variants_tab(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
analysis = None
error_message = None
if settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE:
try:
analysis_template = AnalysisTemplate.objects.get(name=settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE)
analysis = get_sample_analysis(sample, analysis_template)
except AnalysisTemplate.DoesNotExist:
error_message = f"Analysis Template '{settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE}' does not exist!"
else:
error_message = "settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE not set. Talk to your administrator"
if error_message:
messages.add_message(request, messages.ERROR, error_message)
context = {
'sample': sample,
"analysis": analysis,
'output_node_form': AnalysisOutputNodeChoiceForm(analysis=analysis)
}
return render(request, 'snpdb/data/sample_variants_tab.html', context)
def sample_variants_gene_detail(request, sample_id, gene_symbol):
sample = Sample.get_for_user(request.user, sample_id)
context = {'sample': sample,
'sample_ids': [sample.pk],
'gene_symbol': gene_symbol,
"datatable_config": ClassificationColumns(request)}
return render(request, 'snpdb/data/sample_variants_gene_detail.html', context)
def sample_graphs_tab(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
context = {'sample': sample}
return render(request, 'snpdb/data/sample_graphs_tab.html', context)
def get_group_permission_forms(request, obj, groups=None):
if groups is None:
groups = request.user.groups.all().order_by("name")
return [forms.GroupPermissionForm(request.POST or None, obj=obj, group=group) for group in groups]
def sample_permissions_tab(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
context = {'sample': sample,
'class_name': full_class_name(Sample)}
return render(request, 'snpdb/data/sample_permissions_tab.html', context)
def view_genomic_intervals(request, genomic_intervals_collection_id):
gic = get_object_or_404(GenomicIntervalsCollection, pk=genomic_intervals_collection_id)
if not request.user.has_perm('view_genomicintervalscollection', gic):
raise PermissionDenied()
form = forms.GenomicIntervalsCollectionForm(request.POST or None, instance=gic)
if request.method == "POST":
valid = form.is_valid()
if valid:
gic = form.save()
add_save_message(request, valid, "Genomic Intervals")
if gic.genome_build is None:
msg = "Unable to automatically set build, please select manually."
messages.add_message(request, messages.WARNING, msg, extra_tags='import-message')
context = {'gic': gic,
'form': form,
"has_write_permission": gic.can_write(request.user)}
return render(request, 'snpdb/data/view_genomic_intervals.html', context)
@require_POST
def cached_generated_file_delete(request):
cgf_id = request.POST["cgf_id"]
cgf = get_object_or_404(CachedGeneratedFile, pk=cgf_id)
cgf.delete()
return HttpResponse()
def vcfs(request):
context = {
"form": VCFChoiceForm(),
}
return render(request, 'snpdb/data/vcfs.html', context=context)
def samples(request):
groups = request.user.groups.values_list("name", flat=True)
groups_str = ', '.join(groups)
num_groups = len(groups)
if num_groups > 1:
group_info = f"(or owned by one of your groups: {groups_str})"
elif num_groups:
group_info = f"(or owned by your group: {groups_str})"
else:
group_info = ''
context = {
"form": SampleChoiceForm(),
"group_info": group_info,
}
return render(request, 'snpdb/data/samples.html', context=context)
def bed_files(request):
return render(request, 'snpdb/data/bed_files.html')
@require_POST
def messages_bulk_delete(request):
messages_str = request.POST['message_ids']
message_ids = json.loads(messages_str)
user_messages_qs = Message.objects.filter(recipient=request.user)
user_messages_qs.filter(pk__in=message_ids).delete()
return HttpResponse()
def manual_variant_entry(request):
if can_create_variants(request.user):
form = forms.ManualVariantEntryForm(request.POST or None, user=request.user)
if request.method == 'POST':
valid = form.is_valid()
if valid:
variants_text = form.cleaned_data['variants_text']
genome_build_pk = form.cleaned_data['genome_build']
genome_build = GenomeBuild.objects.get(pk=genome_build_pk)
create_manual_variants(request.user, genome_build, variants_text)
form = forms.ManualVariantEntryForm(None, user=request.user) # Reset form
add_save_message(request, valid, "Manually entered variants")
else:
form = None
messages.add_message(request, messages.INFO, "Manual variant entry has been disabled by an admin.")
mvec_qs = ManualVariantEntryCollection.objects.order_by("-id")
context = {"form": form,
"mvec_qs": mvec_qs}
return render(request, 'snpdb/data/manual_variant_entry.html', context=context)
@require_POST
def set_user_row_config(request):
""" This is set from jqgrid.html setRowChangeCallbacks when changing grid rows """
grid_name = request.POST["grid_name"]
grid_rows = int(request.POST["grid_rows"])
UserGridConfig.objects.update_or_create(user=request.user, grid_name=grid_name, defaults={"rows": grid_rows})
return HttpResponse()
@require_POST
def set_user_data_grid_config(request):
""" This is set from user_data_grid_filter.html, should contain either filter_level+checked or filter_name """
grid_name = request.POST["grid_name"]
user_grid_config = UserGridConfig.get(request.user, grid_name)
filter_level = request.POST.get("filter_level")
if filter_level:
checked = json.loads(request.POST["checked"])
if filter_level == 'groups':
user_grid_config.show_group_data = checked
elif filter_level == 'incomplete':
user_grid_config.show_incomplete_data = checked
elif filter_level == 'hidden':
user_grid_config.show_hidden_data = checked
else:
msg = f"Unknown value for filter_level: '{filter_level}'"
raise ValueError(msg)
else:
user_grid_config.filter_name = request.POST["filter_name"]
user_grid_config.save()
return HttpResponse()
def view_user_settings(request):
user = request.user
user_contact = UserContact.get_for_user(user)
action = request.POST.get('action') if request.POST else None
    post = (request.POST or None) if not action else None  # don't bind forms when handling an action button
user_form = UserForm(post, instance=user)
user_contact_form = UserContactForm(post, instance=user_contact)
user_settings = UserSettings.get_for_user(user)
override_source, override_values = user_settings.get_override_source_and_values_before_user()
user_settings_override = UserSettingsOverride.objects.get(user=user)
user_settings_override_form = UserSettingsOverrideForm(post, instance=user_settings_override)
    labs_by_group_name = {lab.group_name: lab for lab in Lab.valid_labs_qs(user)}
group_initial_perm_forms = {}
if settings.USER_SETTINGS_SHOW_GROUPS:
read_groups, write_groups = user_settings.initial_perm_read_and_write_groups
for group in user.groups.all().order_by('name'):
initial = {"read": group in read_groups, "write": group in write_groups}
group_initial_perm_forms[group] = SettingsInitialGroupPermissionForm(request.POST or None, initial=initial,
settings_override=user_settings_override,
group=group)
if request.method == "POST":
all_valid = True
action = request.POST.get('action')
if action == 'password-reset':
keycloak = Keycloak()
keycloak.change_password(user)
messages.add_message(request, level=messages.INFO, message='Password reset email sent',
extra_tags='save-message')
else:
if not settings.USE_OIDC:
if user_form.is_valid():
user = user_form.save()
else:
all_valid = False
for form in itertools.chain([user_contact_form, user_settings_override_form],
group_initial_perm_forms.values()):
if form.is_valid():
form.save()
else:
all_valid = False
add_save_message(request, all_valid, "User Settings")
context = {
'user': user,
'user_form': user_form,
'user_contact_form': user_contact_form,
'user_settings_form': user_settings_override_form,
'group_initial_perm_forms': group_initial_perm_forms,
'accounts_email': settings.ACCOUNTS_EMAIL,
'account_manage_url': settings.OIDC_USER_SERVICES,
'override_source': override_source,
'override_values': override_values,
'labs_by_group_name': labs_by_group_name,
'avatar_details': AvatarDetails.avatar_for(user)
}
return render(request, 'snpdb/settings/view_user_settings.html', context)
def user_settings_node_counts_tab(request):
user_settings_override = UserSettingsOverride.objects.get_or_create(user=request.user)[0]
return _settings_override_node_counts_tab(request, user_settings_override)
def lab_settings_node_counts_tab(request, pk):
lab = get_object_or_404(Lab, pk=pk)
has_write_permission = lab.can_write(request.user)
if has_write_permission is False:
_add_read_only_settings_message(request, [lab])
lab_settings_override = LabUserSettingsOverride.objects.get_or_create(lab=lab)[0]
return _settings_override_node_counts_tab(request, lab_settings_override, has_write_permission=has_write_permission)
def organization_settings_node_counts_tab(request, pk):
organization = get_object_or_404(Organization, pk=pk)
has_write_permission = organization.can_write(request.user)
if has_write_permission is False:
_add_read_only_settings_message(request, organization.lab_set.all())
org_settings_override = OrganizationUserSettingsOverride.objects.get_or_create(organization=organization)[0]
return _settings_override_node_counts_tab(request, org_settings_override, has_write_permission=has_write_permission)
def _settings_override_node_counts_tab(request, settings_override, has_write_permission=True):
# This calls _analysis_settings_node_counts_tab with a FakeAnalysis object that
# handles loading/saving a global one against User settings objects instead of analysis
class FakeAnalysis:
def set_node_count_types(self, node_counts_array):
collection, _ = NodeCountSettingsCollection.objects.get_or_create(settings=settings_override)
AbstractNodeCountSettings.save_count_configs_from_array(collection.nodecountsettings_set, node_counts_array)
def get_node_count_types(self):
try:
node_count_config = settings_override.nodecountsettingscollection
node_count_filters = node_count_config.get_node_count_filters()
            except Exception:  # no saved node count settings - fall back to defaults
node_count_filters = BuiltInFilters.DEFAULT_NODE_COUNT_FILTERS
return AbstractNodeCountSettings.get_types_from_labels(node_count_filters)
fake_analysis = FakeAnalysis()
from analysis.views.views import _analysis_settings_node_counts_tab # Circular import
return _analysis_settings_node_counts_tab(request, fake_analysis,
pass_analysis_settings=False, has_write_permission=has_write_permission)
def view_user(request, pk):
user = get_object_or_404(User, pk=pk)
user_contact = UserContact.get_for_user(user)
context = {"user": user,
'user_contact': user_contact}
return render(request, 'snpdb/settings/view_user.html', context)
def _add_read_only_settings_message(request, lab_list: Iterable[Lab]):
""" lab_list: labs where lab heads can modify settings """
lab_heads_qs = LabHead.objects.filter(lab__in=lab_list).distinct()
lab_head_names = ", ".join([str(lh.user) for lh in lab_heads_qs])
if lab_head_names:
lab_head_msg = f" or lab heads: {lab_head_names}"
else:
lab_head_msg = ""
read_only_message = f"Only administrators{lab_head_msg} can modify these settings"
messages.add_message(request, messages.INFO, read_only_message)
def view_lab(request, pk):
lab = get_object_or_404(Lab, pk=pk)
lab_form = LabForm(request.POST or None, instance=lab)
lab_settings_override = LabUserSettingsOverride.objects.get_or_create(lab=lab)[0]
override_fields = set(get_model_fields(LabUserSettingsOverride)) - {"id", "settingsoverride_ptr", "lab"}
parent_overrides = UserSettings.get_settings_overrides(organization=lab.organization)
override_source, override_values = UserSettings.get_override_source_and_values(override_fields, parent_overrides)
settings_overrides = parent_overrides + [lab_settings_override]
read_groups, write_groups = UserSettings.get_initial_perm_read_and_write_groups([lab.group], settings_overrides)
initial = {"read": lab.group in read_groups, "write": lab.group in write_groups}
group_initial_perm_form = None
if settings.USER_SETTINGS_SHOW_GROUPS:
group_initial_perm_form = SettingsInitialGroupPermissionForm(request.POST or None, initial=initial,
settings_override=lab_settings_override,
group=lab.group)
lab_settings_override_form = LabUserSettingsOverrideForm(request.POST or None, instance=lab_settings_override)
has_write_permission = lab.can_write(request.user)
all_forms = [form for form in [lab_form, group_initial_perm_form, lab_settings_override_form] if form]
if request.method == "POST":
lab.check_can_write(request.user)
if debug_method := request.POST.get("debug_method"):
if "Test Slack" == debug_method:
if not lab.slack_webhook:
messages.add_message(request, messages.ERROR, "Slack URL not configured correctly")
else:
#try:
notification_builder = LabNotificationBuilder(lab=lab, message="Testing Slack Integration", notification_type=LabNotificationBuilder.NotificationType.SLACK_ONLY)
notification_builder.add_header(f"{settings.SITE_NAME} -> Slack Integration Test")
notification_builder.add_markdown("If you can see this, then integration has worked! :smile:")
notification_builder.send()
messages.add_message(request, messages.SUCCESS, "Message sent, check your Slack to confirm")
#except:
# report_exc_info()
# messages.add_message(request, messages.ERROR, "Unable to send test notification")
return redirect(reverse('view_lab', kwargs={"pk":pk}))
else:
raise ValueError(f"Un-supported debug method {debug_method}")
else:
all_valid = True
for form in all_forms:
if form.is_valid():
form.save()
else:
all_valid = False
add_save_message(request, all_valid, "Lab Settings")
if has_write_permission is False:
for form in all_forms:
set_form_read_only(form)
# we just hide the form now
# _add_read_only_settings_message(request, [lab])
if settings.VARIANT_CLASSIFICATION_STATS_USE_SHARED:
visibility = "Shared"
else:
visibility = f"Created"
context = {
"lab": lab,
"visibility": visibility,
"is_member": lab.is_member(request.user) or request.user.is_superuser,
"lab_form": lab_form,
'settings_override_form': lab_settings_override_form,
'group_initial_perm_form': group_initial_perm_form,
'override_source': override_source,
'override_values': override_values,
'has_write_permission': has_write_permission,
'clinvar_export_enabled': clinvar_export_sync.is_enabled
}
return render(request, 'snpdb/settings/view_lab.html', context)
def view_clinvar_key(request, pk: str):
clinvar_key = get_object_or_404(ClinVarKey, pk=pk)
clinvar_key.check_user_can_access(request.user)
return render(request, 'snpdb/settings/clinvar_key.html', {
'clinvar_key': clinvar_key,
'labs': Lab.objects.filter(clinvar_key=clinvar_key).order_by('name')
})
def view_organization(request, pk):
organization = get_object_or_404(Organization, pk=pk)
organization_form = OrganizationForm(request.POST or None, instance=organization)
org_settings_override = OrganizationUserSettingsOverride.objects.get_or_create(organization=organization)[0]
override_fields = set(get_model_fields(OrganizationUserSettingsOverride)) - {"id", "settingsoverride_ptr", "organization"}
parent_overrides = UserSettings.get_settings_overrides()
override_source, override_values = UserSettings.get_override_source_and_values(override_fields, parent_overrides)
org_settings_override_form = OrganizationUserSettingsOverrideForm(request.POST or None, instance=org_settings_override)
all_forms = [organization_form, org_settings_override_form]
if request.method == "POST":
organization.check_can_write(request.user)
all_valid = True
for form in all_forms:
if form.is_valid():
form.save()
else:
all_valid = False
add_save_message(request, all_valid, "Organization Settings")
has_write_permission = organization.can_write(request.user)
if has_write_permission is False:
for form in all_forms:
set_form_read_only(form)
# put on individual tabs now
# _add_read_only_settings_message(request, organization.lab_set.all())
context = {
"organization": organization,
"is_member": organization.is_member(request.user) or request.user.is_superuser,
"organization_form": organization_form,
'settings_override_form': org_settings_override_form,
'override_source': override_source,
'override_values': override_values,
'has_write_permission': has_write_permission,
}
return render(request, 'snpdb/settings/view_organization.html', context)
def custom_columns(request):
context = {}
form = forms.CustomColumnsCollectionForm(request.POST or None, user=request.user)
if request.method == "POST":
if form.is_valid():
ccc = form.save()
return HttpResponseRedirect(reverse("view_custom_columns", kwargs={"custom_columns_collection_id": ccc.pk}))
add_save_message(request, False, "Columns", created=True)
context["form"] = form
return render(request, 'snpdb/settings/custom_columns.html', context)
# Based on code from http://j-syk.com/weblog/2012/10/18/jquery-sortables-ajax-django/
def view_custom_columns(request, custom_columns_collection_id):
ccc = CustomColumnsCollection.get_for_user(request.user, custom_columns_collection_id)
custom_columns_qs = VariantGridColumn.objects.filter(customcolumn__custom_columns_collection=ccc)
my_columns = list(custom_columns_qs.order_by("customcolumn__sort_order"))
available_columns = list(VariantGridColumn.objects.exclude(grid_column_name__in=my_columns))
variant_grid_columns = {}
for vgc in VariantGridColumn.objects.all():
variant_grid_columns[vgc.pk] = vgc
has_write_permission = ccc.can_write(request.user)
if not has_write_permission:
msg = "You do not have permission to edit these columns. " \
"If you wish to customise them, click 'clone' and modify the copy"
messages.add_message(request, messages.WARNING, msg)
if request.method == "POST":
ccc.check_can_write(request.user)
if name := request.POST.get("name"):
ccc.name = name
ccc.save()
elif my_columns_str := request.POST.get("columns"):
def update_user_columns(id_list, active):
for i, col in enumerate(id_list):
column = variant_grid_columns[col]
CustomColumn.objects.update_or_create(custom_columns_collection=ccc, column=column,
defaults={"sort_order": i})
# Delete any not in id_list
CustomColumn.objects.filter(custom_columns_collection=ccc).exclude(column__in=id_list).delete()
my_columns_list = my_columns_str.split(',') if my_columns_str else []
active = 'my_columns' in request.POST
update_user_columns(my_columns_list, active)
return HttpResponse() # Nobody ever looks at this
context_dict = {
'available_columns_list': available_columns,
'my_columns_list': my_columns,
'custom_columns': ccc,
'has_write_permission': has_write_permission,
}
return render(request, 'snpdb/settings/view_custom_columns.html', context_dict)
def tag_settings(request):
form = forms.CreateTagForm(request.POST or None)
if request.method == "POST":
valid = form.is_valid()
if valid:
tag_name = form.cleaned_data['tag']
name = f"Tag {tag_name}"
try:
Tag.objects.create(pk=tag_name)
            except Exception:  # e.g. IntegrityError if the tag already exists
valid = False
else:
name = "Tag"
add_save_message(request, valid, name, created=True)
user_tag_styles, user_tag_colors = UserTagColors.get_tag_styles_and_colors(request.user)
context_dict = {'form': form,
'user_tag_styles': user_tag_styles,
'user_tag_colors': user_tag_colors}
return render(request, 'snpdb/settings/tag_settings.html', context_dict)
@require_POST
def set_user_tag_color(request):
tag = request.POST['tag']
rgb = request.POST['rgb']
(utc, _) = UserTagColors.objects.get_or_create(user=request.user, tag_id=tag)
utc.rgb = rgb
utc.save()
logging.info("saved %s", utc)
return HttpResponse()
def igv_integration(request):
widgets = {"prefix": TextInput(attrs={'placeholder': 'from...'}),
"replacement": TextInput(attrs={'placeholder': 'to...'})}
UserDataPrefixFormSet = inlineformset_factory(User,
UserDataPrefix,
can_delete=True,
fields=ALL_FIELDS,
widgets=widgets,
max_num=10,
extra=3, )
formset = UserDataPrefixFormSet(request.POST or None, instance=request.user)
if request.method == "POST":
valid = formset.is_valid()
if valid:
formset.save()
add_save_message(request, valid, "IGV Integration")
context_dict = {'user': request.user,
'formset': formset,
'example_replacements': get_example_replacements(request.user)}
return render(request, 'snpdb/settings/igv_integration.html', context_dict)
def cohorts(request):
user_settings = UserSettings.get_for_user(request.user)
initial = {'genome_build': user_settings.default_genome_build}
form = forms.CreateCohortForm(request.POST or None, initial=initial)
if request.method == "POST":
valid = form.is_valid()
if valid:
cohort = form.save()
assign_permission_to_user_and_groups(request.user, cohort)
return HttpResponseRedirect(reverse('view_cohort', kwargs={'cohort_id': cohort.pk}))
else:
add_save_message(request, valid, "Cohort", created=True)
context = {"form": form}
return render(request, 'snpdb/patients/cohorts.html', context)
def view_cohort_details_tab(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
context = {"cohort": cohort,
"has_write_permission": cohort.can_write(request.user)}
return render(request, 'snpdb/patients/view_cohort_details_tab.html', context)
def view_cohort(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
if cohort.vcf:
return redirect('view_vcf', vcf_id=cohort.vcf.pk)
try:
cohort_genotype_collection = cohort.cohort_genotype_collection
except CohortGenotypeCollection.DoesNotExist:
cohort_genotype_collection = None
form = forms.CohortForm(request.POST or None, instance=cohort)
if request.method == "POST":
if valid := form.is_valid():
cohort = form.save()
add_save_message(request, valid, "Cohort")
sample_form = SampleChoiceForm(genome_build=cohort.genome_build)
sample_form.fields['sample'].required = False
context = {"form": form,
"sample_form": sample_form,
"cohort": cohort,
"cohort_genotype_collection": cohort_genotype_collection,
"has_write_permission": cohort.can_write(request.user)}
return render(request, 'snpdb/patients/view_cohort.html', context)
def cohort_sample_edit(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
if request.method == "POST":
cohort_op = request.POST['cohort_op']
sample_ids_str = request.POST['sample_ids']
sample_ids = json.loads(sample_ids_str)
if cohort_op == 'add':
for sample_id in sample_ids:
cohort.add_sample(sample_id)
elif cohort_op == 'remove':
for sample_id in sample_ids:
try:
cohort_sample = CohortSample.objects.get(cohort=cohort, sample_id=sample_id)
cohort_sample.delete()
logging.info("Removed: %s", sample_id)
except CohortSample.DoesNotExist:
pass
else:
raise ValueError(f"Unknown cohort_op '{cohort_op}'")
return HttpResponse()
def cohort_hotspot(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
form = GeneAndTranscriptForm(genome_build=cohort.genome_build)
try:
cohort_genotype_collection = cohort.cohort_genotype_collection
except Exception as e:
cohort_genotype_collection = None
logging.error(e)
context = {"cohort": cohort,
"cohort_genotype_collection": cohort_genotype_collection,
"form": form}
return render(request, 'snpdb/patients/cohort_hotspot.html', context)
def cohort_gene_counts(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
COHORT_CUSTOM_GENE_LIST = f"__QC_COVERAGE_CUSTOM_GENE_LIST__{request.user}"
# We only want to keep 1 per user
custom_text_gene_list, _ = CustomTextGeneList.objects.get_or_create(name=COHORT_CUSTOM_GENE_LIST)
custom_gene_list_form = CustomGeneListForm(request.POST or None,
initial={"custom_gene_list_text": custom_text_gene_list.text})
if custom_gene_list_form.is_valid():
custom_text_gene_list.text = custom_gene_list_form.cleaned_data['custom_gene_list_text']
custom_text_gene_list.save()
create_custom_text_gene_list(custom_text_gene_list, request.user, GeneListCategory.QC_COVERAGE_CUSTOM_TEXT,
hidden=True)
gene_list_id = custom_text_gene_list.gene_list.pk
else:
gene_list_id = None
context = {"cohort": cohort,
'gene_list_id': gene_list_id,
'gene_list_form': UserGeneListForm(),
'custom_gene_list_form': custom_gene_list_form,
'gene_count_type_choice_form': GeneCountTypeChoiceForm()}
return render(request, 'snpdb/patients/cohort_gene_counts.html', context)
def cohort_gene_counts_matrix(request, cohort_id, gene_count_type_id, gene_list_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
gene_count_type = GeneCountType.objects.get(pk=gene_count_type_id)
gene_list = GeneList.get_for_user(request.user, gene_list_id)
samples = list(cohort.get_samples())
annotation_version = AnnotationVersion.latest(cohort.genome_build)
variant_annotation_version = annotation_version.variant_annotation_version
cgc, created = CohortGeneCounts.objects.get_or_create(variant_annotation_version=variant_annotation_version,
gene_count_type=gene_count_type,
cohort=cohort,
cohort_version=cohort.version)
graph_kwargs = {"cohort_id": cohort_id,
"gene_count_type_id": gene_count_type_id,
"gene_list_id": gene_list_id}
redirect_url = reverse("cohort_gene_counts_matrix", kwargs=graph_kwargs)
if created or (cgc.processing_status not in ProcessingStatus.FINISHED_STATES):
celery_task = cgc.launch_task()
wait_for_task_kwargs = {"celery_task": celery_task, "sleep_ms": 2000, "redirect_url": redirect_url}
wait_url = reverse("wait_for_task", kwargs=wait_for_task_kwargs)
return HttpResponseRedirect(wait_url)
else:
if cgc.processing_status == ProcessingStatus.SUCCESS:
return sample_gene_matrix(request, variant_annotation_version, samples, gene_list, gene_count_type)
else:
raise ValueError(f"{cgc} had ProcessingStatus: {cgc.processing_status}")
def trios(request):
context = {}
return render(request, 'snpdb/patients/trios.html', context)
def view_trio(request, pk):
trio = Trio.get_for_user(request.user, pk)
context = {"trio": trio,
"has_write_permission": trio.cohort.can_write(request.user)}
return render(request, 'snpdb/patients/view_trio.html', context)
@login_not_required
def sample_gene_matrix(request, variant_annotation_version, samples, gene_list,
gene_count_type, highlight_gene_symbols=None):
""" highlight_gene_symbols - put these genes 1st """
# 19/07/18 - Plotly can't display a categorical color map. See: https://github.com/plotly/plotly.js/issues/1747
# So just doing as HTML table
if gene_list:
genes = gene_list.get_genes(variant_annotation_version.gene_annotation_release)
gene_symbols = set(gene_list.get_gene_names())
else:
# This was originally designed around a gene list, but now we need to support no gene list (only when uses
# variant classifications)
genes = []
gene_symbols = []
qs = gene_count_type.get_variant_queryset(variant_annotation_version)
GS_PATH = "variantannotation__transcript_version__gene_version__gene_symbol"
qs = qs.filter(**{GS_PATH + "__isnull": False})
for gene, gene_symbol in qs.values_list("variantannotation__gene", GS_PATH).distinct():
genes.append(gene)
gene_symbols.append(gene_symbol)
gene_values = list(gene_count_type.genevalue_set.all().order_by("id"))
default_color = "#d9d9d9"
default_text = ""
empty_gene_value = list(filter(lambda x: x.use_as_empty_value, gene_values))
if len(empty_gene_value) == 1:
default_color = empty_gene_value[0].rgb
phenotypes = ["Age", "HPO", "OMIM"]
highlight_gene_labels = []
other_gene_labels = []
gene_links_lookup = OrderedDict()
for gene_symbol in sorted(gene_symbols):
gene_classes_list = ["gene-label", gene_symbol]
highlight = highlight_gene_symbols and gene_symbol in highlight_gene_symbols
if highlight:
gene_classes_list.append("highlight-gene")
gene_classes = ' '.join(gene_classes_list)
if request.user.is_authenticated: # Only display links to logged in users
url = reverse('view_gene_symbol', kwargs={"gene_symbol": gene_symbol})
gene_symbol_text = f'<a class="{gene_classes}" href="{url}">{gene_symbol}</a>'
else:
gene_symbol_text = f"<span class='{gene_classes}'>{gene_symbol}</span>"
if highlight:
highlight_gene_labels.append(gene_symbol_text)
else:
other_gene_labels.append(gene_symbol_text)
gene_links_lookup[gene_symbol] = gene_symbol_text
matrix_rows = phenotypes + highlight_gene_labels + other_gene_labels
color_df = pd.DataFrame(index=matrix_rows, dtype='O')
text_df = pd.DataFrame(index=matrix_rows)
sample_names = []
used_sample_names = set()
for i, sample in enumerate(samples):
try:
can_access = False
if request.user.is_authenticated: # Only display links to logged in users
try:
Sample.get_for_user(request.user, sample.pk) # Throws exception
can_access = True
except (Sample.DoesNotExist, PermissionDenied):
pass
source = SampleAnnotationVersionVariantSource.objects.get(sample=sample,
variant_annotation_version=variant_annotation_version)
gvcc = GeneValueCountCollection.objects.get(source=source,
gene_count_type=gene_count_type)
gvc_qs = gvcc.genevaluecount_set.filter(gene__in=genes)
sample_code = "%03d" % i
if can_access:
view_sample_url = reverse('view_sample', kwargs={'sample_id': sample.pk})
sample_link = f'<a href="{view_sample_url}">{sample.name}</a>'
if sample_link in used_sample_names:
uniq_sample_name = sample.name + "_" + sample_code
sample_link = f'<a href="{view_sample_url}">{uniq_sample_name}</a>'
sample_name = sample_link
else:
sample_name = "S" + sample_code
sample_names.append(sample_name)
used_sample_names.add(sample_name)
color_df[sample_name] = default_color
color_df.loc["Age", sample_name] = '#FFFFFF'
color_df.loc["HPO", sample_name] = '#FFFFFF'
color_df.loc["OMIM", sample_name] = '#FFFFFF'
text_df[sample_name] = default_text
if sample.patient:
try:
# Check you have Patient permissions
patient = Patient.get_for_user(request.user, sample.patient.pk)
def format_ontology(ontology_term):
return f"<div title='{ontology_term}'>{ontology_term.name}</div>"
hpo, omim = OntologyTerm.split_hpo_and_omim(patient.get_ontology_term_ids())
hpo_text = " ".join(map(format_ontology, hpo))
omim_text = " ".join(map(format_ontology, omim))
try:
age = sample.specimen.age_at_collection_date
                    except Exception:  # specimen or collection date may be missing
age = None
text_df.loc["Age", sample_name] = age or ''
text_df.loc["HPO", sample_name] = hpo_text
text_df.loc["OMIM", sample_name] = omim_text
except PermissionDenied:
pass
except Patient.DoesNotExist:
pass
FIELDS = ["gene__geneversion__gene_symbol", "value__rgb", "value__show_counts", "count"]
for gene_symbol, rgb, show_counts, count in gvc_qs.values_list(*FIELDS):
gene_link = gene_links_lookup[gene_symbol]
color_df.loc[gene_link, sample_name] = rgb
if show_counts:
text_df.loc[gene_link, sample_name] = count
except (SampleAnnotationVersionVariantSource.DoesNotExist, GeneValueCountCollection.DoesNotExist):
pass
def set_style(s):
color_series = color_df[s.name]
styles = []
for color in color_series:
styles.append(f"color: {rgb_invert(color)}; background-color: {color};")
return styles
style = text_df.style.apply(set_style)
style = style.set_table_attributes('class="sample-gene-matrix"')
text_table_html = style.render()
context = {"text_table_html": text_table_html,
"gene_values": gene_values}
return render(request, 'snpdb/patients/cohort_gene_counts_matrix.html', context)
def cohort_sort(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
if request.method == "POST":
cohort_samples_str = request.POST.get("cohort_samples")
cohort_samples_ids = cohort_samples_str.split(',') if cohort_samples_str else []
cohort_samples = []
for i, cs_id in enumerate(cohort_samples_ids):
cohort_sample = CohortSample.objects.get(pk=cs_id, cohort=cohort)
cohort_sample.sort_order = i
cohort_sample.save()
cohort_samples.append(cohort_sample)
else:
cohort_samples = cohort.get_cohort_samples()
context = {'cohort': cohort,
'cohort_samples': cohort_samples}
return render(request, 'snpdb/patients/cohort_sort.html', context)
def help_static_page(request, page_name):
""" This embeds static pages in a help template """
context = {"page_name": page_name}
return render(request, 'snpdb/help/help_static_page.html', context)
def ajax_hello_world(request, data:str):
return render(request, 'snpdb/ajax_hello_world.html', {'data': data})
def staff_only(request):
return render(request, 'snpdb/staff_only.html')
@cache_page(WEEK_SECS)
def tag_autocomplete_form(request):
""" This is an absolutely minimal HTML to create a Tag autocomplete form (used for load()) """
context = {"tag_form": TagForm()}
return render(request, 'snpdb/tag_autocomplete_form.html', context)
def wait_for_task(request, celery_task, sleep_ms, redirect_url):
async_result = AsyncResult(celery_task)
if async_result.successful():
return HttpResponseRedirect(redirect_url)
kwargs = {"celery_task": celery_task, "sleep_ms": sleep_ms, "redirect_url": redirect_url}
url = reverse("wait_for_task", kwargs=kwargs)
context = {"url": url,
"sleep_ms": sleep_ms,
"async_result": async_result}
return render(request, 'snpdb/wait_for_task.html', context)
@require_POST
def wiki_save(request, class_name, unique_keyword, unique_value):
wiki = Wiki.get_or_create(class_name, unique_keyword, unique_value)
markdown = request.POST["markdown"]
wiki.check_user_edit_permission(request.user) # Throws 403
wiki.markdown = markdown
wiki.last_edited_by = request.user
wiki.save()
return JsonResponse({})
def labs(request):
# Use short names is available
short_names_qs = Organization.objects.filter(short_name__isnull=False)
name_to_short_name = dict(short_names_qs.values_list("name", "short_name"))
if settings.VARIANT_CLASSIFICATION_STATS_USE_SHARED:
org_field = "classification__lab__organization__name"
state_field = "classification__lab__state"
show_unclassified = False
else:
org_field = "lab__organization__name"
state_field = "lab__state"
show_unclassified = True
vc_org_data_json = get_grouped_classification_counts(
user=request.user,
field=org_field,
max_groups=15,
field_labels=name_to_short_name,
show_unclassified=show_unclassified)
vc_state_data_json = get_grouped_classification_counts(
user=request.user,
field=state_field,
max_groups=15,
show_unclassified=show_unclassified)
active_organizations = Organization.objects.filter(active=True).order_by('name')
organization_labs = {}
for org in active_organizations:
if settings.VARIANT_CLASSIFICATION_STATS_USE_SHARED:
org_labs = org.sharing_labs
else:
org_labs = org.classifying_labs
if org_labs:
organization_labs[org] = list(org_labs)
    lab_list = [lab for org_lab_list in organization_labs.values() for lab in org_lab_list]
context = {
"organization_labs": organization_labs,
"labs": lab_list,
"shared_classifications": settings.VARIANT_CLASSIFICATION_STATS_USE_SHARED,
"vc_org_data": vc_org_data_json,
"vc_state_data": vc_state_data_json,
"show_unclassified": show_unclassified,
}
return render(request, "snpdb/labs.html", context)
@login_not_required
@cache_page(4 * HOUR_SECS)
def public_global_sample_gene_matrix(request):
# No auth required - rendered w/o links etc
return global_sample_gene_matrix(request)
@cache_page(HOUR_SECS)
@vary_on_cookie
def user_global_sample_gene_matrix(request):
# global_sample_gene_matrix is rendered differently for external/logged in users
# So keep as separate views so we can cache them
return global_sample_gene_matrix(request)
def global_sample_gene_matrix(request):
gene_count_type = GeneCountType.objects.get(pk=settings.PUBLIC_SAMPLE_GENE_MATRIX_TYPE)
gene_list_id = settings.PUBLIC_SAMPLE_GENE_MATRIX_GENE_LIST_ID
if gene_list_id:
gene_list = GeneList.objects.get(pk=gene_list_id)
else:
gene_list = None
if gene_count_type.uses_classifications is False:
raise PermissionDenied("settings.PUBLIC_SAMPLE_GENE_MATRIX_GENE_LIST_ID must be set "
"if GeneCountType.uses_classifications is False")
if settings.PUBLIC_SAMPLE_GENE_MATRIX_SHOW_PRIVATE_SAMPLES:
sample_qs = Sample.objects.filter(import_status=ImportStatus.SUCCESS)
else:
public = Group.objects.get(name='public')
read_perm = DjangoPermission.perm(Sample, DjangoPermission.READ)
sample_qs = get_objects_for_group(public, read_perm, Sample)
if gene_count_type.uses_classifications:
vc_qs = gene_count_type.get_classification_qs()
sample_qs = sample_qs.filter(classification__in=vc_qs)
genome_build_name = settings.PUBLIC_SAMPLE_GENE_MATRIX_GENOME_BUILD
if genome_build_name is None:
try:
genome_build = GenomeBuild.builds_with_annotation().get()
except GenomeBuild.MultipleObjectsReturned:
msg = f"settings.PUBLIC_SAMPLE_GENE_MATRIX_GENOME_BUILD must be set when there are multiple genome builds"
raise ImproperlyConfigured(msg)
else:
genome_build = GenomeBuild.get_name_or_alias(genome_build_name)
samples_list = list(sample_qs.filter(vcf__genome_build=genome_build).order_by("name").distinct())
variant_annotation_version = VariantAnnotationVersion.latest(genome_build)
highlight_gene_symbols = settings.PUBLIC_SAMPLE_GENE_MATRIX_HIGHLIGHT_GENE_SYMBOLS
return sample_gene_matrix(request, variant_annotation_version, samples_list, gene_list, gene_count_type,
highlight_gene_symbols=highlight_gene_symbols)
def genomic_intervals_graph(request, genomic_intervals_collection_id):
graph_class_name = full_class_name(ChromosomeIntervalsGraph)
cached_graph = graphcache.async_graph(graph_class_name, genomic_intervals_collection_id)
return HttpResponseRedirect(reverse("cached_generated_file_check", kwargs={"cgf_id": cached_graph.id}))
def chrom_density_graph(request, sample_id, cmap):
graph_class_name = full_class_name(SampleChromosomeDensityGraph)
cached_graph = graphcache.async_graph(graph_class_name, cmap, sample_id)
return HttpResponseRedirect(reverse("cached_generated_file_check", kwargs={"cgf_id": cached_graph.id}))
def homozygosity_graph(request, sample_id, cmap):
graph_class_name = full_class_name(HomozygosityPercentGraph)
cached_graph = graphcache.async_graph(graph_class_name, cmap, sample_id)
return HttpResponseRedirect(reverse("cached_generated_file_check", kwargs={"cgf_id": cached_graph.id}))
def sample_allele_frequency_histogram_graph(request, sample_id, min_read_depth):
graph_class_name = full_class_name(AlleleFrequencyHistogramGraph)
cached_graph = graphcache.async_graph(graph_class_name, sample_id, min_read_depth)
return HttpResponseRedirect(reverse("cached_generated_file_check", kwargs={"cgf_id": cached_graph.id}))
| 42.736501
| 181
| 0.695356
|
c664a5fe622028749e714ccdbca7d601898d7830
| 1,575
|
py
|
Python
|
share/rpcauth/rpcauth.py
|
proteanx/Bitcorn-Test
|
87e0245c1cbbb1a662ae0f3a3a9411bbe308ab0f
|
[
"MIT"
] | 25
|
2019-01-05T05:00:11.000Z
|
2021-05-03T03:54:07.000Z
|
share/rpcauth/rpcauth.py
|
Mattras007/BITCORN-1
|
47a5cdf7fa559aeeacf23f7d0191ba832561260b
|
[
"MIT"
] | 17
|
2019-07-12T22:10:09.000Z
|
2021-04-07T17:15:26.000Z
|
share/rpcauth/rpcauth.py
|
Mattras007/BITCORN-1
|
47a5cdf7fa559aeeacf23f7d0191ba832561260b
|
[
"MIT"
] | 17
|
2019-06-09T20:46:37.000Z
|
2021-12-31T08:44:19.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
return hexlify(urandom(size)).decode()
def generate_password():
"""Create 32 byte b64 password"""
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
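# Illustrative sketch (hypothetical helper, not used by main): how a stored
# "rpcauth=<user>:<salt>$<hmac>" entry could be verified later by recomputing
# the HMAC from the salt and comparing in constant time.
def verify_password(salt, stored_hmac, candidate_password):
    """Return True if candidate_password matches the stored salt/HMAC pair."""
    return hmac.compare_digest(password_to_hmac(salt, candidate_password), stored_hmac)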
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
# Create 16 byte hex salt
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to bitcorn.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
| 33.510638
| 134
| 0.714286
|
8b602875b973d8dd85a95eca7bd8d359dca659ac
| 4,329
|
py
|
Python
|
benchmark/startQiskit3054.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit3054.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit3054.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=46
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
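# Worked examples (illustrative): bitwise_xor("101", "011") == "011" (per-bit XOR,
# result string reversed) and bitwise_dot("101", "011") == "1" (dot product mod 2).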
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
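# Illustrative behaviour: the returned oracle flips the target qubit exactly for basis
# states x with f(x) == "1", i.e. it implements |x>|y> -> |x>|y XOR f(x)>.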
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=23
prog.rx(-0.6848671984825748,input_qubit[1]) # number=26
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.h(input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.cx(input_qubit[0],input_qubit[3]) # number=30
prog.cx(input_qubit[0],input_qubit[3]) # number=40
prog.x(input_qubit[3]) # number=41
prog.cx(input_qubit[0],input_qubit[3]) # number=42
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.cx(input_qubit[3],input_qubit[0]) # number=20
prog.cx(input_qubit[3],input_qubit[0]) # number=43
prog.z(input_qubit[3]) # number=44
prog.cx(input_qubit[3],input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[3],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.h(input_qubit[1]) # number=36
prog.y(input_qubit[2]) # number=11
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit3054.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 35.195122
| 140
| 0.652345
|
457dff2b35d00ac433a60602f68ecd7b62142bbe
| 885
|
py
|
Python
|
vegeta/create_targets.py
|
killthrush/piccolo-daimao
|
ff4cbeca1a371307c9b89839d453f1e510f3b269
|
[
"MIT"
] | null | null | null |
vegeta/create_targets.py
|
killthrush/piccolo-daimao
|
ff4cbeca1a371307c9b89839d453f1e510f3b269
|
[
"MIT"
] | null | null | null |
vegeta/create_targets.py
|
killthrush/piccolo-daimao
|
ff4cbeca1a371307c9b89839d453f1e510f3b269
|
[
"MIT"
] | null | null | null |
import sys
import random
import uuid
# Generate some random targets for Vegeta to obliterate
if len(sys.argv) > 2:
num_targets = int(sys.argv[1])
target_server = sys.argv[2]
print "Generating {} random targets for server {}...".format(num_targets, target_server)
random_content = []
with open('./random_targets.txt', 'w') as targets_file:
for i in range(0, num_targets):
with open("./random{}".format(i), 'w') as body_file:
content = "key={}&value={}".format(uuid.uuid4(), random.randint(-10000, 10000))
body_file.write(content)
targets_file.write("POST http://{}:3333/increment\n".format(target_server))
targets_file.write("@./random{}\n".format(i))
targets_file.write("\n")
print "Done."
else:
print "This program requires <num_targets> followed by <target_server>. Exiting."
| 35.4
| 93
| 0.642938
|
4684bf40094bf4d86fcbffe4d645a6ce59db551d
| 6,132
|
py
|
Python
|
pyzoo/zoo/examples/ray_on_spark/parameter_server/async_parameter_server.py
|
limn2o4/analytics-zoo
|
78d6ce10976a7e1320ff5ebdf431db93a439ec56
|
[
"Apache-2.0"
] | 2,970
|
2017-06-08T00:24:43.000Z
|
2022-03-30T12:14:55.000Z
|
pyzoo/zoo/examples/ray_on_spark/parameter_server/async_parameter_server.py
|
limn2o4/analytics-zoo
|
78d6ce10976a7e1320ff5ebdf431db93a439ec56
|
[
"Apache-2.0"
] | 3,530
|
2017-05-09T08:29:10.000Z
|
2022-03-21T02:11:45.000Z
|
pyzoo/zoo/examples/ray_on_spark/parameter_server/async_parameter_server.py
|
limn2o4/analytics-zoo
|
78d6ce10976a7e1320ff5ebdf431db93a439ec56
|
[
"Apache-2.0"
] | 972
|
2017-05-09T07:03:50.000Z
|
2022-03-23T07:48:48.000Z
|
# This file is adapted from https://github.com/ray-project/ray/blob
# /master/examples/parameter_server/async_parameter_server.py
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import time
from zoo.examples.ray_on_spark.parameter_server import model
import ray
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
os.environ["LANG"] = "C.UTF-8"
parser = argparse.ArgumentParser(description="Run the asynchronous parameter "
"server example.")
parser.add_argument('--cluster_mode', type=str, default="local",
help='The mode for the Spark cluster. local or yarn.')
parser.add_argument("--num_workers", default=4, type=int,
help="The number of workers to use.")
parser.add_argument("--iterations", default=50, type=int,
help="Iteration time.")
parser.add_argument("--executor_cores", type=int, default=8,
help="The number of driver's cpu cores you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--executor_memory", type=str, default="10g",
help="The size of slave(executor)'s memory you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--driver_memory", type=str, default="2g",
help="The size of driver's memory you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--driver_cores", type=int, default=8,
help="The number of driver's cpu cores you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--extra_executor_memory_for_ray", type=str, default="20g",
help="The extra executor memory to store some data."
"You can change it depending on your own cluster setting.")
parser.add_argument("--object_store_memory", type=str, default="4g",
help="The memory to store data on local."
"You can change it depending on your own cluster setting.")
@ray.remote
class ParameterServer(object):
def __init__(self, keys, values):
# These values will be mutated, so we must create a copy that is not
# backed by the object store.
values = [value.copy() for value in values]
self.weights = dict(zip(keys, values))
def push(self, keys, values):
for key, value in zip(keys, values):
self.weights[key] += value
def pull(self, keys):
return [self.weights[key] for key in keys]
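# A minimal usage sketch of the actor above (illustrative; assumes ray/OrcaContext
# has already been initialized, as done in __main__ below, and that `gradients`
# holds per-key updates):
#
#     keys, values = model.SimpleCNN().get_weights()
#     ps = ParameterServer.remote(keys, values)
#     current = ray.get(ps.pull.remote(keys))   # fetch the latest weights
#     ps.push.remote(keys, gradients)           # apply an update, fire-and-forget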
@ray.remote
def worker_task(ps, worker_index, batch_size=50):
# Download MNIST.
print("Worker " + str(worker_index))
mnist = model.download_mnist_retry(seed=worker_index)
# Initialize the model.
net = model.SimpleCNN()
keys = net.get_weights()[0]
while True:
# Get the current weights from the parameter server.
weights = ray.get(ps.pull.remote(keys))
net.set_weights(keys, weights)
# Compute an update and push it to the parameter server.
xs, ys = mnist.train.next_batch(batch_size)
gradients = net.compute_update(xs, ys)
ps.push.remote(keys, gradients)
if __name__ == "__main__":
args = parser.parse_args()
cluster_mode = args.cluster_mode
if cluster_mode == "yarn":
sc = init_orca_context(cluster_mode=cluster_mode,
cores=args.executor_cores,
memory=args.executor_memory,
init_ray_on_spark=True,
num_executors=args.num_workers,
driver_memory=args.driver_memory,
driver_cores=args.driver_cores,
extra_executor_memory_for_ray=args.extra_executor_memory_for_ray,
object_store_memory=args.object_store_memory,
additional_archive="MNIST_data.zip#MNIST_data")
ray_ctx = OrcaContext.get_ray_context()
elif cluster_mode == "local":
sc = init_orca_context(cores=args.driver_cores)
ray_ctx = OrcaContext.get_ray_context()
    else:
        raise ValueError("init_orca_context failed. cluster_mode should be either 'local' "
                         "or 'yarn' but got " + cluster_mode)
# Create a parameter server with some random weights.
net = model.SimpleCNN()
all_keys, all_values = net.get_weights()
ps = ParameterServer.remote(all_keys, all_values)
# Start some training tasks.
worker_tasks = [worker_task.remote(ps, i) for i in range(args.num_workers)]
# Download MNIST.
mnist = model.download_mnist_retry()
print("Begin iteration")
i = 0
while i < args.iterations:
# Get and evaluate the current model.
print("-----Iteration" + str(i) + "------")
current_weights = ray.get(ps.pull.remote(all_keys))
net.set_weights(all_keys, current_weights)
test_xs, test_ys = mnist.test.next_batch(1000)
accuracy = net.compute_accuracy(test_xs, test_ys)
print("Iteration {}: accuracy is {}".format(i, accuracy))
i += 1
time.sleep(1)
ray_ctx.stop()
stop_orca_context()
| 42.881119
| 98
| 0.638617
|
04d9469b3db916e41521633fe04c13b6ba731923
| 57,665
|
py
|
Python
|
test/functional/test_framework/mininode.py
|
ThomasTail/gamecoin
|
158cbc119e07c5c885ee41b916ed658239655ac0
|
[
"MIT"
] | 1
|
2021-05-04T21:01:14.000Z
|
2021-05-04T21:01:14.000Z
|
test/functional/test_framework/mininode.py
|
ThomasTail/gamecoin
|
158cbc119e07c5c885ee41b916ed658239655ac0
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/mininode.py
|
ThomasTail/gamecoin
|
158cbc119e07c5c885ee41b916ed658239655ac0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
import gamecoin_scrypt
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
BIP0031_VERSION = 60000
MY_VERSION = 80014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
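# Illustrative sketch of the convention above (assuming a NodeConnCB instance
# named `node`): the test-logic thread holds the lock while reading shared state.
#
#     with mininode_lock:
#         last_block = node.last_message.get("block")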
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
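# Illustrative self-checks for the CompactSize helpers above: 1, 3, 5 or 9 bytes
# are used depending on the magnitude of the encoded length, and the two
# functions round-trip.
assert ser_compact_size(252) == b"\xfc"
assert ser_compact_size(253) == b"\xfd\xfd\x00"
assert deser_compact_size(BytesIO(ser_compact_size(5000))) == 5000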
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
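# Illustrative self-check: compact "nBits" pack a size byte and a 3-byte mantissa,
# e.g. the customary regtest-style value 0x207fffff expands to 0x7fffff << (8 * 29).
assert uint256_from_compact(0x207fffff) == 0x7fffff << (8 * 29)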
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
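# Illustrative round trip with the two helpers above (assuming `raw_hex` holds a
# non-witness raw transaction hex string from RPC, and using the CTransaction
# class defined further below):
#
#     tx = FromHex(CTransaction(), raw_hex)   # decode hex from RPC
#     assert ToHex(tx) == raw_hex             # re-encode for submission via RPC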
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.scrypt256 = header.scrypt256
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.scrypt256 = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
self.scrypt256 = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
self.scrypt256 = uint256_from_str(gamecoin_scrypt.getPoWHash(r))
def rehash(self):
self.sha256 = None
self.scrypt256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.scrypt256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.scrypt256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
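# Illustrative self-check: shortids always fit in the 6 bytes that
# P2PHeaderAndShortIDs serializes per entry (keys and hash here are arbitrary).
assert calculate_shortid(0, 0, 0) < (1 << 48)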
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
class NodeConnCB(object):
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
raise
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
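# Illustrative wiring of NodeConnCB with the NodeConn and NetworkThread classes
# defined below (a sketch; assumes an RPC proxy `rpc` and a reachable P2P port,
# shown here as 18444 purely as an example):
#
#     node = NodeConnCB()
#     conn = NodeConn('127.0.0.1', 18444, rpc, node)
#     node.add_connection(conn)
#     NetworkThread().start()
#     node.wait_for_verack()
#     node.send_and_ping(msg_ping())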
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet3": b"\xfc\xc1\xb7\xdc", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
logger.info('Connecting to Gamecoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
raise ValueError("Unknown command: '%s'" % (command))
except Exception as e:
            logger.exception('got_data: %s', repr(e))
raise
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 31.069504
| 262
| 0.598405
|
8b9f3acf691ea145032f365fbb3c01e724da8cfb
| 163
|
py
|
Python
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_Seasonal_DayOfWeek_SVR.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_Seasonal_DayOfWeek_SVR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_Seasonal_DayOfWeek_SVR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['Lag1Trend'] , ['Seasonal_DayOfWeek'] , ['SVR'] );
| 40.75
| 90
| 0.760736
|
12e9d58db28ae45a1cc66c35922036f9ec14ccc0
| 20,833
|
py
|
Python
|
managesf/controllers/root.py
|
softwarefactory-project/managesf
|
7018d041291f50b90e782ca31d0cfc67abd10170
|
[
"Apache-2.0"
] | 1
|
2018-08-02T23:30:03.000Z
|
2018-08-02T23:30:03.000Z
|
managesf/controllers/root.py
|
softwarefactory-project/managesf
|
7018d041291f50b90e782ca31d0cfc67abd10170
|
[
"Apache-2.0"
] | 1
|
2021-12-13T18:24:10.000Z
|
2021-12-13T20:10:39.000Z
|
managesf/controllers/root.py
|
softwarefactory-project/managesf
|
7018d041291f50b90e782ca31d0cfc67abd10170
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import logging
from urllib.parse import unquote
from pecan import conf
from pecan import expose
from pecan import abort
from pecan.rest import RestController
from pecan import request, response
from stevedore import driver
from managesf.controllers import localuser, introspection
from managesf.controllers import SFuser
from managesf.services import base
from managesf.services import exceptions
from managesf import policy
from managesf import DEFAULT_SERVICES
from managesf.controllers.api.v2 import resources as v2_resources
from managesf.controllers.api.v2 import configurations as v2_configurations
from managesf.api.v2.managers import resource_manager
logger = logging.getLogger(__name__)
LOGERRORMSG = "Unable to process client request, failed with "\
"unhandled error: %s"
CLIENTERRORMSG = "Unable to process your request, failed with "\
"unhandled error (server side): %s"
# TODO: add detail (detail arg or abort function) for all abort calls.
# instantiate service plugins
SF_SERVICES = []
SERVICES = {}
def load_services():
try:
if conf.services:
services = conf.services
else:
services = DEFAULT_SERVICES
msg = 'No service configured, loading: %s' % DEFAULT_SERVICES
logger.info(msg)
except AttributeError:
services = DEFAULT_SERVICES
msg = 'Obsolete conf file, loading default: %s' % DEFAULT_SERVICES
logger.info(msg)
for service in services:
try:
plugin = driver.DriverManager(namespace='managesf.service',
name=service,
invoke_on_load=True,
invoke_args=(conf,)).driver
SF_SERVICES.append(plugin)
SERVICES[service] = plugin
logger.info('%s plugin loaded successfully' % service)
except Exception as e:
logger.error('Could not load service %s: %s' % (service, e))
def _decode_project_name(name):
if name.startswith('==='):
try:
n = base64.urlsafe_b64decode(name.encode()[3:])
return n.decode('utf8')
except Exception:
return name[3:]
return name
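# Illustrative self-checks for the helper above: names prefixed with '===' carry
# a urlsafe base64 payload, anything else is returned unchanged.
assert _decode_project_name('config') == 'config'
assert _decode_project_name(
    '===' + base64.urlsafe_b64encode('group/repo'.encode()).decode()) == 'group/repo'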
class SFManager:
user = SFuser.SFUserManager()
sfmanager = SFManager()
def is_admin(user):
return base.RoleManager.is_admin(user)
def report_unhandled_error(exp):
logger.exception(LOGERRORMSG % str(exp))
response.status = 500
return CLIENTERRORMSG % str(exp)
# TODO move to utils and use resources rather than gerrit for groups
def authorize(rule_name, target):
if not request.remote_user:
request.remote_user = request.headers.get('X-Remote-User')
credentials = {'username': request.remote_user, 'groups': []}
# OpenID Connect authentication
if request.headers.get("OIDC_CLAIM_groups", None) is not None:
for group in request.headers.get('OIDC_CLAIM_groups').split(','):
# it seems like keycloak prefixes groups with /, remove it
if group.startswith('/'):
credentials['groups'].append(group[1:])
else:
credentials['groups'].append(group)
# gerrit based
else:
if request.remote_user:
code_reviews = [s for s in SF_SERVICES
if isinstance(s, base.BaseCodeReviewServicePlugin)]
if code_reviews:
user_groups = code_reviews[0].project.get_user_groups(
request.remote_user)
credentials['groups'] = [grp['name'] for grp in user_groups]
return policy.authorize(rule_name, target, credentials)
class LocalUserController(RestController):
@expose('json')
def post(self, username):
_policy = 'managesf.localuser:create_update'
if not authorize(_policy,
target={'username': username}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
infos = request.json if request.content_length else {}
try:
ret = localuser.update_user(username, infos)
except (localuser.InvalidInfosInput, localuser.BadUserInfos) as e:
abort(400, detail=str(e))
except Exception as e:
return report_unhandled_error(e)
if isinstance(ret, dict):
# user created - set correct status code
response.status = 201
return ret
@expose('json')
def get(self, username):
_policy = 'managesf.localuser:get'
if not authorize(_policy,
target={'username': username}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
try:
ret = localuser.get_user(username)
except localuser.UserNotFound as e:
abort(404, detail=str(e))
except Exception as e:
return report_unhandled_error(e)
return ret
@expose('json')
def delete(self, username):
_policy = 'managesf.localuser:delete'
if not authorize(_policy,
target={'username': username}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
try:
ret = localuser.delete_user(username)
except localuser.UserNotFound as e:
abort(404, detail=str(e))
except Exception as e:
return report_unhandled_error(e)
return ret
class LocalUserBindController(RestController):
log = logging.getLogger("BindController")
@expose('json')
def get(self):
_policy = 'managesf.localuser:bind'
authorization = request.headers.get('Authorization', None)
if not authorization:
abort(401, detail="Authentication header missing")
try:
username, password = localuser.decode(authorization)
except localuser.DecodeError:
self.log.warning("Authorization decoding error")
abort(401, detail="Wrong authorization header")
if not authorize(_policy,
target={'username': username}):
self.log.error(u"%s: policy error" % username)
return abort(401,
detail='Failure to comply with policy %s' % _policy)
try:
ret = localuser.bind_user(authorization)
except (localuser.BindForbidden, localuser.UserNotFound) as e:
self.log.warning(u"%s: UserNotFound or Forbidden" % username)
abort(401, detail=str(e))
except Exception as e:
self.log.exception(u"%s: couldn't bind user" % username)
return report_unhandled_error(e)
if not ret:
self.log.exception(u"%s: Authentication failed" % username)
abort(401, detail="Authentication failed")
self.log.info(u"%s: binding success" % username)
return ret
class ServicesUsersController(RestController):
def _remove_non_updatable_fields(self, infos):
forbidden = sum([s.user.check_forbidden_fields(**infos)
for s in SF_SERVICES], [])
msg = 'The following fields cannot be updated: %s, discarding them'
logger.debug(msg % str(forbidden))
return dict((u, infos[u]) for u in infos.keys()
if u not in forbidden and infos[u] is not None)
def _update(self, user_id, infos):
sfmanager.user.update(user_id,
username=infos.get('username'),
email=infos.get('email'),
fullname=infos.get('full_name'),
idp_sync=infos.get('idp_sync'))
for service in SF_SERVICES:
s_id = sfmanager.user.mapping.get_service_mapping(
service.service_name,
user_id)
if s_id:
try:
service.user.update(uid=s_id, **infos)
except exceptions.UnavailableActionError:
pass
else:
full_name = infos.get('full_name')
username = infos.get('username')
ssh_keys = infos.get('ssh_keys', [])
email = infos.get('email')
cauth_id = infos.get('external_id')
try:
s_id = service.user.create(username=username,
email=email,
full_name=full_name,
ssh_keys=ssh_keys,
cauth_id=cauth_id)
sfmanager.user.mapping.set(user_id,
service.service_name,
s_id)
except exceptions.UnavailableActionError:
pass
@expose('json')
def put(self, id=None, email=None, username=None):
_policy = 'managesf.user:update'
infos = request.json if request.content_length else {}
# the JSON payload is only for data to update.
_email = request.GET.get('email')
if _email:
_email = unquote(_email)
_username = request.GET.get('username')
if _username:
_username = unquote(_username)
d_id = request.GET.get('id')
if not d_id and (_email or _username):
logger.debug('[update] looking for %s %s ...' % (_email,
_username))
d_id = sfmanager.user.get(username=_username,
email=_email).get('id')
logger.debug('found %s %s with id %s' % (_email, _username, d_id))
if not d_id:
response.status = 404
return
u = _username or sfmanager.user.get(d_id).get('username')
if not authorize(_policy,
target={'username': u}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
sanitized = self._remove_non_updatable_fields(infos)
logger.debug('[update] sanitized request %r to %r' % (infos,
sanitized))
if not sanitized:
if sanitized != infos:
msg = 'You tried to update immutable fields'
else:
msg = 'Nothing to do'
abort(400,
detail=msg)
try:
self._update(d_id, sanitized)
response.status = 200
return {'updated_fields': sanitized}
except Exception as e:
return report_unhandled_error(e)
@expose('json')
def post(self):
_policy = 'managesf.user:create'
infos = request.json if request.content_length else {}
if not infos or not infos.get('username'):
abort(400, detail=u'Incomplete user information: %r' % infos)
if not authorize(_policy,
target={'username': infos.get('username')}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
try:
known_user = None
if infos.get('external_id', -1) != -1:
known_user = sfmanager.user.get(cauth_id=infos['external_id'])
if known_user:
msg = (u'found user #%(id)s %(username)s (%(email)s) '
u'by cauth ID #%(cauth_id)s, user needs update')
logger.debug(msg % known_user)
u = known_user['id']
clean_infos = self._remove_non_updatable_fields(infos)
if known_user.get('idp_sync'):
self._update(u, clean_infos)
else:
logger.info("Skipping user information update because"
"idp_sync is disabled")
# if we still cannot find it, let's create it
if not known_user:
u = sfmanager.user.create(username=infos['username'],
email=infos['email'],
fullname=infos['full_name'],
cauth_id=infos.get('external_id'))
self._create_user_in_services(u, infos)
except Exception as e:
return report_unhandled_error(e)
# TODO(mhu) later, this should return the local id and the user data
response.status = 201
def _create_user_in_services(self, user_id, infos):
for service in SF_SERVICES:
try:
s_id = service.user.get(username=infos.get('username'))
if s_id:
msg = u'[%s] user %s exists, skipping creation'
logger.debug(msg % (service.service_name,
infos.get('username')))
mapped = sfmanager.user.mapping.get_user_mapping(
service.service_name,
s_id)
if not mapped:
sfmanager.user.mapping.set(user_id,
service.service_name,
s_id)
msg = u'[%s] user %s mapped to id %s'
logger.debug(msg % (service.service_name,
infos.get('username'),
s_id))
else:
full_name = infos.get('full_name')
username = infos.get('username')
ssh_keys = infos.get('ssh_keys', [])
email = infos.get('email')
cauth_id = infos.get('external_id')
s_id = service.user.create(username=username,
email=email,
full_name=full_name,
ssh_keys=ssh_keys,
cauth_id=cauth_id)
# we might have a mapping, but to a wrong user id in the
# service (because the user existed before but was removed
# directly from the service, for example)
mapped = sfmanager.user.mapping.get_service_mapping(
service.service_name,
user_id)
if mapped and mapped != s_id:
msg = u'[%s] user %s wrongly mapped to id %s, removing'
logger.debug(msg % (service.service_name,
infos.get('username'),
mapped))
sfmanager.user.mapping.delete(user_id,
service.service_name,
mapped)
sfmanager.user.mapping.set(user_id,
service.service_name,
s_id)
msg = u'[%s] user %s mapped to %s id %s'
logger.debug(msg % (service.service_name,
infos.get('username'),
service.service_name,
s_id))
except exceptions.UnavailableActionError:
pass
@expose('json')
def get(self, **kwargs):
_policy = 'managesf.user:get'
if not authorize(_policy,
target={'username': kwargs.get('username')}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
return sfmanager.user.get(**kwargs)
@expose()
def delete(self, id=None, email=None, username=None):
_policy = 'managesf.user:delete'
if not authorize(_policy,
target={'username': username}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
d_id = id
if not d_id and (email or username):
logger.debug(u'[delete] looking for %s %s' % (email, username))
d_id = sfmanager.user.get(username=username,
email=email).get('id')
if not d_id:
response.status = 404
return
logger.debug(u'found %s %s with id %s' % (email, username, d_id))
try:
for service in SF_SERVICES:
try:
service.user.delete(username=username)
sfmanager.user.mapping.delete(d_id,
service.service_name)
except exceptions.UnavailableActionError:
pass
sfmanager.user.delete(id=d_id)
except Exception as e:
return report_unhandled_error(e)
response.status = 204
class HooksController(RestController):
def get_project_by_repo(self, reponame):
for _, project in resource_manager.resources.get().get(
'resources', {}).get('projects', {}).items():
for repo in project.get('source-repositories', []):
if reponame in repo:
return project
return None
@expose('json')
def post(self, hook_name):
"""Trigger hook {hook_name}."""
_policy = 'managesf.hooks:trigger'
if not authorize(_policy,
target={}):
return abort(401,
detail='Failure to comply with policy %s' % _policy)
change = request.json if request.content_length else {}
# Get hook engine configuration for project
project_name = change.get('project')
if not project_name:
return abort(400, detail=u"Hooks: Invalid change %s" % change)
project = self.get_project_by_repo(project_name)
if not project:
logger.info("Hooks: Repository %s is not part of any project" %
change.get('project'))
return abort(204, detail=u"No issue-tracker defined")
hook = project.get('issue-tracker')
status = 200
try:
msg = getattr(SERVICES[hook].hooks, hook_name)(**change)
except Exception as e:
status = 400
msg = str(e)
logger.error(u"[%s] hook %s failed with %s" % (
hook, hook_name, msg))
response.status = status
return {'msg': msg}
load_services()
# API v2 - will be in its own file hierarchy once dependencies are sorted out
class V2Controller(object):
# Mimic api v1 and replace endpoints incrementally
user = LocalUserController()
bind = LocalUserBindController()
about = introspection.IntrospectionController()
services_users = ServicesUsersController()
hooks = HooksController()
resources = v2_resources.ResourcesRootController()
configurations = v2_configurations.ConfigurationController()
class RootController(object):
def __init__(self, *args, **kwargs):
try:
# just try to get the api config
_ = conf.api.v2 # noQA
self.v2 = V2Controller()
except AttributeError:
# TODO have a generic blank REST controller that returns
# 'Not Implemented' error code
logger.info('API v2 is not configured, skipping endpoint.')
self.v2 = RestController()
self.user = LocalUserController()
self.bind = LocalUserBindController()
self.about = introspection.IntrospectionController()
self.services_users = ServicesUsersController()
self.hooks = HooksController()
| 40.531128
| 79
| 0.541881
|
c3912c15824e85f0f06cfc039795fbcc19e022a8
| 11,032
|
py
|
Python
|
trove/extensions/mgmt/instances/models.py
|
ISCAS-VDI/trove-base
|
6a13b8771f8c9f259577e79d12355f9142964169
|
[
"Apache-2.0"
] | null | null | null |
trove/extensions/mgmt/instances/models.py
|
ISCAS-VDI/trove-base
|
6a13b8771f8c9f259577e79d12355f9142964169
|
[
"Apache-2.0"
] | null | null | null |
trove/extensions/mgmt/instances/models.py
|
ISCAS-VDI/trove-base
|
6a13b8771f8c9f259577e79d12355f9142964169
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import remote
from trove.common import utils
from trove.extensions.mysql import models as mysql_models
from trove.instance import models as imodels
from trove.instance import models as instance_models
from trove.instance.models import load_instance, InstanceServiceStatus
from trove import rpc
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def load_mgmt_instances(context, deleted=None, client=None,
include_clustered=None):
if not client:
client = remote.create_nova_client(context)
try:
mgmt_servers = client.rdservers.list()
except AttributeError:
mgmt_servers = client.servers.list(search_opts={'all_tenants': 1})
LOG.info(_("Found %d servers in Nova") %
len(mgmt_servers if mgmt_servers else []))
args = {}
if deleted is not None:
args['deleted'] = deleted
if not include_clustered:
args['cluster_id'] = None
db_infos = instance_models.DBInstance.find_all(**args)
instances = MgmtInstances.load_status_from_existing(context, db_infos,
mgmt_servers)
return instances
def load_mgmt_instance(cls, context, id, include_deleted):
try:
instance = load_instance(cls, context, id, needs_server=True,
include_deleted=include_deleted)
client = remote.create_nova_client(context)
try:
server = client.rdservers.get(instance.server_id)
except AttributeError:
server = client.servers.get(instance.server_id)
if hasattr(server, 'host'):
instance.server.host = server.host
elif hasattr(server, 'hostId'):
instance.server.host = server.hostId
if hasattr(server, 'deleted'):
instance.server.deleted = server.deleted
if hasattr(server, 'deleted_at'):
instance.server.deleted_at = server.deleted_at
if hasattr(server, 'local_id'):
instance.server.local_id = server.local_id
assert instance.server is not None
except Exception as e:
LOG.error(e)
instance = load_instance(cls, context, id, needs_server=False,
include_deleted=include_deleted)
return instance
class SimpleMgmtInstance(imodels.BaseInstance):
def __init__(self, context, db_info, server, datastore_status):
super(SimpleMgmtInstance, self).__init__(context, db_info, server,
datastore_status)
@property
def status(self):
if self.deleted:
return imodels.InstanceStatus.SHUTDOWN
return super(SimpleMgmtInstance, self).status
@property
def deleted(self):
return self.db_info.deleted
@property
def deleted_at(self):
return self.db_info.deleted_at
@classmethod
def load(cls, context, id, include_deleted=False):
return load_mgmt_instance(cls, context, id, include_deleted)
@property
def task_description(self):
return self.db_info.task_description
class DetailedMgmtInstance(SimpleMgmtInstance):
def __init__(self, *args, **kwargs):
super(DetailedMgmtInstance, self).__init__(*args, **kwargs)
self.volume = None
self.volume_used = None
self.volume_total = None
self.root_history = None
@classmethod
def load(cls, context, id, include_deleted=False):
instance = load_mgmt_instance(cls, context, id, include_deleted)
client = remote.create_cinder_client(context)
try:
instance.volume = client.volumes.get(instance.volume_id)
except Exception:
instance.volume = None
# Populate the volume_used attribute from the guest agent.
instance_models.load_guest_info(instance, context, id)
instance.root_history = mysql_models.RootHistory.load(context=context,
instance_id=id)
return instance
class MgmtInstance(imodels.Instance):
def get_diagnostics(self):
return self.get_guest().get_diagnostics()
def stop_db(self):
return self.get_guest().stop_db()
def get_hwinfo(self):
return self.get_guest().get_hwinfo()
def rpc_ping(self):
return self.get_guest().rpc_ping()
class MgmtInstances(imodels.Instances):
@staticmethod
def load_status_from_existing(context, db_infos, servers):
def load_instance(context, db, status, server=None):
return SimpleMgmtInstance(context, db, server, status)
if context is None:
raise TypeError("Argument context not defined.")
find_server = imodels.create_server_list_matcher(servers)
instances = imodels.Instances._load_servers_status(load_instance,
context,
db_infos,
find_server)
_load_servers(instances, find_server)
return instances
def _load_servers(instances, find_server):
for instance in instances:
db = instance.db_info
instance.server = None
try:
server = find_server(db.id, db.compute_instance_id)
instance.server = server
except Exception as ex:
LOG.error(ex)
return instances
def publish_exist_events(transformer, admin_context):
notifier = rpc.get_notifier("taskmanager")
notifications = transformer()
# clear out admin_context.auth_token so it does not get logged
admin_context.auth_token = None
for notification in notifications:
notifier.info(admin_context, "trove.instance.exists", notification)
class NotificationTransformer(object):
def __init__(self, **kwargs):
pass
@staticmethod
def _get_audit_period():
now = datetime.datetime.now()
audit_start = utils.isotime(
now - datetime.timedelta(
seconds=CONF.exists_notification_interval),
subsecond=True)
audit_end = utils.isotime(now, subsecond=True)
return audit_start, audit_end
def _get_service_id(self, datastore_manager, id_map):
if datastore_manager in id_map:
datastore_manager_id = id_map[datastore_manager]
else:
datastore_manager_id = cfg.UNKNOWN_SERVICE_ID
LOG.error(_("Datastore ID for Manager (%s) is not configured")
% datastore_manager)
return datastore_manager_id
def transform_instance(self, instance, audit_start, audit_end):
payload = {
'audit_period_beginning': audit_start,
'audit_period_ending': audit_end,
'created_at': instance.created,
'display_name': instance.name,
'instance_id': instance.id,
'instance_name': instance.name,
'instance_type_id': instance.flavor_id,
'launched_at': instance.created,
'nova_instance_id': instance.server_id,
'region': CONF.region,
'state_description': instance.status.lower(),
'state': instance.status.lower(),
'tenant_id': instance.tenant_id
}
payload['service_id'] = self._get_service_id(
instance.datastore_version.manager, CONF.notification_service_id)
return payload
def __call__(self):
audit_start, audit_end = NotificationTransformer._get_audit_period()
messages = []
db_infos = instance_models.DBInstance.find_all(deleted=False)
for db_info in db_infos:
try:
service_status = InstanceServiceStatus.find_by(
instance_id=db_info.id)
except exception.ModelNotFoundError:
                # There is a small window of opportunity during which the db
                # resource for an instance exists, but no InstanceServiceStatus
                # for it has yet been created. We skip sending the notification
                # message for all such instances. These instances are too new
                # and will get picked up in the next round of notifications.
LOG.debug("InstanceServiceStatus not found for %s. "
"Will wait to send notification." % db_info.id)
continue
instance = SimpleMgmtInstance(None, db_info, None, service_status)
message = self.transform_instance(instance, audit_start, audit_end)
messages.append(message)
return messages
class NovaNotificationTransformer(NotificationTransformer):
def __init__(self, **kwargs):
super(NovaNotificationTransformer, self).__init__(**kwargs)
self.context = kwargs['context']
self.nova_client = remote.create_admin_nova_client(self.context)
self._flavor_cache = {}
def _lookup_flavor(self, flavor_id):
if flavor_id in self._flavor_cache:
LOG.debug("Flavor cache hit for %s" % flavor_id)
return self._flavor_cache[flavor_id]
# fetch flavor resource from nova
LOG.info(_("Flavor cache miss for %s") % flavor_id)
flavor = self.nova_client.flavors.get(flavor_id)
self._flavor_cache[flavor_id] = flavor.name if flavor else 'unknown'
return self._flavor_cache[flavor_id]
def __call__(self):
audit_start, audit_end = NotificationTransformer._get_audit_period()
instances = load_mgmt_instances(self.context, deleted=False,
client=self.nova_client)
messages = []
for instance in filter(
lambda inst: inst.status != 'SHUTDOWN' and inst.server,
instances):
message = {
'instance_type': self._lookup_flavor(instance.flavor_id),
'user_id': instance.server.user_id
}
message.update(self.transform_instance(instance,
audit_start,
audit_end))
messages.append(message)
return messages
| 38.708772
| 79
| 0.636784
|
419826e81b30122854ca3b0f340e66f8fbe9669f
| 151
|
py
|
Python
|
inheritance/players_and_monsters_exe/blade_knight.py
|
PetkoAndreev/Python-OOP
|
2cc3094940cdf078f0ee60be938e883f843766e4
|
[
"MIT"
] | 1
|
2021-05-27T07:59:17.000Z
|
2021-05-27T07:59:17.000Z
|
inheritance/players_and_monsters_exe/blade_knight.py
|
PetkoAndreev/Python-OOP
|
2cc3094940cdf078f0ee60be938e883f843766e4
|
[
"MIT"
] | null | null | null |
inheritance/players_and_monsters_exe/blade_knight.py
|
PetkoAndreev/Python-OOP
|
2cc3094940cdf078f0ee60be938e883f843766e4
|
[
"MIT"
] | null | null | null |
from dark_knight import DarkKnight
class BladeKnight(DarkKnight):
def __init__(self, username, level):
super().__init__(username, level)
| 21.571429
| 41
| 0.735099
|
ded99a14b3f0ee4df778fb60b1baebb998bb804c
| 4,482
|
py
|
Python
|
cryptotoolkits/ui/cryptowidgets.py
|
eliiik/CryptoToolkits
|
788270dc95485416849ab34e18f438c6006b311e
|
[
"MIT"
] | null | null | null |
cryptotoolkits/ui/cryptowidgets.py
|
eliiik/CryptoToolkits
|
788270dc95485416849ab34e18f438c6006b311e
|
[
"MIT"
] | null | null | null |
cryptotoolkits/ui/cryptowidgets.py
|
eliiik/CryptoToolkits
|
788270dc95485416849ab34e18f438c6006b311e
|
[
"MIT"
] | null | null | null |
from lib2to3.pgen2 import token
import ipywidgets as widgets
def create_slider(name, in_value, in_min, in_max, in_step, in_format):
return widgets.FloatSlider(
value=in_value,
min=in_min,
max=in_max,
step=in_step,
description=name+": ",
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format=in_format,
)
def create_range_slider(name, in_value, in_min, in_max):
return create_slider(name, in_value, in_min, in_max, 0.01, ".2f")
def create_token_slider(token_name, token_default_price=10, token_max_price=100):
return create_range_slider(token_name, token_default_price, 0, token_max_price)
def create_01_float_slider(name, in_value):
return create_range_slider(name, in_value, 0, 1)
class Token(object):
def __init__(self, token_name, token_price) -> None:
self._token_name = token_name
self._token_price = token_price
@property
def token(self):
return self._token_name
@property
def price(self):
return self._token_price
class BorrowToken(Token):
def __init__(self, token_name, token_amount=0,
token_default_price=10,
token_max_price=100,
borrow_apr=-0.06) -> None:
super().__init__(token_name, token_default_price)
self._amount = token_amount
self._token_max_price = token_max_price
self._borrow_apr = borrow_apr
self.token_widget = None
self.view = None
self.ui_widgets = {}
self.create_view()
def create_view(self):
self.token_widget = create_token_slider(self.token, self.price, self._token_max_price)
self.token_amount_widget = create_range_slider("Amount", self._amount, 0, 100000)
self.token_borrow_apr = create_slider("Borrow APR", self._borrow_apr, -0.2, 0.4, 0.001, ".3f")
token_view = widgets.HBox([self.token_widget, self.token_amount_widget])
self.view = widgets.VBox([token_view, self.token_borrow_apr])
self.ui_widgets["b"+self.token+"_price"] = self.token_widget
self.ui_widgets["b"+self.token+"_amount"] = self.token_amount_widget
self.ui_widgets["b"+self.token+"_apr"] = self.token_borrow_apr
class SupplyToken(Token):
def __init__(self, token_name, token_amount=0,
token_default_price=10,
token_max_price=100,
token_collat_factor=0.7,
token_liquid_factor=0.8,
supply_apr=0.07) -> None:
super().__init__(token_name, token_default_price)
self._amount = token_amount
self._token_max_price = token_max_price
self._token_collat_factor = token_collat_factor
self._token_liquid_factor = token_liquid_factor
self._supply_apr = supply_apr
self.token_widget = None
self.view = None
self.ui_widgets = {}
self.create_view()
def create_view(self):
self.token_widget = create_token_slider(self.token, self.price, self._token_max_price)
self.token_amount_widget = create_range_slider("Amount", self._amount, 0, 100000)
self.token_collat_widget = create_01_float_slider("Col. Factor", self._token_collat_factor)
self.token_liquid_widget = create_01_float_slider("Liquid Factor", self._token_liquid_factor)
self.token_supply_apr = create_slider("Supply APR", self._supply_apr, 0, 0.4, 0.001, ".3f")
token_view = widgets.HBox([self.token_widget, self.token_amount_widget])
factor_view = widgets.HBox([self.token_collat_widget, self.token_liquid_widget])
self.view = widgets.VBox([token_view, factor_view, self.token_supply_apr])
self.ui_widgets["s"+self.token+"_price"] = self.token_widget
self.ui_widgets["s"+self.token+"_amount"] = self.token_amount_widget
self.ui_widgets["s"+self.token+"_collat"] = self.token_collat_widget
self.ui_widgets["s"+self.token+"_liquid"] = self.token_liquid_widget
self.ui_widgets["s"+self.token+"_apr"] = self.token_supply_apr
def create_token_accordion(tokens, title):
views = [i.view for i in tokens]
w = widgets.VBox(views)
return widgets.Accordion([w], titles=[title])
| 44.376238
| 102
| 0.652834
|
0523b779399c1991cee415084e500e9f8b15dbcc
| 1,480
|
py
|
Python
|
matrix/kronecker.py
|
wesselb/matrix
|
ba343d69945d7cbc67330d01a87eee363ce5d408
|
[
"MIT"
] | 3
|
2021-07-30T17:38:08.000Z
|
2021-11-21T04:41:40.000Z
|
matrix/kronecker.py
|
wesselb/matrix
|
ba343d69945d7cbc67330d01a87eee363ce5d408
|
[
"MIT"
] | null | null | null |
matrix/kronecker.py
|
wesselb/matrix
|
ba343d69945d7cbc67330d01a87eee363ce5d408
|
[
"MIT"
] | null | null | null |
import lab as B
from lab.shape import Shape
from .matrix import AbstractMatrix, repr_format
from .util import dtype_str, indent
__all__ = ["Kronecker"]
class Kronecker(AbstractMatrix):
"""Kronecker product.
The data type of a Kronecker product is the data type of the left matrix
in the product.
Attributes:
left (matrix): Left matrix in the product.
right (matrix): Right matrix in the product.
cholesky (:class:`.kronecker.Kronecker` or None): Cholesky decomposition
of the matrix, once it has been computed.
dense (matrix or None): Dense version of the matrix, once it has been
computed.
Args:
left (matrix): Left matrix in the product.
right (matrix): Right matrix in the product.
"""
def __init__(self, left, right):
self.left = left
self.right = right
self.cholesky = None
self.dense = None
def __str__(self):
return (
f"<Kronecker product:"
f" batch={Shape(*B.shape_batch(self))},"
f" shape={Shape(*B.shape_matrix(self))},"
f" dtype={dtype_str(self)}>"
)
def __repr__(self):
return (
str(self)[:-1]
+ "\n"
+ f" left="
+ indent(repr_format(self.left), " " * 6).strip()
+ "\n"
+ f" right="
+ indent(repr_format(self.right), " " * 7).strip()
+ ">"
)
| 27.407407
| 80
| 0.558784
|
5cca01befd3d3dc6049fc3a5b81f716f582345b5
| 3,488
|
py
|
Python
|
baselines/ppo2/runner.py
|
LanxinL/baselines
|
e167dde99a4609910724fd7817e18ba9a54e3f94
|
[
"MIT"
] | null | null | null |
baselines/ppo2/runner.py
|
LanxinL/baselines
|
e167dde99a4609910724fd7817e18ba9a54e3f94
|
[
"MIT"
] | null | null | null |
baselines/ppo2/runner.py
|
LanxinL/baselines
|
e167dde99a4609910724fd7817e18ba9a54e3f94
|
[
"MIT"
] | null | null | null |
import numpy as np
from baselines.common.runners import AbstractEnvRunner
from collections import deque
class Runner(AbstractEnvRunner):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
        # Lambda used in GAE (Generalized Advantage Estimation)
self.lam = lam
# Discount rate
self.gamma = gamma
self.reward_history = {}
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
        # For each step in the rollout
for _ in range(self.nsteps):
            # Given observations, get action value and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in the env and observe the results
            # infos contains a lot of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for i,info in enumerate(infos):
maybeepinfo = info.get('episode')
if maybeepinfo:
epinfos.append(maybeepinfo)
if i not in self.reward_history.keys():
self.reward_history[i] = deque(maxlen=100)
self.reward_history[i].extend([maybeepinfo['r']])
mb_rewards.append(rewards)
        # batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
# discount/bootstrap off value fn
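        # The loop below implements Generalized Advantage Estimation (GAE);
        # written out, the recursion it computes is:
        #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t)
        #   A_t     = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}
        #   R_t     = A_t + V(s_t)   (the returns used as value targets)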
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
| 42.024096
| 109
| 0.607798
|
c053be01d3c765715bc87ea778944f61768748e9
| 784
|
py
|
Python
|
custom_components/iaquk/const.py
|
SovakPaleny/ha-iaquk
|
34b14e8aad076e164c85d7ae761f7049801ebf54
|
[
"MIT"
] | null | null | null |
custom_components/iaquk/const.py
|
SovakPaleny/ha-iaquk
|
34b14e8aad076e164c85d7ae761f7049801ebf54
|
[
"MIT"
] | null | null | null |
custom_components/iaquk/const.py
|
SovakPaleny/ha-iaquk
|
34b14e8aad076e164c85d7ae761f7049801ebf54
|
[
"MIT"
] | null | null | null |
"""Constants for calculate IAQ UK index."""
# Base component constants
DOMAIN = "iaquk"
VERSION = "1.2.0"
ISSUE_URL = "https://github.com/Limych/ha-iaquk/issues"
ATTRIBUTION = None
DATA_IAQUK = 'iaquk'
SUPPORT_LIB_URL = "https://github.com/Limych/iaquk/issues/new/choose"
CONF_SOURCES = "sources"
CONF_TEMPERATURE = "temperature"
CONF_HUMIDITY = "humidity"
CONF_CO2 = "co2"
CONF_TVOC = "tvoc"
CONF_PM = "pm"
CONF_NO2 = "no2"
CONF_CO = "co"
CONF_HCHO = "hcho" # Formaldehyde
ATTR_SOURCES_SET = 'sources_set'
ATTR_SOURCES_USED = 'sources_used'
LEVEL_EXCELLENT = "Excellent"
LEVEL_GOOD = "Good"
LEVEL_FAIR = "Fair"
LEVEL_POOR = "Poor"
LEVEL_INADEQUATE = "Inadequate"
UNIT_PPM = {
'ppm': 1,
'ppb': 0.001,
}
UNIT_PPB = {
'ppb': 1,
'ppm': 1000,
}
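# The dictionaries above read most naturally as "multiply a value expressed in
# the keyed unit by this factor to obtain the unit named by the constant"
# (e.g. a reading of 500 ppb is 500 * 0.001 = 0.5 ppm). This interpretation is
# inferred from the factors themselves, not from the component's sensor code.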
UNIT_MGM3 = 'µg/m3'
| 19.6
| 69
| 0.695153
|
106f57f21ca5295a42c0f0ac4fdf4b9af33a2aff
| 688
|
py
|
Python
|
apiv2/migrations/0002_Formulas_manytomany_in_formula_index.py
|
deka108/meas_deka
|
9646b04b878f325ade0a59e41bfcb10ab962d753
|
[
"Apache-2.0"
] | null | null | null |
apiv2/migrations/0002_Formulas_manytomany_in_formula_index.py
|
deka108/meas_deka
|
9646b04b878f325ade0a59e41bfcb10ab962d753
|
[
"Apache-2.0"
] | 1
|
2018-06-19T16:27:31.000Z
|
2018-06-21T02:57:03.000Z
|
apiv2/migrations/0002_Formulas_manytomany_in_formula_index.py
|
deka108/mathqa-server
|
9646b04b878f325ade0a59e41bfcb10ab962d753
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-03-15 02:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
replaces = [('apiv2', '0002_Formula_manytomany_in_formula_index'), ('apiv2', '0003_Remove_docsids_in_formulaindex')]
dependencies = [
('apiv2', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='formulaindex',
name='formulas',
field=models.ManyToManyField(to=b'apiv2.Formula'),
),
migrations.RemoveField(
model_name='formulaindex',
name='docsids',
),
]
| 25.481481
| 120
| 0.62064
|
8e5208231cfb7eba4100c2c9d24fa3ea6776ba89
| 2,613
|
py
|
Python
|
tests/test_database.py
|
aryan040501/wikilabels
|
ea110da2b969cc978a0f288c4da6250dc9d67e72
|
[
"MIT"
] | 15
|
2015-07-16T17:56:43.000Z
|
2018-08-20T14:59:16.000Z
|
tests/test_database.py
|
aryan040501/wikilabels
|
ea110da2b969cc978a0f288c4da6250dc9d67e72
|
[
"MIT"
] | 122
|
2015-06-10T15:58:11.000Z
|
2018-08-16T14:56:23.000Z
|
tests/test_database.py
|
aryan040501/wikilabels
|
ea110da2b969cc978a0f288c4da6250dc9d67e72
|
[
"MIT"
] | 27
|
2015-07-15T22:12:35.000Z
|
2018-08-06T23:10:28.000Z
|
from wikilabels.database import db
import os
item1 = {'wiki': "cawiki", 'name': "ching", 'form': "chan",
'view': "bivicyt", 'labels_per_task': 1,
'task_per_task': 50, 'active': True,
'info_url': "https://www.mediawiki.org/wiki/ORES#Edit_quality"}
dbs = db.DB(
1, 5, database=os.getenv('WIKILABELS_DATABASE', "wikilabels"),
host=os.getenv('WIKILABELS_DATABASE_HOST', "localhost"),
user=os.getenv('WIKILABELS_DATABASE_USER', "wikilabels"),
password=os.getenv('WIKILABELS_DATABASE_PASSWORD', "wikilabels-admin"))
user = 608705
def test_campaign_create():
assert dbs.campaigns.create(item1.get('wiki'), item1.get('name'),
item1.get('form'), item1.get('view'),
item1.get('labels_per_task'),
item1.get('task_per_task'),
item1.get('active'), item1.get('info_url'))
def test_campaign_checkwikiexists():
assert dbs.campaigns.wiki_name_exists(item1.get('wiki'), item1.get('name'))
def test_campaign_getitems():
assert dbs.campaigns.get(1)
def test_campaign_statsfor():
assert dbs.campaigns.stats_for(1)
def test_campaign_hasopentasks():
assert dbs.campaigns.has_open_tasks(1, user)
def test_campaign_forwiki():
assert dbs.campaigns.for_wiki('enwiki')
def test_campaign_foruser():
assert dbs.campaigns.for_user(608705)
def test_campaign_wikis():
assert dbs.campaigns.wikis()
def test_campaign_users():
assert dbs.campaigns.users(1)
def test_labels_upsertupdate():
assert dbs.labels.upsert(1, 608705,
'{"damaging": true, "good-faith": true}')
def test_labels_CRUD():
assert dbs.labels.insert(3, user, '{"damaging": true, "good-faith": true}')
assert dbs.labels.update(3, user,
'{"damaging": false, "good-faith": true}')
assert dbs.labels.clear_data(3, user)
def tests_worksets_get():
assert dbs.worksets.get(1)
def tests_worksets_statsfor():
assert dbs.worksets.stats_for(1)
def tests_worksets_forcampaign():
assert dbs.worksets.for_campaign(1)
def tests_worksets_foruser():
assert dbs.worksets.for_user(user)
def test_worksets_openworksetsforuser():
assert dbs.worksets.open_workset_for_user(1, user)
def test_worksets_assign():
assert dbs.worksets.assign(2, user)
def test_worksets_users():
assert dbs.worksets.users()
def test_worksets_abandon():
assert dbs.worksets.abandon(1, user)
def test_worksets_abandontask():
assert dbs.worksets.abandon_task(1, user, 1)
| 25.125
| 79
| 0.666667
|
2aa1e1c74089046d3244cc2d99d767a9136db0c6
| 583
|
py
|
Python
|
lego/apps/users/migrations/0021_auto_20190829_1632.py
|
ollfkaih/lego
|
b15aacaf09efe90e7f984d25b0e7bddbe12647e8
|
[
"MIT"
] | 45
|
2017-10-24T12:09:06.000Z
|
2021-11-03T21:21:03.000Z
|
lego/apps/users/migrations/0021_auto_20190829_1632.py
|
ollfkaih/lego
|
b15aacaf09efe90e7f984d25b0e7bddbe12647e8
|
[
"MIT"
] | 980
|
2017-10-24T12:29:07.000Z
|
2022-03-31T04:04:31.000Z
|
lego/apps/users/migrations/0021_auto_20190829_1632.py
|
wahello/lego
|
a0b02f3abc997fe96326e9c9c05b49847170041b
|
[
"MIT"
] | 23
|
2018-04-11T16:34:22.000Z
|
2021-11-23T12:28:30.000Z
|
# Generated by Django 2.1.11 on 2019-08-29 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("users", "0020_abakusgroup_show_badge")]
operations = [
migrations.AlterField(
model_name="user",
name="first_name",
field=models.CharField(max_length=50, verbose_name="first name"),
),
migrations.AlterField(
model_name="user",
name="last_name",
field=models.CharField(max_length=30, verbose_name="last name"),
),
]
| 26.5
| 77
| 0.610635
|
e383f5c019e78399635383d6117ffcf9bebf2acb
| 887
|
py
|
Python
|
Singleton/interface.py
|
sirajirasajiki/design_pattern_python
|
4879469bdb36848f3919f447e0e23f4f240bc27b
|
[
"MIT"
] | null | null | null |
Singleton/interface.py
|
sirajirasajiki/design_pattern_python
|
4879469bdb36848f3919f447e0e23f4f240bc27b
|
[
"MIT"
] | null | null | null |
Singleton/interface.py
|
sirajirasajiki/design_pattern_python
|
4879469bdb36848f3919f447e0e23f4f240bc27b
|
[
"MIT"
] | 1
|
2020-03-27T08:28:49.000Z
|
2020-03-27T08:28:49.000Z
|
from abc import *
class Mahjong(metaclass=ABCMeta):
__mahjong = None
__score = [25000] * 4
@classmethod
def get_instance(cls):
if not hasattr(cls, "_instance"):
cls._instance = cls()
else:
cls._instance.input = input
return cls._instance
@classmethod
def get_score(cls):
return cls.__score
@classmethod
def score_calculation(cls, add_list):
"""
        Update the players' scores.
        Use a negative value for points to be deducted.
        :param add_list: list of per-player score deltas
"""
cls.__score = [a + b for a, b in zip(cls.__score, add_list)]
if __name__ == '__main__':
a = Mahjong().get_instance()
b = Mahjong().get_instance()
if a is b:
        print('same object')
    else:
        print('different objects')
print(a.get_score())
a.score_calculation([0,8000, -8000, 0])
print(a.get_score())
print(b.get_score())
| 20.627907
| 68
| 0.569335
|
acd2ce34cf0339b53891a02b486bb288228ca6ee
| 936
|
py
|
Python
|
Python/Ejercicios/Trenes.py
|
Camiloasc1/OperativeSystemsUNAL
|
a07bfc235789b7a8848280a549a6b2c9602e61b5
|
[
"MIT"
] | null | null | null |
Python/Ejercicios/Trenes.py
|
Camiloasc1/OperativeSystemsUNAL
|
a07bfc235789b7a8848280a549a6b2c9602e61b5
|
[
"MIT"
] | null | null | null |
Python/Ejercicios/Trenes.py
|
Camiloasc1/OperativeSystemsUNAL
|
a07bfc235789b7a8848280a549a6b2c9602e61b5
|
[
"MIT"
] | null | null | null |
import random
import thread
import threading
import time
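# Synchronization scheme used below: every station on the route carries a
# BoundedSemaphore(2), so at most two trains may occupy a station at a time;
# a train acquires the next station before releasing the one it leaves. The
# shared lock pL only serializes the print statements.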
def tren(i, route, pL):
cStation = None # In garage
while True:
for nStation in route:
nStation[1].acquire()
            if cStation is None:  # departing from the garage (no previous station)
pL.acquire()
print "Train", i, "from", "-garage-", "to", nStation[0]
pL.release()
else:
cStation[1].release()
pL.acquire()
print "Train", i, "from", cStation[0], "to", nStation[0]
pL.release()
cStation = nStation
time.sleep(5 + random.randint(1, 3)) # Travel + Stop
# Ready to next
def main():
route = [("'" + str(i) + "'", threading.BoundedSemaphore(2)) for i in xrange(5)]
pL = threading.Lock()
for i in xrange(8):
thread.start_new_thread(tren, (i, route, pL))
while True:
time.sleep(5)
main()
| 27.529412
| 84
| 0.508547
|
06a770061257201d20bc3f238d405e21461c1dde
| 6,127
|
py
|
Python
|
examples/plot_nested-cv.py
|
AdirthaBorgohain/MAPIE
|
f5ae6d33c9def18eab46f9f375a95e18fb7affc1
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_nested-cv.py
|
AdirthaBorgohain/MAPIE
|
f5ae6d33c9def18eab46f9f375a95e18fb7affc1
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_nested-cv.py
|
AdirthaBorgohain/MAPIE
|
f5ae6d33c9def18eab46f9f375a95e18fb7affc1
|
[
"BSD-3-Clause"
] | null | null | null |
"""
===========================================================
Nested cross-validation for estimating prediction intervals
===========================================================
This example compares non-nested and nested cross-validation strategies for
estimating prediction intervals with :class:`mapie.estimators.MapieRegressor`.
In the regular sequential method, a cross-validation parameter search is
carried out over the entire training set.
The model with the set of parameters that gives the best score is then used in
MAPIE to estimate the prediction intervals associated with the predictions.
A limitation of this method is that residuals used by MAPIE are computed on
the validation dataset, which can be subject to overfitting as far as
hyperparameter tuning is concerned.
This fools MAPIE into being slightly too optimistic with confidence intervals.
To solve this problem, an alternative option is to perform a nested
cross-validation parameter search directly within the MAPIE estimator on each
*out-of-fold* dataset.
For each testing fold used by MAPIE to store residuals, an internal
cross-validation occurs on the training fold, optimizing hyperparameters.
This ensures that residuals seen by MAPIE are never seen by the algorithm
beforehand. However, this method is much heavier computationally since
it results in :math:`N * P` calculations, where *N* is the number of
*out-of-fold* models and *P* the number of parameter search cross-validations,
versus :math:`N + P` for the non-nested approach.
Here, we compare the two strategies on the Boston dataset. We use the Random
Forest Regressor as a base regressor for the CV+ strategy. For the sake of
light computation, we adopt a RandomizedSearchCV parameter search strategy
with a low number of iterations and with a reproducible random state.
The two approaches give slightly different predictions with the nested CV
approach estimating slightly larger prediction interval widths by a
few percent at most (apart from a handful of exceptions).
For this example, the two approaches result in identical scores and identical
effective coverages.
In the general case, the recommended approach is to use nested
cross-validation, since it does not underestimate residuals and hence
prediction intervals. However, in this particular example, effective
coverages of both nested and non-nested methods are the same.
"""
import matplotlib.pyplot as plt
from scipy.stats import randint
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import mean_squared_error
from mapie.estimators import MapieRegressor
from mapie.metrics import coverage_score
# Load the Boston data
X_boston, y_boston = load_boston(return_X_y=True)
# Split the data into training and test sets.
X_train, X_test, y_train, y_test = train_test_split(
X_boston, y_boston, test_size=0.2, random_state=42
)
# Define the Random Forest model as base regressor with parameter ranges.
rf_model = RandomForestRegressor(random_state=59, verbose=0)
rf_params = {
"max_depth": randint(2, 30),
"n_estimators": randint(10, 1e3)
}
# Cross-validation and prediction-interval parameters.
cv = 5
n_iter = 5
alpha = 0.05
random_state = 59
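# Rough cost comparison for these settings (an approximate count that ignores
# final refits): the non-nested search fits about cv * n_iter + cv = 30
# models, whereas the nested variant fits about cv * cv * n_iter = 125 models
# for the hyperparameter search alone.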
# Non-nested approach with the CV+ strategy using the Random Forest model.
cv_obj = RandomizedSearchCV(
rf_model,
param_distributions=rf_params,
n_iter=n_iter,
cv=cv,
scoring="neg_root_mean_squared_error",
return_train_score=True,
verbose=0,
random_state=random_state,
n_jobs=-1,
)
cv_obj.fit(X_train, y_train)
best_est = cv_obj.best_estimator_
mapie_non_nested = MapieRegressor(
best_est,
method="plus",
cv=cv,
ensemble=True,
n_jobs=-1
)
mapie_non_nested.fit(X_train, y_train)
y_pred_non_nested, y_pis_non_nested = mapie_non_nested.predict(
X_test, alpha=alpha
)
widths_non_nested = y_pis_non_nested[:, 1, 0] - y_pis_non_nested[:, 0, 0]
coverage_non_nested = coverage_score(
y_test, y_pis_non_nested[:, 0, 0], y_pis_non_nested[:, 1, 0]
)
score_non_nested = mean_squared_error(
y_test, y_pred_non_nested, squared=False
)
# Nested approach with the CV+ strategy using the Random Forest model.
cv_obj = RandomizedSearchCV(
rf_model,
param_distributions=rf_params,
n_iter=n_iter,
cv=cv,
scoring="neg_root_mean_squared_error",
return_train_score=True,
verbose=0,
random_state=random_state,
n_jobs=-1,
)
mapie_nested = MapieRegressor(
cv_obj,
method="plus",
cv=cv,
ensemble=True
)
mapie_nested.fit(X_train, y_train)
y_pred_nested, y_pis_nested = mapie_nested.predict(X_test, alpha=alpha)
widths_nested = y_pis_nested[:, 1, 0] - y_pis_nested[:, 0, 0]
coverage_nested = coverage_score(
y_test, y_pis_nested[:, 0, 0], y_pis_nested[:, 1, 0]
)
score_nested = mean_squared_error(y_test, y_pred_nested, squared=False)
# Print scores and effective coverages.
print(
"Scores and effective coverages for the CV+ strategy using the "
"Random Forest model."
)
print(
"Score on the test set for the non-nested and nested CV approaches: ",
f"{score_non_nested: .3f}, {score_nested: .3f}"
)
print(
"Effective coverage on the test set for the non-nested "
"and nested CV approaches: ",
f"{coverage_non_nested: .3f}, {coverage_nested: .3f}"
)
# Compare prediction interval widths.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 6))
min_x = 14.5
max_x = 16.0
ax1.set_xlabel("Prediction interval width using the nested CV approach")
ax1.set_ylabel("Prediction interval width using the non-nested CV approach")
ax1.set_xlim([min_x, max_x])
ax1.set_ylim([min_x, max_x])
ax1.scatter(widths_nested, widths_non_nested)
ax1.plot([min_x, max_x], [min_x, max_x], ls="--", color="k")
ax2.axvline(x=0, color="r", lw=2)
ax2.set_xlabel(
"[width(non-nested CV) - width(nested CV)] / width(non-nested CV)"
)
ax2.set_ylabel("Counts")
ax2.hist(
(widths_non_nested - widths_nested)/widths_non_nested,
bins=15,
edgecolor="black"
)
plt.show()
| 35.212644
| 78
| 0.754366
|
abc6b2686ff15a150bf2f3eb8e9596cf90a4afb1
| 4,154
|
py
|
Python
|
im2scene/giraffev3/config.py
|
fireofearth/giraffe
|
ac3e0ec197dfc2b5d6c95e2971582dd596a671d2
|
[
"MIT"
] | null | null | null |
im2scene/giraffev3/config.py
|
fireofearth/giraffe
|
ac3e0ec197dfc2b5d6c95e2971582dd596a671d2
|
[
"MIT"
] | null | null | null |
im2scene/giraffev3/config.py
|
fireofearth/giraffe
|
ac3e0ec197dfc2b5d6c95e2971582dd596a671d2
|
[
"MIT"
] | null | null | null |
import os
from im2scene.discriminator import discriminator_dict
from im2scene.giraffev3 import models, training, rendering
from copy import deepcopy
import numpy as np
def get_model(cfg, device=None, len_dataset=0, **kwargs):
''' Returns the giraffe model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
len_dataset (int): length of dataset
'''
decoder = cfg['model']['decoder']
discriminator = cfg['model']['discriminator']
generator = cfg['model']['generator']
background_generator = cfg['model']['background_generator']
decoder_kwargs = cfg['model']['decoder_kwargs']
discriminator_kwargs = cfg['model']['discriminator_kwargs']
generator_kwargs = cfg['model']['generator_kwargs']
background_generator_kwargs = \
cfg['model']['background_generator_kwargs']
bounding_box_generator = cfg['model']['bounding_box_generator']
bounding_box_generator_kwargs = \
cfg['model']['bounding_box_generator_kwargs']
neural_renderer = cfg['model']['neural_renderer']
neural_renderer_kwargs = cfg['model']['neural_renderer_kwargs']
z_dim = cfg['model']['z_dim']
z_dim_bg = cfg['model']['z_dim_bg']
img_size = cfg['data']['img_size']
    # Always load the decoder
decoder = models.decoder_dict[decoder](
z_dim=z_dim, **decoder_kwargs
)
if discriminator is not None:
discriminator = discriminator_dict[discriminator](
img_size=img_size, **discriminator_kwargs)
if background_generator is not None:
background_generator = \
models.background_generator_dict[background_generator](
z_dim=z_dim_bg, **background_generator_kwargs)
if bounding_box_generator is not None:
bounding_box_generator = \
models.bounding_box_generator_dict[bounding_box_generator](
z_dim=z_dim, **bounding_box_generator_kwargs)
if neural_renderer is not None:
neural_renderer = models.neural_renderer_dict[neural_renderer](
z_dim=z_dim, img_size=img_size, **neural_renderer_kwargs
)
if generator is not None:
generator = models.generator_dict[generator](
device, z_dim=z_dim, z_dim_bg=z_dim_bg, decoder=decoder,
background_generator=background_generator,
bounding_box_generator=bounding_box_generator,
neural_renderer=neural_renderer, **generator_kwargs)
if cfg['test']['take_generator_average']:
generator_test = deepcopy(generator)
else:
generator_test = None
model = models.GIRAFFE(
device=device,
discriminator=discriminator, generator=generator,
generator_test=generator_test,
)
return model
def get_trainer(model, optimizer, optimizer_d, cfg, device, **kwargs):
''' Returns the trainer object.
Args:
model (nn.Module): the GIRAFFE model
optimizer (optimizer): generator optimizer object
optimizer_d (optimizer): discriminator optimizer object
cfg (dict): imported yaml config
device (device): pytorch device
'''
out_dir = cfg['training']['out_dir']
vis_dir = os.path.join(out_dir, 'vis')
overwrite_visualization = cfg['training']['overwrite_visualization']
multi_gpu = cfg['training']['multi_gpu']
n_eval_iterations = (
cfg['training']['n_eval_images'] // cfg['training']['batch_size'])
fid_file = cfg['data']['fid_file']
assert(fid_file is not None)
fid_dict = np.load(fid_file)
trainer = training.Trainer(
model, optimizer, optimizer_d, device=device, vis_dir=vis_dir,
overwrite_visualization=overwrite_visualization, multi_gpu=multi_gpu,
fid_dict=fid_dict,
n_eval_iterations=n_eval_iterations,
)
return trainer
def get_renderer(model, cfg, device, **kwargs):
''' Returns the renderer object.
Args:
model (nn.Module): GIRAFFE model
cfg (dict): imported yaml config
device (device): pytorch device
'''
renderer = rendering.Renderer(
model,
device=device,)
return renderer
| 34.907563
| 77
| 0.679827
|
fd72f02dcd19de728f32cc6d7d84197051e453e9
| 1,089
|
py
|
Python
|
setup.py
|
xmurobi/pymarketstore
|
6a38f140ab8b37b35e122b895655b1512b4b3f9f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
xmurobi/pymarketstore
|
6a38f140ab8b37b35e122b895655b1512b4b3f9f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
xmurobi/pymarketstore
|
6a38f140ab8b37b35e122b895655b1512b4b3f9f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import ast
import re
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pymarketstore/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
with open('README.md') as readme_file:
README = readme_file.read()
setup(
name='pymarketstore2',
version=version,
description='Marketstore python driver',
long_description=README,
long_description_content_type='text/markdown',
author='Alpaca',
author_email='oss@alpaca.markets',
url='https://github.com/xmurobi/pymarketstore',
keywords='database,pandas,financial,timeseries',
packages=['pymarketstore', ],
install_requires=[
'msgpack-python',
'numpy',
'requests',
'pandas',
'six',
'urllib3',
'pytest',
'websocket-client',
],
tests_require=[
'pytest',
'pytest-cov',
'coverage>=4.4.1',
'mock>=1.0.1'
],
setup_requires=['pytest-runner', 'flake8'],
)
| 23.170213
| 54
| 0.615243
|
4fec63034415a2591ce653992c702e338bc47a6e
| 3,795
|
py
|
Python
|
tensorflow_datasets/image/aflw2k3d.py
|
sourcery-ai-bot/datasets
|
b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:41:27.000Z
|
2021-05-10T10:41:27.000Z
|
tensorflow_datasets/image/aflw2k3d.py
|
sourcery-ai-bot/datasets
|
b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/image/aflw2k3d.py
|
sourcery-ai-bot/datasets
|
b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28
|
[
"Apache-2.0"
] | 1
|
2021-07-04T11:07:35.000Z
|
2021-07-04T11:07:35.000Z
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AFLW2000-3D Dataset."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
AFLW2000-3D is a dataset of 2000 images that have been annotated with image-level
68-point 3D facial landmarks.
This dataset is typically used for evaluation of 3D facial landmark detection
models. The head poses are very diverse and often hard to detect with a
CNN-based face detector.
The 2D landmarks are skipped in this dataset, since some of the data are not
consistent with 21 points, as the original paper mentions.
"""
_CITATION = """\
@article{DBLP:journals/corr/ZhuLLSL15,
author = {Xiangyu Zhu and
Zhen Lei and
Xiaoming Liu and
Hailin Shi and
Stan Z. Li},
title = {Face Alignment Across Large Poses: {A} 3D Solution},
journal = {CoRR},
volume = {abs/1511.07212},
year = {2015},
url = {http://arxiv.org/abs/1511.07212},
archivePrefix = {arXiv},
eprint = {1511.07212},
timestamp = {Mon, 13 Aug 2018 16:48:23 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/ZhuLLSL15},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
class Aflw2k3d(tfds.core.GeneratorBasedBuilder):
"""AFLW2000-3D dataset."""
VERSION = tfds.core.Version("1.0.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"image":
tfds.features.Image(
shape=(450, 450, 3), encoding_format="jpeg"),
"landmarks_68_3d_xy_normalized":
tfds.features.Tensor(shape=(68, 2), dtype=tf.float32),
"landmarks_68_3d_z":
tfds.features.Tensor(shape=(68, 1), dtype=tf.float32),
}),
homepage=
"http://www.cbsr.ia.ac.cn/users/xiangyuzhu/projects/3DDFA/main.htm",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
extracted_path = dl_manager.download_and_extract(
"http://www.cbsr.ia.ac.cn/users/xiangyuzhu/projects/3DDFA/Database/AFLW2000-3D.zip"
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"image_dir_path": os.path.join(extracted_path, "AFLW2000"),
}),
]
def _generate_examples(self, image_dir_path):
image_files = tf.io.gfile.glob(
pattern=os.path.join(image_dir_path, "image0*.jpg"))
label_files = [s.replace("jpg", "mat") for s in image_files]
for image_file, label_file in zip(image_files, label_files):
with tf.io.gfile.GFile(label_file, "rb") as f:
mat = tfds.core.lazy_imports.scipy.io.loadmat(f)
landmarks_68_3d_xyz = mat["pt3d_68"].T.astype(np.float32)
landmarks_68_3d_xy_normalized = landmarks_68_3d_xyz[..., 0:2] / 450.0
landmarks_68_3d_z = landmarks_68_3d_xyz[..., 2:]
yield os.path.basename(image_file), {
"image": image_file,
"landmarks_68_3d_xy_normalized": landmarks_68_3d_xy_normalized,
"landmarks_68_3d_z": landmarks_68_3d_z,
}
| 36.142857
| 91
| 0.667194
|
140dd3656cadce1d57cbb4214c9db1855c3c83f3
| 177
|
py
|
Python
|
FWCore/Integration/python/test/unscheduled_fail_on_output_Rethrow_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
FWCore/Integration/python/test/unscheduled_fail_on_output_Rethrow_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
FWCore/Integration/python/test/unscheduled_fail_on_output_Rethrow_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from FWCore.Integration.test.unscheduled_fail_on_output_cfg import process
process.options.Rethrow = cms.untracked.vstring('NotFound')
| 35.4
| 74
| 0.853107
|
1833a95d7f1e41e9c2d6e82f19f20e8779c03752
| 13,163
|
py
|
Python
|
rpython/translator/platform/__init__.py
|
ctrl-shift-make/pypy
|
6d2f7a78baa8d4d2f94f5fb709f697a560b45f4e
|
[
"Apache-2.0",
"OpenSSL"
] | 333
|
2015-08-08T18:03:38.000Z
|
2022-03-22T18:13:12.000Z
|
rpython/translator/platform/__init__.py
|
ctrl-shift-make/pypy
|
6d2f7a78baa8d4d2f94f5fb709f697a560b45f4e
|
[
"Apache-2.0",
"OpenSSL"
] | 7
|
2020-02-16T16:49:05.000Z
|
2021-11-26T09:00:56.000Z
|
rpython/translator/platform/__init__.py
|
ctrl-shift-make/pypy
|
6d2f7a78baa8d4d2f94f5fb709f697a560b45f4e
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
"""Platform-specific support for compiling/executing C sources."""
import py, os, sys
from rpython.tool.runsubprocess import run_subprocess as _run_subprocess
from rpython.tool.udir import udir
from rpython.tool.version import rpythonroot
from rpython.tool.ansi_print import AnsiLogger
log = AnsiLogger("platform")
class CompilationError(Exception):
def __init__(self, out, err):
self.out = out.replace('\r\n', '\n')
self.err = err.replace('\r\n', '\n')
def __repr__(self):
if self.err:
attr = 'err'
else:
attr = 'out'
text = getattr(self, attr).replace('\n', '\n\t')
return 'CompilationError(%s="""\n\t%s""")' % (attr, text)
__str__ = __repr__
class ExecutionResult(object):
def __init__(self, returncode, out, err):
self.returncode = returncode
self.out = out.replace('\r\n', '\n')
self.err = err.replace('\r\n', '\n')
def __repr__(self):
return "<ExecutionResult retcode=%d>" % (self.returncode,)
class Platform(object):
name = "abstract platform"
c_environ = None
# which branch to check out in get_external.py
externals_branch='default'
# where to put the externals, as an absolute path
externals = str(py.path.local(__file__).parts()[-5] / 'externals')
relevant_environ = ()
log_errors = True
so_prefixes = ('',)
extra_libs = ()
def __init__(self, cc):
if self.__class__ is Platform:
raise TypeError("You should not instantiate Platform class directly")
self.cc = cc
def compile(self, cfiles, eci, outputfilename=None, standalone=True):
ofiles = self._compile_o_files(cfiles, eci, standalone)
return self._finish_linking(ofiles, eci, outputfilename, standalone)
def _all_cfiles(self, cfiles, eci):
seen = set()
result = []
for cfile in list(cfiles) + list(eci.separate_module_files):
cfile = py.path.local(cfile)
if cfile not in seen:
seen.add(cfile)
result.append(cfile)
return result
def _compile_o_files(self, cfiles, eci, standalone=True):
cfiles = self._all_cfiles(cfiles, eci)
compile_args = self._compile_args_from_eci(eci, standalone)
ofiles = []
for cfile in cfiles:
# Windows hack: use masm for files ending in .asm
if str(cfile).lower().endswith('.asm'):
ofiles.append(self._compile_c_file(self.masm, cfile, []))
else:
ofiles.append(self._compile_c_file(self.cc, cfile, compile_args))
return ofiles
def execute(self, executable, args=None, env=None, compilation_info=None):
if env is None:
env = os.environ.copy()
else:
env = env.copy()
# On Windows, %SystemRoot% must be present for most programs to start
if (os.name == 'nt' and
"SystemRoot" not in env and
"SystemRoot" in os.environ):
env["SystemRoot"] = os.environ["SystemRoot"]
# Set LD_LIBRARY_PATH on posix platforms
if os.name == 'posix' and compilation_info is not None:
library_path = ':'.join([str(i) for i in compilation_info.library_dirs])
if sys.platform == 'darwin':
env['DYLD_LIBRARY_PATH'] = library_path
else:
env['LD_LIBRARY_PATH'] = library_path
returncode, stdout, stderr = _run_subprocess(str(executable), args,
env)
return ExecutionResult(returncode, stdout, stderr)
def gen_makefile(self, cfiles, eci, exe_name=None, path=None,
shared=False, headers_to_precompile=[],
no_precompile_cfiles = [], profopt=False, config=None):
raise NotImplementedError("Pure abstract baseclass")
def __repr__(self):
return '<%s cc=%s>' % (self.__class__.__name__, self.cc)
def __hash__(self):
return hash(self.__class__.__name__)
def __ne__(self, other):
return not self == other
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.__dict__ == other.__dict__)
def key(self):
bits = [self.__class__.__name__, 'cc=%r' % self.cc]
for varname in self.relevant_environ:
bits.append('%s=%r' % (varname, os.environ.get(varname)))
# adding sys.maxint to disambiguate windows
bits.append('%s=%r' % ('sys.maxint', sys.maxint))
return ' '.join(bits)
# some helpers which seem to be cross-platform enough
def _execute_c_compiler(self, cc, args, outname, cwd=None):
#log.execute(cc + ' ' + ' '.join(args))
# 'cc' can also contain some options for the C compiler;
# e.g. it can be "gcc -m32". We handle it by splitting on ' '.
cclist = cc.split()
cc = cclist[0]
args = cclist[1:] + args
returncode, stdout, stderr = _run_subprocess(cc, args, self.c_environ,
cwd)
self._handle_error(returncode, stdout, stderr, outname)
def _handle_error(self, returncode, stdout, stderr, outname):
if returncode != 0:
errorfile = outname.new(ext='errors')
errorfile.write(stderr, 'wb')
if self.log_errors:
stderrlines = stderr.splitlines()
for line in stderrlines:
log.Error(line)
# ^^^ don't use ERROR, because it might actually be fine.
# Also, ERROR confuses lib-python/conftest.py.
raise CompilationError(stdout, stderr)
else:
if self.log_errors:
for line in stderr.splitlines():
log.WARNING(line)
def _make_o_file(self, cfile, ext):
"""Create an object file name under the udir for a .c file"""
ofile = cfile.new(ext=ext)
if ofile.relto(udir):
return ofile
assert ofile.relto(rpythonroot), (
"%r should be relative to either %r or %r" % (
ofile, rpythonroot, udir))
ofile = udir.join(ofile.relto(rpythonroot))
ofile.dirpath().ensure(dir=True)
return ofile
def preprocess_include_dirs(self, include_dirs):
if 'PYPY_LOCALBASE' in os.environ:
dirs = list(self._preprocess_include_dirs(include_dirs))
return [os.environ['PYPY_LOCALBASE'] + '/include'] + dirs
return self._preprocess_include_dirs(include_dirs)
def _preprocess_include_dirs(self, include_dirs):
return include_dirs
def _compile_args_from_eci(self, eci, standalone):
include_dirs = self.preprocess_include_dirs(eci.include_dirs)
args = self._includedirs(include_dirs)
if standalone:
extra = self.standalone_only
else:
extra = self.get_shared_only_compile_flags()
cflags = list(self.cflags) + list(extra)
return (cflags + list(eci.compile_extra) + args)
def get_shared_only_compile_flags(self):
return tuple(self.shared_only)
def preprocess_library_dirs(self, library_dirs):
if 'PYPY_LOCALBASE' in os.environ:
dirs = list(self._preprocess_library_dirs(library_dirs))
return [os.environ['PYPY_LOCALBASE'] + '/lib'] + dirs
return self._preprocess_library_dirs(library_dirs)
def _preprocess_library_dirs(self, library_dirs):
return library_dirs
def _link_args_from_eci(self, eci, standalone):
library_dirs = self.preprocess_library_dirs(eci.library_dirs)
library_dirs = self._libdirs(library_dirs)
libraries = self._libs(eci.libraries)
link_files = self._linkfiles(eci.link_files)
export_flags = self._exportsymbols_link_flags()
return (library_dirs + list(self.link_flags) + export_flags +
link_files + list(eci.link_extra) + libraries +
list(self.extra_libs))
def _exportsymbols_link_flags(self):
return []
def _finish_linking(self, ofiles, eci, outputfilename, standalone):
if outputfilename is None:
outputfilename = ofiles[0].purebasename
if ofiles:
dirname = ofiles[0].dirpath()
else:
dirname = udir.join('module_cache')
exe_name = dirname.join(outputfilename, abs=True)
if standalone:
if self.exe_ext:
exe_name += '.' + self.exe_ext
else:
exe_name += '.' + self.so_ext
if eci.use_cpp_linker:
cc_link = 'g++' # XXX hard-coded so far
else:
cc_link = self.cc
largs = self._link_args_from_eci(eci, standalone)
return self._link(cc_link, ofiles, largs, standalone, exe_name)
# below are some detailed information for platforms
def include_dirs_for_libffi(self):
dirs = self._include_dirs_for_libffi()
if 'PYPY_LOCALBASE' in os.environ:
return [os.environ['PYPY_LOCALBASE'] + '/include'] + dirs
return dirs
def library_dirs_for_libffi(self):
dirs = self._library_dirs_for_libffi()
if 'PYPY_LOCALBASE' in os.environ:
return [os.environ['PYPY_LOCALBASE'] + '/lib'] + dirs
return dirs
def _include_dirs_for_libffi(self):
raise NotImplementedError("Needs to be overwritten")
def _library_dirs_for_libffi(self):
raise NotImplementedError("Needs to be overwritten")
def check___thread(self):
return True
if sys.platform.startswith('linux'):
from rpython.translator.platform.linux import Linux, LinuxPIC
import platform
# Only required on armhf and mips{,el}, not armel. But there's no way to
# detect armhf without shelling out
if (platform.architecture()[0] == '64bit'
or platform.machine().startswith(
('arm', 'm68k', 'mips', 'parisc', 'ppc', 'sh4'))):
host_factory = LinuxPIC
else:
host_factory = Linux
elif sys.platform == 'darwin':
from rpython.translator.platform.darwin import Darwin_i386, Darwin_x86_64, Darwin_PowerPC
import platform
assert platform.machine() in ('Power Macintosh', 'i386', 'x86_64')
if platform.machine() == 'Power Macintosh':
host_factory = Darwin_PowerPC
elif sys.maxint <= 2147483647:
host_factory = Darwin_i386
else:
host_factory = Darwin_x86_64
elif "gnukfreebsd" in sys.platform:
from rpython.translator.platform.freebsd import GNUkFreebsd, GNUkFreebsd_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = GNUkFreebsd
else:
host_factory = GNUkFreebsd_64
elif "freebsd" in sys.platform:
from rpython.translator.platform.freebsd import Freebsd, Freebsd_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Freebsd
else:
host_factory = Freebsd_64
elif sys.platform.startswith('netbsd'):
from rpython.translator.platform.netbsd import Netbsd, Netbsd_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Netbsd
else:
host_factory = Netbsd_64
elif "openbsd" in sys.platform:
from rpython.translator.platform.openbsd import OpenBSD, OpenBSD_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = OpenBSD
else:
host_factory = OpenBSD_64
elif sys.platform.startswith('gnu'):
from rpython.translator.platform.hurd import Hurd
import platform
if platform.architecture()[0] == '32bit':
host_factory = Hurd
else:
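        # note: Hurd_64 is referenced below but only Hurd is imported above,
        # so this 64-bit branch would raise NameError if it were ever reached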
host_factory = Hurd_64
elif os.name == 'nt':
from rpython.translator.platform.windows import Windows, Windows_x64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Windows
else:
host_factory = Windows_x64
elif sys.platform == 'cygwin':
from rpython.translator.platform.cygwin import Cygwin, Cygwin64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Cygwin
else:
host_factory = Cygwin64
else:
    raise ValueError('unknown sys.platform "%s"' % (sys.platform,))
platform = host = host_factory()
def pick_platform(new_platform, cc):
if new_platform == 'host':
return host_factory(cc)
elif new_platform == 'arm':
from rpython.translator.platform.arm import ARM
return ARM(cc)
else:
raise ValueError("platform = %s" % (new_platform,))
def set_platform(new_platform, cc):
global platform
platform = pick_platform(new_platform, cc)
if not platform:
raise ValueError("pick_platform(%r, %s) failed"%(new_platform, cc))
log.msg("Set platform with %r cc=%s, using cc=%r, version=%r" % (new_platform, cc,
getattr(platform, 'cc','Unknown'),
getattr(platform, 'version','Unknown'),
))
if new_platform == 'host':
global host
host = platform
def is_host_build():
return host == platform
avg_line_length: 36.261708 | max_line_length: 93 | alphanum_fraction: 0.625389

hexsha: e48c2e6954e12e04eecb6bd633c7887078957ca7 | size: 1,087 | ext: py | lang: Python
max_stars_repo_path: tis/nets.py | max_stars_repo_name: gditzler/tis-experiment | max_stars_repo_head_hexsha: 8972707820f4f0a0e4b9779dedf396ce31bbe5c9 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tis/nets.py | max_issues_repo_name: gditzler/tis-experiment | max_issues_repo_head_hexsha: 8972707820f4f0a0e4b9779dedf396ce31bbe5c9 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tis/nets.py | max_forks_repo_name: gditzler/tis-experiment | max_forks_repo_head_hexsha: 8972707820f4f0a0e4b9779dedf396ce31bbe5c9 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import numpy as np
import tensorflow
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.utils import to_categorical
def NeuralNetwork(X, Y):
num_classes = len(np.unique(Y))
Y_train = to_categorical(Y, num_classes)
X_train = X
# Set the input shape
input_shape = (X.shape[1],)
# Create the model
model = Sequential()
model.add(Dense(128, input_shape=input_shape, activation='relu'))
model.add(Dense(256, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Configure the model and start training
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, Y_train,
epochs=1000,
batch_size=256,
verbose=0,
validation_split=0.2)
return model
avg_line_length: 30.194444 | max_line_length: 69 | alphanum_fraction: 0.654094

hexsha: d02ee68158eb7897da7f8d349db40e4f8c491699 | size: 10,882 | ext: py | lang: Python
max_stars_repo_path: src/models/model_builder.py | max_stars_repo_name: ZIZUN/korean_extractive_summarization | max_stars_repo_head_hexsha: 9290de7f6d7c7771f63269956b4c122cb52a0681 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2021-12-03T20:01:59.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-04T14:07:48.000Z
max_issues_repo_path: src/models/model_builder.py | max_issues_repo_name: ZIZUN/korean_extractive_summarization | max_issues_repo_head_hexsha: 9290de7f6d7c7771f63269956b4c122cb52a0681 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/models/model_builder.py | max_forks_repo_name: ZIZUN/korean_extractive_summarization | max_forks_repo_head_hexsha: 9290de7f6d7c7771f63269956b4c122cb52a0681 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import copy
import torch
import torch.nn as nn
from transformers import BertModel, BertConfig, AutoModel, RobertaConfig, AutoConfig
from torch.nn.init import xavier_uniform_
from models.decoder import TransformerDecoder
from models.encoder import Classifier, ExtTransformerEncoder
from models.optimizers import Optimizer
def build_optim(args, model, checkpoint):
""" Build optimizer """
if checkpoint is not None:
        optim = checkpoint['optim']  # originally checkpoint['optim'][0] (also tried ['optims'][0]), but that raised an error
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps)
optim.set_parameters(list(model.named_parameters()))
return optim
def build_optim_bert(args, model, checkpoint):
""" Build optimizer """
if checkpoint is not None:
optim = checkpoint['optims'][0]
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr_bert, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps_bert)
params = [(n, p) for n, p in list(model.named_parameters()) if n.startswith('bert.model')]
optim.set_parameters(params)
return optim
def build_optim_dec(args, model, checkpoint):
""" Build optimizer """
if checkpoint is not None:
optim = checkpoint['optims'][1]
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr_dec, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps_dec)
params = [(n, p) for n, p in list(model.named_parameters()) if not n.startswith('bert.model')]
optim.set_parameters(params)
return optim
def get_generator(vocab_size, dec_hidden_size, device):
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(dec_hidden_size, vocab_size),
gen_func
)
generator.to(device)
return generator
class Bert(nn.Module):
def __init__(self, large, temp_dir, finetune=False):
super(Bert, self).__init__()
# if(large):
# self.model = BertModel.from_pretrained('bert-large-uncased', cache_dir=temp_dir)
# else:
# self.model = BertModel.from_pretrained("monologg/kobert", cache_dir=temp_dir)
self.model = AutoModel.from_pretrained('klue/roberta-large')
self.finetune = finetune
def forward(self, x, segs, mask):
if(self.finetune):
# top_vec, _ = self.model(x, token_type_ids=segs, attention_mask=mask)
# outputs = self.model(x, token_type_ids=segs, attention_mask=mask)
# print(x.shape, segs.shape, mask.shape)
outputs = self.model(input_ids=x)#, token_type_ids=segs)# attention_mask=mask)
top_vec = outputs.last_hidden_state
else:
self.eval()
with torch.no_grad():
# top_vec, _ = self.model(x, token_type_ids=segs, attention_mask=mask)
outputs = self.model(x)#, token_type_ids=segs, attention_mask=mask)
top_vec = outputs.last_hidden_state
return top_vec
class ExtSummarizer(nn.Module):
def __init__(self, args, device, checkpoint):
super(ExtSummarizer, self).__init__()
self.args = args
self.device = device
self.bert = Bert(args.large, args.temp_dir, args.finetune_bert)
self.ext_layer = ExtTransformerEncoder(self.bert.model.config.hidden_size, args.ext_ff_size, args.ext_heads,
args.ext_dropout, args.ext_layers)
if (args.encoder == 'baseline'):
bert_config = AutoConfig(self.bert.model.config.vocab_size, hidden_size=args.ext_hidden_size,
num_hidden_layers=args.ext_layers, num_attention_heads=args.ext_heads, intermediate_size=args.ext_ff_size)
self.bert.model = BertModel(bert_config)
self.ext_layer = Classifier(self.bert.model.config.hidden_size)
if(args.max_pos>512):
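            # extend the pretrained position embeddings past 512 tokens by copying
            # the original table and repeating its last row for the extra positions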
my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][None,:].repeat(args.max_pos-512,1)
self.bert.model.embeddings.position_embeddings = my_pos_embeddings
if checkpoint is not None:
self.load_state_dict(checkpoint['model'], strict=True)
else:
if args.param_init != 0.0:
for p in self.ext_layer.parameters():
p.data.uniform_(-args.param_init, args.param_init)
if args.param_init_glorot:
for p in self.ext_layer.parameters():
if p.dim() > 1:
xavier_uniform_(p)
self.to(device)
def forward(self, src, segs, clss, mask_src, mask_cls):
top_vec = self.bert(src, segs, mask_src)
# print(top_vec)
sents_vec = top_vec[torch.arange(top_vec.size(0)).unsqueeze(1), clss]
sents_vec = sents_vec * mask_cls[:, :, None].float()
sent_scores = self.ext_layer(sents_vec, mask_cls).squeeze(-1)
return sent_scores, mask_cls
class AbsSummarizer(nn.Module):
def __init__(self, args, device, checkpoint=None, bert_from_extractive=None):
super(AbsSummarizer, self).__init__()
self.args = args
self.device = device
self.bert = Bert(args.large, args.temp_dir, args.finetune_bert)
if bert_from_extractive is not None:
self.bert.model.load_state_dict(
dict([(n[11:], p) for n, p in bert_from_extractive.items() if n.startswith('bert.model')]), strict=True)
if (args.encoder == 'baseline'):
bert_config = BertConfig(self.bert.model.config.vocab_size, hidden_size=args.enc_hidden_size,
num_hidden_layers=args.enc_layers, num_attention_heads=8,
intermediate_size=args.enc_ff_size,
hidden_dropout_prob=args.enc_dropout,
attention_probs_dropout_prob=args.enc_dropout)
self.bert.model = BertModel(bert_config)
if(args.max_pos>512):
my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][None,:].repeat(args.max_pos-512,1)
self.bert.model.embeddings.position_embeddings = my_pos_embeddings
self.vocab_size = self.bert.model.config.vocab_size
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
if (self.args.share_emb):
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder = TransformerDecoder(
self.args.dec_layers,
self.args.dec_hidden_size, heads=self.args.dec_heads,
d_ff=self.args.dec_ff_size, dropout=self.args.dec_dropout, embeddings=tgt_embeddings)
self.generator = get_generator(self.vocab_size, self.args.dec_hidden_size, device)
self.generator[0].weight = self.decoder.embeddings.weight
if checkpoint is not None:
self.load_state_dict(checkpoint['model'], strict=True)
else:
for module in self.decoder.modules():
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
for p in self.generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
else:
p.data.zero_()
if(args.use_bert_emb):
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder.embeddings = tgt_embeddings
self.generator[0].weight = self.decoder.embeddings.weight
self.to(device)
def forward(self, src, tgt, segs, clss, mask_src, mask_tgt, mask_cls):
top_vec = self.bert(src, segs, mask_src)
dec_state = self.decoder.init_decoder_state(src, top_vec)
decoder_outputs, state = self.decoder(tgt[:, :-1], top_vec, dec_state)
return decoder_outputs, None
avg_line_length: 42.84252 | max_line_length: 147 | alphanum_fraction: 0.625161

hexsha: 82c7155ec7cf2e6608b690a57c7e3c4e1ac2af97 | size: 1,074 | ext: py | lang: Python
max_stars_repo_path: tools/flip-wkt/run.py | max_stars_repo_name: crosscompute/crosscompute-examples | max_stars_repo_head_hexsha: b341994d9353206fe0ee6b69688e70999bc2196e | max_stars_repo_licenses: ["MIT"] | max_stars_count: 6 | max_stars_repo_stars_event_min_datetime: 2015-12-18T15:54:24.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-19T17:30:39.000Z
max_issues_repo_path: tools/flip-wkt/run.py | max_issues_repo_name: crosscompute/crosscompute-examples | max_issues_repo_head_hexsha: b341994d9353206fe0ee6b69688e70999bc2196e | max_issues_repo_licenses: ["MIT"] | max_issues_count: 7 | max_issues_repo_issues_event_min_datetime: 2016-04-22T16:31:08.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-07T17:54:46.000Z
max_forks_repo_path: tools/flip-wkt/run.py | max_forks_repo_name: crosscompute/crosscompute-examples | max_forks_repo_head_hexsha: b341994d9353206fe0ee6b69688e70999bc2196e | max_forks_repo_licenses: ["MIT"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2016-04-20T21:03:57.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-04T16:45:35.000Z
content:
from invisibleroads_macros_geometry import flip_xy, transform_geometries
from os.path import join
from pandas import read_csv
from shapely import wkt
from shapely.errors import WKTReadingError
from sys import argv
def get_wkt_columns(table):
try:
row = table.iloc[0]
except IndexError:
return []
wkt_columns = []
for column in table.columns:
try:
wkt.loads(row[column])
except (AttributeError, UnicodeEncodeError, WKTReadingError):
pass
else:
wkt_columns.append(column)
return wkt_columns
def flip_geometry_wkts(geometry_wkts):
geometries = [wkt.loads(x) for x in geometry_wkts]
geometries = transform_geometries(geometries, flip_xy)
return [x.wkt for x in geometries]
if __name__ == '__main__':
input_folder, output_folder = argv[1:3]
t = read_csv(join(input_folder, 'geometries.csv'))
for wkt_column in get_wkt_columns(t):
t[wkt_column] = flip_geometry_wkts(t[wkt_column])
t.to_csv(join(output_folder, 'geometries.csv'), index=False)
avg_line_length: 29.027027 | max_line_length: 72 | alphanum_fraction: 0.700186